diff --git "a/4288.jsonl" "b/4288.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4288.jsonl"
@@ -0,0 +1,696 @@
+{"seq_id":"616384498","text":"# variables\nsoma = 0\ncont = 0\npares = 0\n\nfor c in range(1, 500, 2):\n    if c % 3 == 0:\n        cont = cont + 1\n        soma = soma + c\n        print(c)\nprint(f'The total is {soma}')\n\n\n\n\n","sub_path":"Exercicios_Python/exercicio48.py","file_name":"exercicio48.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"81680910","text":"__author__ = \"David Laehnemann, Antonie Vietor\"\n__copyright__ = \"Copyright 2020, David Laehnemann, Antonie Vietor\"\n__email__ = \"antonie.v@gmx.de\"\n__license__ = \"MIT\"\n\nimport sys\nfrom snakemake.shell import shell\n\nlog = snakemake.log_fmt_shell(stdout=False, stderr=True)\n\nres = snakemake.resources.get(\"mem_gb\", \"3\")\nif not res or res is None:\n    res = 3\n\nprogs = set()\nextensions = set()\n\nfor file in snakemake.output:\n    if \"alignment_summary\" in file:\n        progs.add(\"CollectAlignmentSummaryMetrics \")\n        extensions.add(\".alignment_summary_metrics\")\n    elif \"insert_size\" in file:\n        progs.add(\"CollectInsertSizeMetrics \")\n        extensions.add(\".insert_size_metrics\")\n        extensions.add(\".insert_size_histogram.pdf\")\n    elif \"quality_distribution\" in file:\n        progs.add(\"QualityScoreDistribution \")\n        extensions.add(\".quality_distribution_metrics\")\n        extensions.add(\".quality_distribution.pdf\")\n    elif \"quality_by_cycle\" in file:\n        progs.add(\"MeanQualityByCycle \")\n        extensions.add(\".quality_by_cycle_metrics\")\n        extensions.add(\".quality_by_cycle.pdf\")\n    elif \"base_distribution_by_cycle\" in file:\n        progs.add(\"CollectBaseDistributionByCycle \")\n        extensions.add(\".base_distribution_by_cycle_metrics\")\n        extensions.add(\".base_distribution_by_cycle.pdf\")\n    elif \"gc_bias\" in file:\n        progs.add(\"CollectGcBiasMetrics \")\n        extensions.add(\".gc_bias.detail_metrics\")\n        extensions.add(\".gc_bias.summary_metrics\")\n        extensions.add(\".gc_bias.pdf\")\n    elif \"rna_metrics\" in file:\n        progs.add(\"RnaSeqMetrics \")\n        extensions.add(\".rna_metrics\")\n    elif \"bait_bias\" in file or \"error_summary\" in file or \"pre_adapter\" in file:\n        progs.add(\"CollectSequencingArtifactMetrics \")\n        extensions.add(\".bait_bias_detail_metrics\")\n        extensions.add(\".bait_bias_summary_metrics\")\n        extensions.add(\".error_summary_metrics\")\n        extensions.add(\".pre_adapter_detail_metrics\")\n        extensions.add(\".pre_adapter_summary_metrics\")\n    elif \"quality_yield\" in file:\n        progs.add(\"CollectQualityYieldMetrics \")\n        extensions.add(\".quality_yield_metrics\")\n    else:\n        sys.exit(\n            \"Unknown type of metrics file requested, for possible metrics files, see https://snakemake-wrappers.readthedocs.io/en/stable/wrappers/picard/collectmultiplemetrics.html\"\n        )\nprograms = \" PROGRAM=\" + \"PROGRAM=\".join(progs)\n\nout = str(snakemake.wildcards.sample) # as default\noutput_file = str(snakemake.output[0])\nfor ext in extensions:\n    if ext in output_file:\n        if output_file.endswith(ext):\n            out = output_file[: -len(ext)]\n            break\n\nshell(\n    \"(picard -Xmx{res}g CollectMultipleMetrics \"\n    \"I={snakemake.input.bam} \"\n    \"O={out} \"\n    \"R={snakemake.input.ref} \"\n    \"{snakemake.params}{programs}) {log}\"\n)\n","sub_path":"bio/picard/collectmultiplemetrics/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"500267538","text":"from .base import *\nimport os\n\n\nDEBUG = True\n\n# This allows oauth to operate over http. DO NOT USE THIS CONFIG IN PRODUCTION\nos.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\n\nSECRET_KEY = \"BBCLIENT-LOCAL-_cdlv24!g$4)b&wq9fjn)p!vrs729idssk2qp7iy!u#!\"\n\nDBPATH = os.path.join(BASE_DIR, 'db/db.db')\nDATABASES = {\n 'default': {\n # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'ENGINE': 'django.db.backends.sqlite3',\n # Or path to database file if using sqlite3.\n 'NAME': DBPATH,\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n # Set to empty string for localhost. Not used with sqlite3.\n 'HOST': '',\n # Set to empty string for default. Not used with sqlite3.\n 'PORT': '',\n }\n}\n\n# Set these values for you environment\nSOCIAL_AUTH_OAUTH2IO_KEY = ''\nSOCIAL_AUTH_OAUTH2IO_SECRET = ''\nSOCIAL_AUTH_OAUTH2IO_SCOPE = []\nOAUTH2IO_HOST = \"http://example.com\"\nOAUTH2_PROVIDER_NAME = \"Your OAuth2\"\nAPP_TITLE = \"Your Client Example\"\n\n","sub_path":"bbc/bbc/settings/local_sample.py","file_name":"local_sample.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597481940","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 8 23:32:09 2017\n\n@author: davidmoran\n\"\"\"\nfrom investor import Investor\nfrom honchopool import pools\n\nclass NonHoncho(Investor):\n \"\"\"Represents any account without Honcho status\"\"\"\n \n def __init__(self, username):\n \"\"\"Creates an object representing a non-Honcho\"\"\"\n Investor.__init__(self, username=username)\n self.honcho_share_portfolio = {}\n\n def get_nonhoncho_equity(self, personal=True, cash=True, honcho_shares=True):\n \"\"\"Returns non-Honcho equity including their personal portfolio, Honcho share portfolio, anc cash as specified\"\"\"\n equity = 0\n equity += self.get_current_equity(portfolio=personal, cash=cash)[1]\n\n if honcho_shares:\n for pool in self.honcho_share_portfolio:\n equity += self.get_pool_equity(pool)[1]\n return equity\n\n def get_pool_equity(self, pool):\n \"\"\"Returns current price of specified Honcho pool and non-Honcho's current equity\"\"\"\n current_price = pool.get_current_price()\n if pool in self.honcho_share_portfolio:\n return current_price, current_price * self.honcho_share_portfolio[pool]\n else:\n return current_price, 0.0\n\n def buy_honcho_shares(self, equity, industry, low_risk=True, high_diverse=True):\n \"\"\"Buys Honcho shares for non-Honcho\"\"\"\n pool = pools[industry][low_risk][high_diverse]\n if equity > self.cash:\n return \"You can only buy $%s worth of shares.\" % self.cash\n\n current_price = self.get_pool_equity(pool)[0]\n shares = equity / current_price\n self.cash -= equity\n\n if pool in self.honcho_share_portfolio:\n self.honcho_share_portfolio[pool] += shares\n else:\n self.honcho_share_portfolio[pool] = shares\n pool.nonhonchos.append(self)\n\n def sell_honcho_shares(self, equity, industry, low_risk, high_diverse):\n \"\"\"Sells Honcho shares for non-Honcho\"\"\"\n pool = pools[industry][low_risk][high_diverse]\n if pool not in self.honcho_share_portfolio:\n return \"This non-Honcho is not invested in this pool.\"\n\n current_price, current_equity = self.get_pool_equity(pool)\n\n if equity > current_equity:\n return \"You can only sell $%s worth of shares.\" % current_equity\n\n self.cash += equity\n\n shares = equity / current_price\n self.honcho_share_portfolio[pool] -= 
shares\n\n        if self.honcho_share_portfolio[pool] == 0:\n            self.honcho_share_portfolio.pop(pool)\n            pool.nonhonchos.remove(self)\n    ","sub_path":"Structure/nonhoncho.py","file_name":"nonhoncho.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"457630236","text":"student=input(\"Please Enter Your Name :: \\n\")\nprint(\"Questions:: \\n\")\nimport random\nfor questions in range(10):\n    a= random.randint(0,5000)\n    b= random.randint(1,5000)\n    opt = random.choice([ \"-\" , \"+\" , \"*\" , \"/\" ])\n    print(a,opt,b)\n    if opt==\"+\":\n        ans=float(a+b)\n    elif opt==\"-\":\n        ans=float(a-b)\n    elif opt==\"*\":\n        ans=float(a*b)\n    elif opt==\"/\":\n        ans=float(a/b) \n    answer=float(input(\"Please Enter Your Answer:: \\n\"))\n    if answer==ans:\n        print(\"Correct\")\n    elif answer!=ans:\n        print(\"Incorrect\")\n    \n    print()\n","sub_path":"Arithmetic.py","file_name":"Arithmetic.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"342186775","text":"#!/usr/bin/env python3\n\nimport sys\nimport seaborn\nimport matplotlib.pyplot as plt\n\ndef file_to_data (file_path: 'Path') -> list:\n    my_file = None\n    data = []\n    try:\n        my_file = open(file_path, 'r')\n        for line in my_file:\n            data.append(int(line))\n    finally:\n        if my_file != None:\n            my_file.close()\n    return data\n\ndef plot(data1: 'iso1 contigs', data2: 'dmel contigs') -> 'plot':\n    cdf_plot = seaborn.distplot(data1, kde_kws = dict(cumulative=True), bins = 1000, label = 'iso1 assembly')\n    seaborn.distplot(data2, kde_kws = dict(cumulative=True), bins = 1000, label = 'dmel assembly')\n    cdf_plot.set_title('CDF Plot: D. mel Reference vs. 
Iso1 Nanopore Genome Assembly')\n cdf_plot.set_ylabel('Proportion of Assembly')\n cdf_plot.set_xlabel('Contig Length')\n plt.legend()\n plt.show()\n\nif __name__ == '__main__':\n iso1_data = file_to_data(sys.argv[1])\n dmel_data = file_to_data(sys.argv[2])\n plot(iso1_data, dmel_data)\n\n","sub_path":"week7homework/cds_plot.py","file_name":"cds_plot.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647840681","text":"import cv2\nimport numpy as np\n\nimage=cv2.imread('images/try123_b.jpg')\n\npoints_A=np.float32( [ [427,337] ,\n [932,331] ,\n [415,709] ,\n [983,709] \n ] )\n\npoints_B=np.float32( [ [0,0] ,\n [400,0] ,\n [0,400] ,\n [400,400] \n ] )\n\nM=cv2.getPerspectiveTransform(points_A,points_B)\nwarped=cv2.warpPerspective(image,M,(400,400))\n\ncv2.imshow('original',image)\ncv2.waitKey()\ncv2.imshow('warpPerspective',warped)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n\n","sub_path":"files/13_affine.py","file_name":"13_affine.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"643799383","text":"from django.conf.urls import url\n\nfrom subscription.views import manage_subscriber, add_subscribers, delete_subscriber, \\\n delete_all_subscribers, subscribe_notification, modify_subscription, \\\n confirm_subscription_notification, unsubscribe_notification, \\\n confirm_unsubscription_notification, display_mail, display_mail_iframe\n\nurlpatterns = [\n url(r'^subscriber/manage/(?P\\d+)/(?Pfr|en)/$',\n manage_subscriber,\n name='manage_subscriber'),\n\n url(r'^subscriber/add/(?P\\d+)/$',\n add_subscribers, name='add_subscribers'),\n\n url(r'^subscriber/delete/(?P\\d+)/(?P\\d+)/$',\n delete_subscriber, name='delete_subscriber'),\n\n url(r'^subscribers/delete/all/(?P\\d+)/$',\n delete_all_subscribers, name='delete_all_subscribers'),\n\n url(r'^subscribe/(?P\\d+)/$',\n subscribe_notification, name='subscribe_notification'),\n\n url(r'^modify/(?P\\d+)/(?P\\d+)/$',\n modify_subscription, name='modify_subscription'),\n\n url(r'^subscribe/confirm/(?P\\d+)/$',\n confirm_subscription_notification, name='confirm_subscription_notification'),\n\n url(r'^unsubscribe/(?P\\d+)/$',\n unsubscribe_notification, name='unsubscribe_notification'),\n\n url(r'^unsubscribe/confirm/(?P\\d+)/(?P[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$',\n confirm_unsubscription_notification, name='confirm_unsubscription_notification'),\n\n url(r'^display-mail/(?P\\d+)/$',\n display_mail, name='display_mail'),\n\n url(r'^translation/(?P\\d+)/$',\n display_mail_iframe, name='display_mail_iframe'),\n]\n","sub_path":"src/subscription/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46636839","text":"import datetime\nimport json\n\nfrom django.db import transaction\nfrom django.http import JsonResponse\n\nfrom . 
import models\nfrom base import utils\nfrom program import models as program_models\n\ndef _serialize_set_record(set_record):\n    return {\n        'id': set_record.identifier,\n        'plannedReps': set_record.reps_planned,\n        'completedReps': set_record.reps_completed,\n        'weight': set_record.weight,\n        'warmupOrWork': set_record.warmup_or_work,\n    }\n\ndef _serialize_exercise_record(exercise_record):\n    return {\n        'id': exercise_record.identifier,\n        'name': exercise_record.exercise.name,\n        'weight': exercise_record.planned_weight,\n        'warmupSets': [\n            _serialize_set_record(set_record)\n            for set_record in exercise_record.warmup_set_records.all()\n        ],\n        'workSets': [\n            _serialize_set_record(set_record)\n            for set_record in exercise_record.work_set_records.all()\n        ],\n    }\n\ndef _serialize_workout_record(workout_record):\n    return {\n        'id': workout_record.identifier,\n        'complete': workout_record.is_finished,\n        'name': workout_record.program_workout.name,\n        'exercises': [\n            _serialize_exercise_record(exercise_record)\n            for exercise_record in workout_record.exercise_records.all()\n        ],\n    }\n\ndef _get_planned_weight_for_user(program_exercise, user):\n    if program_exercise.progression == program_models.ProgramExercise.CONSTANT:\n        most_recent_exercise_record = models.ExerciseRecord.objects.filter(\n            user=user,\n            exercise=program_exercise.exercise,\n        ).order_by('-created').first()\n\n        if not most_recent_exercise_record:\n            return program_exercise.start_weight\n\n        return most_recent_exercise_record.planned_weight\n\n    if program_exercise.progression == program_models.ProgramExercise.LINEAR:\n        previous_exercise_records = models.ExerciseRecord.objects.filter(\n            user=user,\n            exercise=program_exercise.exercise,\n        ).order_by('-created')\n\n        failed_counter = 0\n        last_successful_record = None\n\n        for previous_exercise_record in previous_exercise_records:\n            if previous_exercise_record.succeeded:\n                last_successful_record = previous_exercise_record\n                break\n            else:\n                failed_counter += 1\n\n        if not last_successful_record:\n            return program_exercise.start_weight\n\n        # Deload if we've failed 3 times\n        if failed_counter >= 3:\n            return max(\n                program_exercise.start_weight,\n                utils.round_to_nearest(last_successful_record.planned_weight * 4 / 5, 5),\n            )\n\n        # Deload 20% for every two weeks since we last did this\n        last_record = previous_exercise_records.first()\n        if last_record.created < (utils.utcnow() - datetime.timedelta(days=14)):\n            days_since_last_record = (utils.utcnow() - last_record.created).days\n            two_week_periods = days_since_last_record // 14\n            cumulative_factor = 4**two_week_periods / 5**two_week_periods\n            return max(\n                program_exercise.start_weight,\n                utils.round_to_nearest(\n                    last_successful_record.planned_weight * cumulative_factor,\n                    5,\n                ),\n            )\n\n        return last_successful_record.planned_weight + program_exercise.progression_linear_increment\n\ndef _generate_set_records(exercise_record, program_exercise=None):\n    if program_exercise is None:\n        program_exercise = exercise_record.workout_record.program_workout.program_exercises.filter(\n            exercise=exercise_record.exercise,\n        ).first()\n\n    planned_weight = exercise_record.planned_weight\n\n    if planned_weight * 75 / 100 > program_exercise.start_weight:\n        set_record = models.SetRecord(\n            exercise_record=exercise_record,\n            reps_planned=program_exercise.reps,\n            weight=program_exercise.start_weight,\n            warmup_or_work=models.SetRecord.WARMUP,\n        )\n        set_record.save()\n\n    for percent in (45, 65, 75, 85):\n        if planned_weight * (percent - 25) / 100 > program_exercise.start_weight:\n            weight = 
utils.round_to_nearest(planned_weight * percent / 100, 5)\n\n set_record = models.SetRecord(\n exercise_record=exercise_record,\n reps_planned=program_exercise.reps,\n weight=weight,\n warmup_or_work=models.SetRecord.WARMUP,\n )\n set_record.save()\n\n for s in range(program_exercise.sets):\n set_record = models.SetRecord(\n exercise_record=exercise_record,\n reps_planned=program_exercise.reps,\n weight=exercise_record.planned_weight,\n warmup_or_work=models.SetRecord.WORK,\n )\n set_record.save()\n\n@transaction.atomic\ndef update_exercise_record(request):\n assert request.method == 'POST'\n payload = json.loads(request.body)\n exercise_record_identifier = payload['exerciseRecord']\n weight = payload['weight']\n\n exercise_record = models.ExerciseRecord.objects.get(\n identifier=exercise_record_identifier,\n user=request.user,\n )\n\n exercise_record.planned_weight = int(weight)\n exercise_record.save()\n\n exercise_record.set_records.all().delete()\n\n _generate_set_records(exercise_record)\n\n return JsonResponse({\n 'success': True,\n 'exerciseRecord': _serialize_exercise_record(exercise_record),\n })\n\n@transaction.atomic\ndef start_workout_record(request):\n assert request.method == 'POST'\n payload = json.loads(request.body)\n program_workout_identifier = payload['programWorkout']\n\n program_workout = program_models.ProgramWorkout.objects.get(\n identifier=program_workout_identifier,\n )\n\n workout_record = models.WorkoutRecord.objects.filter(\n user=request.user,\n program_workout=program_workout,\n is_finished=False,\n ).first()\n\n if workout_record:\n return JsonResponse(_serialize_workout_record(workout_record))\n\n workout_record = models.WorkoutRecord(\n user=request.user,\n program_workout=program_workout,\n )\n workout_record.save()\n\n for program_exercise in program_workout.program_exercises.all():\n exercise_record = models.ExerciseRecord(\n user=request.user,\n exercise=program_exercise.exercise,\n workout_record=workout_record,\n planned_weight=_get_planned_weight_for_user(program_exercise, request.user),\n )\n exercise_record.save()\n\n _generate_set_records(exercise_record, program_exercise)\n\n return JsonResponse(_serialize_workout_record(workout_record))\n\ndef finish_workout_record(request):\n assert request.method == 'POST'\n payload = json.loads(request.body)\n workout_record_identifier = payload['workoutRecord']\n\n workout_records = models.WorkoutRecord.objects.filter(\n identifier=workout_record_identifier,\n user=request.user,\n )\n\n if workout_records.count() == 0:\n return JsonResponse({\n 'success': False,\n 'message': 'No WorkoutRecord found with this ID.',\n }, status=404)\n\n assert workout_records.count() == 1\n\n workout_records.update(is_finished=True)\n\n return JsonResponse({\n 'success': True,\n })\n\ndef update_set_record(request):\n assert request.method == 'POST'\n payload = json.loads(request.body)\n set_record_identifier = payload.pop('setRecord', None)\n\n if not set_record_identifier:\n return JsonResponse({\n 'success': False,\n 'message': 'Parameter \"setRecord\" is required',\n })\n\n set_records = models.SetRecord.objects.filter(\n identifier=set_record_identifier,\n exercise_record__user=request.user,\n )\n\n if set_records.count() == 0:\n return JsonResponse({\n 'success': False,\n 'message': 'No SetRecord found with this ID.',\n }, status=404)\n\n assert set_records.count() == 1\n\n updates = {}\n\n for key in payload.keys():\n if key == 'completedReps':\n if payload['completedReps'] is None:\n updates['reps_completed'] = 
None\n\n else:\n updates['reps_completed'] = int(payload['completedReps'])\n\n else:\n return JsonResponse({\n 'success': False,\n 'message': 'Unexpected field \"{}\"'.format(key),\n }, status=400)\n\n if not updates:\n return JsonResponse({\n 'success': False,\n 'message': 'At least 1 field to update is required. Allowed fields: {}'.format(\n ', '.join('\"{}\"'.format(f) for f in ['completedReps']),\n ),\n })\n\n set_records.update(**updates)\n\n return JsonResponse({\n 'success': True,\n })\n","sub_path":"user/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":8979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"110116251","text":"from . import *\nimport logging \nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.logging import LoggingIntegration \n\nDEBUG = False\nALLOWED_HOSTS = ['204.48.26.176']\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'projet8',\n 'USER': 'nathansql',\n 'PASSWORD': os.environ.get('SQLPWD'),\n 'HOST': 'localhost',\n 'PORT': '5432',\n }\n}\n\nsentry_logging = LoggingIntegration( \n level=logging.INFO, # Capture info and above as breadcrumbs \n event_level=logging.ERROR # Send errors as events \n) \n\nsentry_sdk.init(\n dsn=\"https://0b44cc663eaa4f35b107cbdd75719913@sentry.io/2827010\",\n integrations=[DjangoIntegration(), sentry_logging],\n\n # If you wish to associate users to errors (assuming you are using\n # django.contrib.auth) you may enable sending PII data.\n send_default_pii=False,\n environment=\"production\"\n)\n","sub_path":"project8/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90077910","text":" # Program execution file\n\n# Import Quantum Gates\nimport QuantumGates\n\n# Import Random\nimport random\n\n# Import Cmath\nimport cmath\n\n# Import Logging\nimport logging\n\ndef Run(program):\n\tlogging.info(program)\n\t# Initalize QBit array and data storage array\n\ttry:\n\t\tQBits = [complex(0.0, 0.0)] * (2**int(program[0]))\n\t\tData = [0] * (2**int(program[0]))\n\texcept:\n\t\treturn \"Error: the first line was not a natural number\"\n\n\t# Set first item to 1\n\tQBits[0] = complex(1.0 ,0.0)\n\n\t# Set the deafault output as an array of probabilities\n\toutputData = False\n\n\t# Set the deafault number of times to loop when outputting actual data\n\ttimes = 0\n\n\tcheckFirst = True\n\tfor i in xrange(1, len(program)): # loop all rows in 'Program.txt'\n\t\tcommand = program[i].split(' ') # split row into array at spaces\n\t\tlogging.info(program[i])\n\t\tlogging.info(command)\n\t\tif i == 1:\n\t\t\ttry: # try\n\t\t\t\tint(command[0]) # check if turning command[0] into an int returns an error\n\t\t\t\toutputData = True # change ouptutData to True (output result of running simulation)\n\t\t\t\ttimes = int(command[0]) # set times to the value of command[0]\n\t\t\texcept ValueError: # except\n\t\t\t\toutputData = False # keep output data as false\n \n\t\tif command[0] == '#': # check if row is comment\n\t\t\tpass\n \n\t\telif command[0] == 'NOT': # check if row is a NOT gate\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.NOT(QBits,int(command[1])) # if so apply the NOT gate\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \n\t\telif command[0] == 'CNOT': # check if row is a CNOT 
gate\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.CNOT(QBits, int(command[1]), int(command[2])) # if so apply the CNOT gate\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \n\t\telif command[0] == 'CCNOT': # check if row is a CCNOT gate\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.CCNOT(QBits, int(command[1]), int(command[2]), int(command[3])) # if so apply the CCNOT gate\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \n\t\telif command[0] == 'H' or command[0] == 'Hadamard': # check if row is a Hadamard gate\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.Hadamard(QBits, int(command[1])) # if so apply the Hadamard gate\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \n\t\telif command[0] == 'ZNOT': # check if row is a ZNOT gate\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.ZNOT(QBits) # if so apply the ZNOT gate\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \t\n\t\telif command[0] == 'OracleGA': # check if row is a Oracle gate (oracle from Grovers Algorithm)\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.OracleGA(QBits) # if so apply the Oracle gate (oracle from Grovers Algorithm)\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \n\t\telif command[0] == 'GroverDiffusion' or command[0] == 'GD': # check if row is Grover Diffusion\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.GroverDiffusion(QBits) # if so apply the Grover Diffusion\n\t\t\t\tcheckFirst = False\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \t \n\t\telif command[0] == 'HadamardOverZn' or command[0] == 'HZn': # check if row is Hadamard over Z to the n\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.HZn(QBits, int(command[1])) # if so apply Hadamard over Z to the n\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \t\n\t\telif command[0] == 'ADD': # check if row is add\n\t\t\ttry:\n\t\t\t\tQBits = QBits + ([complex(0, 0)] * ( ( 2**(int(command[1]) + int(program[0]) ) ) - len(QBits) ) ) # if so add n QBits to QBits\n\t\t\t\tData = Data + ([0] * ( ( 2**(int(command[1]) + int(program[0]) ) ) - len(Data) ) ) # and Data\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \t \t\n\t\telif command[0] == 'OracleSA': # check if row is a Oracle gate (oracle from Shor's Algorithm)\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.OracleSA(QBits, int(program[0])) # if so apply the Oracle gate (oracle from Shor's Algorithm)\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n \t\n\t\telif command[0] == 'Measure' or command[0] == 'M': # check if row is Measure\n\t\t\ttry:\n\t\t\t\tQBits = QuantumGates.Measure(QBits, int(command[1]), times) # if so measure\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n\t\t\t \t\n\t\telif command[0] == 'R': # check if row is Rotate\n\t\t\ttry:\n\t\t\t\tarray = program[i][2:]\n\t\t\t\tarray = array.split(',')\n\t\t\t\tarray = [float(i) for i in array]\n\t\t\t\tQBits = QuantumGates.Rotate(QBits, array) # if so rotate\n\t\t\texcept:\n\t\t\t\treturn \"Error: line \"+str(i+1)+\"was missing an input\"\n\n\n\tif outputData == False:\n\t\tsquared = []\n\t\tfor i in range(0, len(QBits)):\n\t\t\tsquared.append((cmath.polar(QBits[i])[0])**2)\n\t\treturn squared\n\telse:\n\t\tfor i in range(0, times):\n\t\t\ttest = random.uniform(0.0,1.0)\n\t\t\tif checkFirst == True:\n\t\t\t\tprob = 
(cmath.polar(QBits[0])[0])**2\n\t\t\t\tfor i in range(0, len(QBits)):\n\t\t\t\t\tif test < prob and test > (prob - (cmath.polar(QBits[i])[0])**2):\n\t\t\t\t\t\tData[i] = Data[i]+1\n\t\t\t\t\tif i != len(QBits)-1:\n\t\t\t\t\t\tprob = prob + (cmath.polar(QBits[i])[0])**2\n\t\t\t\t\tif QBits[i] == 0:\n\t\t\t\t\t\tData[i] = 0\n\t\t\telse:\t\n\t\t\t\tprob = (cmath.polar(QBits[1])[0])**2\n\t\t\t\tfor i in range(1, len(QBits)):\n\t\t\t\t\tif test < prob and test > (prob - (cmath.polar(QBits[i])[0])**2):\n\t\t\t\t\t\tData[i] = Data[i]+1\n\t\t\t\t\tif i < len(QBits)-1:\n\t\t\t\t\t\tprob = prob + (cmath.polar(QBits[i])[0])**2\n\t\t\t\t\tif QBits[i] == 0:\n\t\t\t\t\t\tData[i] = 0\n\t\treturn Data\n","sub_path":"quantum-sim-flask/Execute.py","file_name":"Execute.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"434966026","text":"import os\nfrom PySide import QtGui, QtCore\n\n\nclass InputWidget(QtGui.QWidget):\n\n def __init__(self, parent=None):\n super(InputWidget, self).__init__(parent)\n\n splitter = QtGui.QSplitter(QtCore.Qt.Vertical)\n g = QtGui.QGridLayout()\n g.addWidget(splitter, 0, 0)\n self.setLayout(g)\n\n #Top half of splitter\n top_w = QtGui.QWidget()\n top_g = QtGui.QGridLayout()\n top_w.setLayout(top_g)\n self.current_scene_title = QtGui.QLabel(\"Current Scene Alembics\")\n self.button_refresh = QtGui.QPushButton(QtGui.QIcon(\":/autoload.png\"), \"\")\n self.button_refresh.setObjectName(\"flat\")\n self.sceneview = QtGui.QListWidget()\n self.versionsview = QtGui.QListWidget()\n self.button_replace = QtGui.QPushButton(\"Replace\")\n self.button_remove = QtGui.QPushButton(\"Remove\")\n self.button_swap_to_gpu = QtGui.QPushButton(\"Swap to GPU\")\n self.button_swap_to_ref = QtGui.QPushButton(\"Swap to Ref\")\n top_g.addWidget(self.current_scene_title, 0, 0, 1, 2)\n top_g.addWidget(self.button_refresh, 0, 2)\n top_g.addWidget(self.sceneview, 1, 0)\n top_g.addWidget(self.versionsview, 1, 1, 1, 2)\n top_g.addWidget(self.button_remove, 2, 0)\n top_g.addWidget(self.button_swap_to_gpu, 3, 0)\n top_g.addWidget(self.button_swap_to_ref, 4, 0)\n top_g.addWidget(self.button_replace, 2, 1, 1, 2)\n splitter.addWidget(top_w)\n\n #Bottom half of splitter\n bot_w = QtGui.QWidget()\n bot_g = QtGui.QGridLayout()\n bot_w.setLayout(bot_g)\n self.input_title = QtGui.QLabel(\"Import New Alembics\")\n self.model = QtGui.QFileSystemModel()\n self.model.setFilter(\n QtCore.QDir.Dirs|\n QtCore.QDir.NoDotAndDotDot|\n QtCore.QDir.AllDirs)\n self.model.setRootPath(QtCore.QDir.rootPath())\n self.folderview = QtGui.QTreeView()\n self.folderview.setHeaderHidden(True)\n self.folderview.setSortingEnabled(True)\n self.folderview.setSelectionMode(\n QtGui.QAbstractItemView.ExtendedSelection)\n self.folderview.setSelectionBehavior(\n QtGui.QAbstractItemView.SelectItems)\n self.folderview.setModel(self.model)\n for i in xrange(1,4):\n self.folderview.hideColumn(i)\n self.folderview.setRootIndex(self.model.index('.'))\n self.selm = self.folderview.selectionModel()\n self.selm.selectionChanged.connect(self.sel_changed)\n\n self.fileview = QtGui.QListWidget()\n self.fileview.setSelectionMode(\n QtGui.QAbstractItemView.ExtendedSelection)\n self.button_import = QtGui.QPushButton(\"Import\")\n self.button_import_gpu = QtGui.QPushButton(\"Import GPU\")\n self.button_reference = QtGui.QPushButton(\"Reference\")\n\n bot_g.addWidget(self.input_title, 0, 0, 1, 3)\n bot_g.addWidget(self.folderview, 1, 0)\n bot_g.addWidget(self.fileview, 
1, 1, 1, 2)\n bot_g.addWidget(self.button_import, 2, 1)\n bot_g.addWidget(self.button_import_gpu, 2, 2)\n bot_g.addWidget(self.button_reference, 3, 1, 1, 2)\n splitter.addWidget(bot_w)\n self.root = None\n\n def set_root(self, root):\n if self.root == root:\n return\n self.model.setRootPath(root)\n self.folderview.setRootIndex(self.model.index(root))\n self.fileview.clear()\n self.folderview.clearSelection()\n\n def sel_changed(self, selection):\n\n self.fileview.clear()\n indexes = self.selm.selectedIndexes()\n paths = set([self.model.filePath(i) for i in indexes])\n\n for path in paths:\n files = sorted([f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))])\n\n for f in files:\n file_item = QtGui.QListWidgetItem()\n file_item.setText(os.path.splitext(f)[0])\n file_item.setData(QtCore.Qt.UserRole, os.path.join(path, f))\n self.fileview.addItem(file_item)\n\n def get_selected_abcs(self):\n\n selected_items = self.sceneview.selectedItems()\n\n if not selected_items:\n return []\n\n items = [item.data(QtCore.Qt.UserRole) for item in selected_items]\n return items\n\n def get_selected_files(self):\n\n selected_items = self.fileview.selectedItems()\n\n if not selected_items:\n return []\n\n files = [item.data(QtCore.Qt.UserRole) for item in selected_items]\n return files\n\n\nclass OutputWidget(QtGui.QWidget):\n\n def __init__(self, parent=None):\n super(OutputWidget, self).__init__(parent)\n\n grid = QtGui.QGridLayout()\n self.setLayout(grid)\n\n self.idview = QtGui.QListWidget()\n self.idview.setFocusPolicy(QtCore.Qt.NoFocus)\n self.idview.setSortingEnabled(True)\n self.idview.setSelectionMode(\n QtGui.QAbstractItemView.ExtendedSelection)\n self.geoview = QtGui.QListWidget()\n self.geoview.setFocusPolicy(QtCore.Qt.NoFocus)\n self.geoview.setSortingEnabled(True)\n self.geoview.setSelectionMode(\n QtGui.QAbstractItemView.ExtendedSelection)\n self.button_cache = QtGui.QPushButton(\"Cache IDs\")\n self.button_id = QtGui.QPushButton(\"New ID\")\n self.button_del = QtGui.QPushButton(\"Delete ID\")\n self.button_add = QtGui.QPushButton(\"Add Object\")\n self.button_rem = QtGui.QPushButton(\"Remove Object\")\n\n grid.addWidget(QtGui.QLabel(\"Alembic IDs\"), 0, 0, 1, 2)\n grid.addWidget(QtGui.QLabel(\"Geometry\"), 0, 2, 1, 2)\n grid.addWidget(self.idview, 1, 0, 1, 2)\n grid.addWidget(self.geoview, 1, 2, 1, 2)\n grid.addWidget(self.button_id, 2, 0)\n grid.addWidget(self.button_del, 2, 1)\n grid.addWidget(self.button_add, 2, 2)\n grid.addWidget(self.button_rem, 2, 3)\n grid.addWidget(self.button_cache, 3, 0, 1, 2)\n\n def get_selected_ids(self):\n list_items = self.idview.selectedItems()\n\n ids = {}\n for item in list_items:\n ids[item.text()] = item.data(QtCore.Qt.UserRole)\n\n return ids\n\n def get_selected_geo(self):\n geo_items = self.geoview.selectedItems()\n nodes = [item.data(QtCore.Qt.UserRole) for item in geo_items]\n return nodes\n\n def clear(self):\n self.geoview.clear()\n self.idview.clear()\n\n\nclass WindowHeader(QtGui.QLabel):\n\n def __init__(self, img, parent=None):\n super(WindowHeader, self).__init__(parent)\n self.setObjectName(\"WindowHeader\")\n self.setPixmap(QtGui.QPixmap(QtGui.QImage(img)))\n\nclass UI(QtGui.QDockWidget):\n\n def __init__(self, parent=None):\n super(UI, self).__init__(parent)\n\n self.setObjectName(\"AbcIO\")\n self.setWindowTitle(\"Alembic IO\")\n\n self.setFeatures(\n QtGui.QDockWidget.DockWidgetClosable|\n QtGui.QDockWidget.DockWidgetFloatable|\n QtGui.QDockWidget.DockWidgetMovable)\n self.setFloating(True)\n self.setAllowedAreas(\n 
QtCore.Qt.LeftDockWidgetArea|\n QtCore.Qt.RightDockWidgetArea)\n\n with open(os.path.join(os.path.dirname(__file__), \"abcio.css\")) as f:\n self.setStyleSheet(f.read())\n\n self.w = QtGui.QWidget()\n self.l = QtGui.QGridLayout()\n self.l.setContentsMargins(0, 0, 0, 0)\n self.l.setSpacing(0)\n self.l.setRowStretch(1, 1)\n self.w.setLayout(self.l)\n self.setWidget(self.w)\n\n img_path = os.path.join(os.path.dirname(__file__), \"alembic.png\")\n self.window_header = WindowHeader(img=img_path.replace(\"\\\\\", \"/\"))\n\n self.tabs = QtGui.QTabWidget(self)\n self.tabs.setDocumentMode(True)\n tb = self.tabs.tabBar()\n tb.setDrawBase(False)\n tb.setExpanding(True)\n\n self.in_widget = InputWidget(self.tabs)\n self.out_widget = OutputWidget(self.tabs)\n #add tabs\n self.tabs.addTab(self.in_widget, \"Input\")\n self.tabs.addTab(self.out_widget, \"Output\")\n\n self.status_bar = QtGui.QStatusBar()\n self.l.addWidget(self.window_header)\n self.l.addWidget(self.tabs)\n self.l.addWidget(self.status_bar)\n","sub_path":"abcio/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":8233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368443969","text":"\"\"\"\nTest Suite for NUCLEOTIDE_STATS_FROM_FASTA.py\n\"\"\"\nfrom os import path\nimport pytest\n\n# pylint: disable=C0116\n# pylint: disable=C0103\n\nfrom nucleotide_statistics_from_fasta import _check_size_of_lists, \\\n get_header_and_sequence_lists, get_fh, print_sequence_stats, _get_accession, \\\n _get_nt_occurrence\n\nLIST_HEADER = ['>EUG123', '>EUG124', \">EUG126\", \">EUG128\"]\nLIST_SEQS = ['ACGTAC', 'ACGTCC', 'ACCGGT', 'AAAACT']\nLIST_SEQS_NOT_EQUAL = ['ACGTAC', 'ACGTCC', 'ACCGGT']\nFILE_OUT = get_fh(\"test_influenza_stats.txt\", \"w\")\nFILE_HANDLE = get_fh(\"influenza.fasta\", \"r\")\n\n\ndef test_print_sequence_stats():\n print_sequence_stats(LIST_HEADER, LIST_SEQS, FILE_OUT)\n assert path.exists(\"test_influenza_stats.txt\")\n\n\ndef test__get_accession():\n assert _get_accession(LIST_HEADER[0]) == \"EUG123\"\n assert _get_accession(LIST_HEADER[1]) == \"EUG124\"\n\n\ndef test__get_nt_occurrence():\n assert _get_nt_occurrence(\"A\", LIST_SEQS[3]) == 4\n assert _get_nt_occurrence(\"C\", LIST_SEQS[2]) == 2\n with pytest.raises(SystemExit):\n _get_nt_occurrence(\"Y\", LIST_SEQS[3])\n\n\ndef test__check_size_of_lists():\n assert _check_size_of_lists(LIST_HEADER, LIST_SEQS)\n with pytest.raises(SystemExit):\n _check_size_of_lists(LIST_HEADER, LIST_SEQS_NOT_EQUAL)\n\n\ndef test_get_header_and_sequence_lists():\n header_list, seq_list = get_header_and_sequence_lists(FILE_HANDLE)\n assert len(header_list) == 91830\n assert len(seq_list) == 91830\n\n\ndef test_get_fh_4_IOError():\n # does it raise IOError\n # this should exit\n with pytest.raises(SystemExit):\n get_fh(\"does_not_exist.txt\", \"r\")\n\n\ndef test_get_fh_4_ValueError():\n # does it raise IOError\n # this should exit\n with pytest.raises(SystemExit):\n get_fh(\"ss.influenza.fasta\", \"rrr\")\n","sub_path":"file_merger_py/test_nucleotide_statistics_from_fasta.py","file_name":"test_nucleotide_statistics_from_fasta.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573206628","text":"# coding=utf-8\n# Time: 2019-10-30-16:34 \n# Author: dongshichao\n\n'''\n349.两个数组的交集\n给定两个数组,编写一个函数来计算它们的交集。\n\n示例 1:\n\n输入: nums1 = [1,2,2,1], nums2 = [2,2]\n输出: [2]\n示例 2:\n\n输入: nums1 = [4,9,5], nums2 = [9,4,9,8,4]\n输出: 
[9,4]\n说明:\n\n输出结果中的每个元素一定是唯一的。\n\n\n'''\n\nclass Solution(object):\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n count={}\n res=[]\n for i in nums1:\n if i in count:\n count[i] +=1\n else:\n count[i] = 1\n for i in nums2:\n if i in count and count[i] > 0:\n res.append(i)\n count[i] -=1\n\n return list(set(res))\n","sub_path":"list/intersection.py","file_name":"intersection.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"375014736","text":"import pandas as pd\n\n\n#MODEL='M_201028_FU_23'\nMODEL='M_201028_FU_M10'\nTOPIC='10'\n\nfile = 'M10_T'+TOPIC+'_coded.csv'\ndf = pd.read_csv('./coded/'+file, engine='python')\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_style(\"darkgrid\")\n\nsns.distplot(df['P'], hist=False, rug=False, kde=True,\n bins=100, color = 'blue', label = 'All',\n hist_kws={'edgecolor':'black'},\n kde_kws = {'shade': True, 'linewidth': 2})\n\ndf_M = df[(df['CODING'] == \"M\")]\nsns.distplot(df_M['P'], hist=False, rug=False, kde=True,\n bins=100, color = 'green', label = 'Marketing',\n hist_kws={'edgecolor':'black'})\n\ndf_E = df[(df['CODING'] == \"E\")]\nsns.distplot(df_E['P'], hist=False, rug=False, kde=True,\n bins=100, color = 'yellow', label = 'Entrepreneur',\n hist_kws={'edgecolor':'black'})\n\ndf_Y = df[(df['CODING'] == \"Y\")]\nsns.distplot(df_Y['P'], hist=False, rug=False, kde=True,\n bins=100, color = 'red', label = 'Marketing & Entrepreneur',\n hist_kws={'edgecolor':'black'})\n\n#Add legend\nplt.legend(prop={'size': 10}, title = 'Set')\n\n# Add labels\nplt.title('Match probabilities Coded, model= ' + MODEL + ' , T= ' + TOPIC)\nplt.xlabel('Probability')\nplt.ylabel('Articles')\n\nplt.tight_layout()\nplt.show()\n","sub_path":"Python/tools/coded_distribution_plot.py","file_name":"coded_distribution_plot.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206450709","text":"import boto3\nfrom datetime import datetime\nimport os\n\n\ndef image_file(send, farm_id):\n now = datetime.now()\n year = str(now.year)\n month = str(now.month)\n day = str(now.day)\n hour = str(now.hour)\n minute = str(now.minute)\n second = str(now.second)\n if len(month) == 1:\n month = '0' + month\n if len(day) == 1:\n day = '0' + day\n if len(hour) == 1:\n hour = '0' + hour\n if len(minute) == 1:\n minute = '0' + minute\n if len(second) == 1:\n second = '0' + second\n\n if os.path.isfile('image.jpg'):\n os.system('rm image.jpg')\n os.system('fswebcam -r 1280x720 image.jpg')\n file_name = 'image.jpg'\n bucket_name = 'tomato-growth-images'\n realtime = farm_id+\"_\"+year+\"-\"+month+\"-\"+day+\"[\"+hour+\"-\"+minute+\"-\"+second+'].jpg'\n measure_time = hour\n if send == 1:\n realtime = farm_id+\"_\"+\"9999-99-99[99-99-99].jpg\"\n measure_time = \"99\"\n s3 = boto3.client('s3')\n \n s3.upload_file(file_name, bucket_name, realtime)\n print(realtime + ' upload success')\n\n return realtime, measure_time\n\n","sub_path":"mqtt/pi/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"313166289","text":"from django.urls import path\nfrom .views import *\n\napp_name = 'todoapp'\n\nurlpatterns = [\n path('', index, name='index'),\n path('update/', TaskUpdateView.as_view(), 
name='update'),\n path('delete/', TaskDeleteView.as_view(), name='delete'),\n \n]\n","sub_path":"todoapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412694849","text":"'''\r\n1]Install python3 first\r\n2]Install pillow - go to cmd and type \"pip install Pillow\"\r\n3]Install opencv - go to cmd and type \"pip install opencv-python\"\r\n'''\r\n\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nimport cv2 as cv\r\nfrom PIL import ImageTk, Image\r\n\r\nWIDTH = 640\r\nHEIGHT = 480\r\n\r\nslot1 = False\r\nslot2 = False\r\nimportedimage = None\r\nimportedimagearray = None\r\npencilsketch = None\r\npencilsketcharray = None\r\n\r\ndef resizeImage(inputimage):\r\n global WIDTH\r\n global HEIGHT\r\n return cv.resize(inputimage, (WIDTH, HEIGHT))\r\n\r\ndef displayInputImageOnWidget(givenimage, widget):\r\n rgbimage = cv.cvtColor(givenimage, cv.COLOR_BGR2RGB)\r\n rgbimage = ImageTk.PhotoImage(image = Image.fromarray(rgbimage))\r\n global importedimagearray\r\n importedimagearray = rgbimage\r\n widget.create_image(0,0, anchor=\"nw\", image=rgbimage)\r\n\r\ndef displayPencilSketchImageOnWidget(givenimage, widget):\r\n rgbimage = cv.cvtColor(givenimage, cv.COLOR_GRAY2RGB)\r\n rgbimage = ImageTk.PhotoImage(image = Image.fromarray(rgbimage))\r\n global pencilsketcharray\r\n pencilsketcharray = rgbimage\r\n widget.create_image(0,0, anchor=\"nw\", image=rgbimage)\r\n\r\ndef loadImageFromSystem(wind):\r\n wind.withdraw() # hides window\r\n filepath = filedialog.askopenfilename(initialdir=\"\", title=\"Select image to import\", filetypes=((\"All Files\", \"*.*\"), (\"JPG files\", \"*.jpg\"), (\"JPEG files\", \"*.jpeg\"), (\"PNG files\", \"*.png\"), (\"BMP files\", \"*.bmp\"), (\"TIF files\", \"*.tif\"), (\"GIF files\", \"*.gif\"))) # asks user to open a file\r\n wind.deiconify() # shows window\r\n if filepath == \"\": # no file selected\r\n return None\r\n newimage = cv.imread(filepath)\r\n if newimage is None: # selected file failed to open\r\n throwInvalidImageError()\r\n return None\r\n return resizeImage(newimage)\r\n\r\ndef throwInvalidImageError():\r\n messagebox.showerror(\"Error in import image\", \"File is not a image or image is invalid or file is not accessible.\")\r\n\r\ndef getPencilSketch(inputimage):\r\n res, _ = cv.pencilSketch(inputimage)\r\n global pencilsketch\r\n pencilsketch = res\r\n return res\r\n\r\ndef time_to_do_pencil_sketch(givenimage, widget):\r\n res = getPencilSketch(givenimage)\r\n displayPencilSketchImageOnWidget(res, widget)\r\n\r\ndef loadImageAndDisplay(widget1, widget2, wind):\r\n ret = loadImageFromSystem(wind)\r\n if ret is not None:\r\n global importedimage\r\n global slot1\r\n global slot2\r\n importedimage = ret\r\n displayInputImageOnWidget(ret, widget1)\r\n slot1 = True\r\n slot2 = False\r\n time_to_do_pencil_sketch(ret, widget2)\r\n slot2 = True\r\n\r\ndef save_pencil_sketch(wind):\r\n global slot1\r\n global slot2\r\n if slot1 is False or slot2 is False:\r\n return\r\n wind.withdraw()\r\n filename = filedialog.asksaveasfilename(defaultextension=\".jpg\", filetypes=[(\"JPG files\", \"*.jpg\"), (\"JPEG files\", \"*.jpeg\"), (\"PNG files\", \"*.png\"), (\"BMP files\", \"*.bmp\"), (\"TIF files\", \"*.tif\")])\r\n wind.deiconify()\r\n if filename != \"\":\r\n global pencilsketch\r\n global WIDTH\r\n global HEIGHT\r\n saveimage = cv.resize(pencilsketch, (WIDTH, HEIGHT), interpolation = 
cv.INTER_LANCZOS4)\r\n cv.imwrite(filename, saveimage)\r\n\r\nwindow = Tk()\r\nwindow.title(\"Pencil Sketch Maker\")\r\nwindow.geometry(\"1300x520+100+100\")\r\nframe = Frame(window)\r\nframe.pack(fill=BOTH, expand=1)\r\nimageslot1 = Canvas(frame, bg=\"gray\", width=WIDTH, height=HEIGHT)\r\nimageslot1.grid(row=0, column=0)\r\nimageslot2 = Canvas(frame, bg=\"gray\", width=WIDTH, height=HEIGHT)\r\nimageslot2.grid(row=0, column=1)\r\nbutton1 = Button(frame, bd=\"5\", text=\"Import image\", command=lambda:loadImageAndDisplay(imageslot1, imageslot2, window))\r\nbutton1.grid(row=1, column=0)\r\nbutton2 = Button(frame, bd=\"5\", text=\"Save image\", command=lambda:save_pencil_sketch(window))\r\nbutton2.grid(row=1, column=1)\r\nwindow.mainloop()\r\n","sub_path":"pencilsketch.py","file_name":"pencilsketch.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169675047","text":"import matplotlib as mpl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nK = 0.3\n\nplt.xlabel('q')\nplt.ylabel('p')\n\nfor k in range(10):\n for j in range(10):\n p = [k/10]\n q = [j/10]\n for i in range(1000): \n p.append((p[i]+(K/(2*math.pi))*math.sin(2*math.pi*q[i]))%(1))\n q.append((q[i]+p[i+1])%(1))\n plt.plot(q[1:1000], p[1:1000],'r,')\n \nplt.legend()\nplt.show()\n","sub_path":"chaos/standard.py","file_name":"standard.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176397439","text":"import collections\nimport string\n\n\nclass Solution:\n res = 0\n\n def numTilePossibilities(self, tiles: str) -> int:\n self.res = 0\n n = len(tiles)\n d = collections.Counter(tiles)\n f = [1] * (n + 1)\n for i in range(1, n + 1):\n f[i] = f[i - 1] * i\n\n def comb(K, N):\n return f[N] // (f[K] * f[N - K])\n\n def calculate(dChoose):\n count = []\n for k in dChoose:\n if dChoose[k] > 0:\n count.append(dChoose[k])\n if not count:\n return 0\n N = sum(count)\n ans = 1\n for i in range(len(count) - 1):\n ans *= comb(count[i], N)\n N -= count[i]\n return ans\n\n chars = sorted(d)\n\n def backtrack(choose, idx):\n self.res += calculate(choose)\n for j in range(idx, len(chars)):\n k = chars[j]\n for i in range(1, d[k] + 1):\n choose[k] = i\n backtrack(choose, j + 1)\n del choose[k]\n\n backtrack({}, 0)\n return self.res\n\n\ns = Solution()\nprint(s.numTilePossibilities(\"AABC\"))\nprint(s.numTilePossibilities(\"AAB\"))\nprint(s.numTilePossibilities(\"AAABBC\"))\n","sub_path":"leetcode/2021/letter-tile-possibilities.py","file_name":"letter-tile-possibilities.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"260646269","text":"import glob\nimport sys\nimport os\n\nfrom numpy import f2py\nfrom numba import jit\nimport numpy as np\n\nsys.path.insert(0, \"../../02-sensitivity-analysis/python\")\n\nfrom ishigami import compute_simulation_total_effect\nfrom ishigami import compute_simulation_main_effect\nfrom ishigami import evaluate_ishigami_readable\nfrom ishigami import evaluate_ishigami_numba\n\ncwd = os.getcwd()\n\nos.chdir(os.path.dirname(__file__))\nif not glob.glob(\"ishigami_f2py*\"):\n src = open('ishigami.f90', 'rb').read()\n f2py.compile(src, 'ishigami_f2py', \"\", extension='.f90')\nos.chdir(cwd)\n\n\n@jit(nopython=True)\ndef evaluate_ishigami_numba_loop(inputs):\n \"\"\"Evaluate Ishigami function.\n\n Parameters\n 
----------\n\n inputs : numpy.ndarray\n Evaluation points for Ishigami equation.\n\n Returns\n -------\n\n results : numpy.ndarray\n Results from evaluation of `inputs`.\n\n \"\"\"\n results = np.empty(inputs.shape[0])\n\n for i in range(inputs.shape[0]):\n results[i] = evaluate_ishigami_numba(inputs[i, :])\n\n return results\n\n\ndef evaluate_ishigami_f2py_loop(inputs):\n \"\"\"Evaluate Ishigami function.\n\n Parameters\n ----------\n\n inputs : numpy.ndarray\n Evaluation points for Ishigami equation.\n\n Returns\n -------\n\n results : numpy.ndarray\n Results from evaluation of `inputs`.\n\n \"\"\"\n from ishigami_f2py import evalute_ishigami_f2py\n results = evalute_ishigami_f2py(inputs, inputs.shape[0])\n\n return results\n\n\ndef evaluate_ishigami_readable_loop(inputs):\n \"\"\"Evaluate Ishigami function.\n\n Parameters\n ----------\n\n inputs : numpy.ndarray\n Evaluation points for Ishigami equation.\n\n Returns\n -------\n\n results : numpy.ndarray\n Results from evaluation of `inputs`.\n\n \"\"\"\n results = list()\n\n for input_ in inputs:\n results.append(evaluate_ishigami_readable(input_))\n\n return np.array(results)\n\n\ndef task_mp_no_communication(num_outer, num_inner, which):\n \"\"\"Compute main effect by simulation.\n\n This function computes the main effects by simulation for one input parameter but does not\n return anything.\n\n Parameters\n ----------\n\n num_outer : integer\n Number of draws for outer simulation loop.\n\n num_inner : integer\n Number of draws for inner simulation loop.\n\n which : integer\n Position of main effect variable.\n\n Returns\n -------\n\n None\n\n \"\"\"\n _ = compute_simulation_main_effect(num_outer, num_inner, which)\n\n\ndef task_mp_queue(num_outer, num_inner, qout, which):\n \"\"\"Compute main effect by simulation.\n\n This function computes the main effects by simulation for one input parameter and puts the\n result into `qout` container.\n\n Parameters\n ----------\n\n num_outer : integer\n Number of draws for outer simulation loop.\n\n num_inner : integer\n Number of draws for inner simulation loop.\n\n which : integer\n Position of main effect variable.\n\n qout : multiprocessing.Queue\n Queue container to collect results. A tuple with the identifier of the input parameter\n and the main effect is put into the queue.\n\n Returns\n -------\n\n None\n\n \"\"\"\n rslt = compute_simulation_main_effect(num_outer, num_inner, which)\n qout.put((which, rslt))\n\n\ndef task_mp_management(num_outer, num_inner, task):\n \"\"\"Compute sensitivity indices by simulation.\n\n This function computes the sensitivity indices for one input parameter but does not return\n anything.\n\n Parameters\n ----------\n\n num_outer : integer\n Number of draws for outer simulation loop.\n\n num_inner : integer\n Number of draws for inner simulation loop.\n\n task : tuple\n Task information. 
The first element determines whether to simulate the main or total\n effect, while the second indicates the input parameters.\n\n Returns\n -------\n\n None\n\n \"\"\"\n label, which = task\n if label == \"main\":\n return compute_simulation_main_effect(num_outer, num_inner, which)\n elif label == \"total\":\n return compute_simulation_total_effect(num_outer, num_inner, which)\n else:\n raise NotImplementedError\n","sub_path":"selected-topics/03-high-performance-python/src/auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"495081669","text":"import numpy as np\nimport torch\nfrom torchvision import transforms\nfrom torchvision import datasets\n\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, train=True):\n self._train = train\n self._dataset = datasets.cifar.CIFAR100(\n 'data',\n train=train,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n )\n\n self._targets = np.array(self._dataset.targets)\n self.set_classes_range(0, 10)\n\n def set_classes_range(self, low_range, high_range):\n self._low_range = low_range\n self._high_range = high_range\n\n if low_range != high_range:\n idxes = np.where(np.logical_and(\n self._targets >= low_range,\n self._targets < high_range\n ))[0]\n else:\n idxes = np.where(self._targets == low_range)[0]\n\n self._mapping = {\n fake_idx: real_idx\n for fake_idx, real_idx in enumerate(idxes)\n }\n\n def set_examplars(self, idxes):\n self._mapping.update({\n fake_idx: real_idx\n for fake_idx, real_idx in zip(range(len(self._mapping), len(idxes)), idxes)\n })\n\n\n def get_true_index(self, fake_idx):\n return self._mapping[fake_idx]\n\n def __len__(self):\n return len(self._mapping)\n\n def __getitem__(self, idx):\n real_idx = self._mapping[idx]\n return self._dataset[real_idx]\n\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163554761","text":"# -*- coding: utf8 -*-\n__author__ = 'Jagerzhang'\nimport requests\nimport json\nimport time\nimport hmac\nimport hashlib\nimport base64\n \n \nclass KongHmac():\n \"\"\"生成Kong的Hmac鉴权头部\n 仅适配了hmac-sha256加密方式\n \"\"\"\n \n def sha256_digest_base64(self, content):\n \"\"\" sha256计算内容摘要\n \"\"\"\n content_bytes = bytes(content,\"utf-8\")\n # content_bytes = content\n content_sha256_digest = hashlib.sha256(content_bytes).digest()\n content_sha256_digest_base64_decode = base64.b64encode(content_sha256_digest).decode()\n content_digest = 'SHA-256={}'.format(content_sha256_digest_base64_decode)\n return content_digest\n \n def hmac_sha256_base64(self, secret, str_to_sign):\n \"\"\" 生成sha256加密串\n \"\"\"\n signature = hmac.new(bytes(secret,\"utf-8\"), bytes(str_to_sign,\"utf-8\"),\n digestmod=hashlib.sha256).digest()\n str_base64 = base64.b64encode(signature).decode()\n return str_base64\n \n def get_auth_header(self, username, secret, body):\n # 生成body的sha256加密串\n body_digest = self.sha256_digest_base64(body)\n \n # 生成当前GMT时间,注意格式不能改变,必须形如:Wed, 14 Aug 2019 09:09:28 GMT\n gm_time = time.strftime(\"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime())\n \n # 拼装待签名的数据\n str_to_sign = \"date: {}\\ndigest: {}\".format(gm_time, body_digest)\n \n # 生成签名\n signature = self.hmac_sha256_base64(secret, str_to_sign)\n \n # 拼装headers\n headers = {\n 'Authorization': 'hmac 
username=\\\"{}\\\", algorithm=\\\"hmac-sha256\\\", headers=\\\"date digest\\\", '\n                             'signature=\\\"{}\\\"'.format(username, signature),\n            'Digest': body_digest,\n            'Date': gm_time}\n        return headers\n \n \nif __name__ == \"__main__\":\n    # Adjust these values for your environment\n    username = \"\"\n    secret = \"\"\n    sdn_api = \"http://tix-gn.sdn-api.tencent-cloud.com/restconf/operations/route-adjust:get-adjust-history-list\"\n    param = {\"input\": {\n        \"startTime\": \"2020-02-01-00-00-00\",\n        \"endTime\": \"2020-02-13-18-00-00\"\n    }}\n    kong_hmac = KongHmac()\n    # The request body must be identical to the content used to generate the signature, otherwise content verification fails:\n    param = json.dumps(param)\n    headers = kong_hmac.get_auth_header(username=username, secret=secret, body=param)\n    headers[\"Content-Type\"] = \"application/json\"\n    resp = requests.post(url=sdn_api, data=param, headers=headers)\n    print (resp.text)\n","sub_path":"python/kong_hmac_py3.py","file_name":"kong_hmac_py3.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"187926542","text":"from django.contrib import admin\nfrom inventory.models import Product, Category, Store\n\nclass CategoryAdmin(admin.ModelAdmin):\n\tlist_display = ('name', 'created_at', 'updated_at',)\n\tlist_display_links = ('name',)\n\tlist_per_page = 20\n\tordering = ['name']\n\tsearch_fields = ('name', 'description')\n\nadmin.site.register(Category, CategoryAdmin)\n\nclass ProductAdmin(admin.ModelAdmin):\n\tlist_display = ('name', 'price', 'old_price', 'created_at', 'updated_at',)\n\tlist_display_links = ('name',)\n\tlist_per_page = 50\n\tordering = ['-created_at']\n\tsearch_fields = ('name', 'description')\n\nadmin.site.register(Product, ProductAdmin)\n\nclass StoreAdmin(admin.ModelAdmin):\n\tlist_display = ('name', 'email',)\n\tlist_display_links = ('name',)\n\tlist_per_page = 10\n\tordering = ['name']\n\tsearch_fields = ('name', 'email', 'city', 'state')\n\nadmin.site.register(Store, StoreAdmin)","sub_path":"ordergroove/store/inventory/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"358990094","text":"#!/usr/bin/python\n# coding: utf8\nfrom pymongo import MongoClient\nimport os\n\n\"\"\"\nAdding Attributes\n=================\n- road\n- suffix\n- direction\n\"\"\"\n\nclient = MongoClient()\n\nlookup_suffix = {\n    'GT': 'Gate',\n    'DRWY': 'Driveway',\n    'DR': 'Drive',\n    'DR.': 'Drive',\n    'PL': 'Place',\n    'CIR': 'Circle',\n    'CIRC': 'Circle',\n    'CREEK': 'Creek',\n    'PRIV': 'Private',\n    'RD': 'Road',\n    'RD.': 'Road',\n    'CRES': 'Crescent',\n    'AVE': 'Avenue',\n    'AVE.': 'Avenue',\n    'AV.': 'Avenue',\n    'AV': 'Avenue',\n    'GRV': 'Grove', \n    'GR': 'Grove',\n    'GDN': 'Garden',\n    'PK': 'Park',\n    'TERR': 'Terrace', \n    'TER': 'Terrace',\n    'CRT': 'Court',\n    'CT': 'Court',\n    'RDG':'Ridge',\n    'TR': 'Trail',\n    'TRL': 'Trail',\n    'BLVD': 'Boulevard',\n    'HTS': 'Heights',\n    'HGTS': 'Heights',\n    'SQ': 'Square',\n    'ST': 'Street',\n    'PKWY': 'Parkway',\n    'STE': 'Saint',\n    'STE.': 'Saint',\n    'PKY': 'Parkway',\n    'GROVE': 'Grove',\n    'WAY': 'Way',\n    'RIDGE': 'Ridge',\n    'LANE': 'Lane',\n    'SIDE': 'Side',\n    'PARK': 'Park',\n    'CREST': 'Crest',\n}\n\nlookup_direction = {\n    'E': 'East',\n    'W': 'West',\n    'N': 'North',\n    'S': 'South',\n}\n\nlookup_table = {}\nlookup_table.update(lookup_suffix)\nlookup_table.update(lookup_direction)\n\ndef search(line, lookup):\n    for word in line.split(' '):\n        word = word.upper()\n        if word in lookup:\n            return lookup[word]\n\ndef strip_road(line, lookup):\n    words = line.split(' ')\n    container = 
[words[0]]\n for word in words[1:]:\n word = word.upper()\n if not word in lookup:\n container.append(word)\n if container:\n return ' '.join(container)\n else:\n # Only return the first word\n if words:\n return words[0] \n\n\nfor item in client.ottawa.permits.find({}):\n road = item.get('ROAD')\n if road:\n suffix = search(road, lookup_suffix)\n direction = search(road, lookup_direction)\n road = strip_road(road, lookup_table)\n \n if suffix:\n item['suffix'] = suffix\n if direction:\n item['direction'] = direction\n if road:\n item['road'] = road.capitalize()\n client.ottawa.permits.save(item)","sub_path":"Python/4_add_suffix.py","file_name":"4_add_suffix.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25382402","text":"import os\nimport datetime\nimport requests\nimport json\nimport pandas as pd\nimport util\nfrom time import sleep\n\n\n# with this program you can insert data periodically simulating a sensor measure\nif __name__ == \"__main__\":\n\n headers, str_token = util.connection()\n\n print(\"Token: \" + str_token)\n util.print_things(\"metric\", str_token)\n\n metric = input(\"Choose the metricID -> \")\n url = \"http://252.3.243.35:8041/v1/metric/\" + metric + \"/measures\"\n\n os.system(\"ls data/\")\n file_input = input(\"Insert the path of the .csv file -> \")\n df = pd.read_csv(\"data/\" + file_input)\n\n threshold = input(\"How many data do you want to insert? (number of values) -> \")\n if int(threshold) < 0:\n print(\"threshold value not valid..\\n Closing..\\n\")\n exit(-1)\n\n how_many = 0\n\n for row in df.itertuples():\n current_date = datetime.datetime.now().isoformat()\n timestamp, value = [], []\n value.append(float(row[2]))\n timestamp.append(current_date)\n how_many = how_many + 1\n measures = [{\"timestamp\": t, \"value\": v} for t, v in zip(timestamp, value)]\n r = requests.post(url, data=json.dumps(measures), headers=headers)\n if str(r.status_code) == \"202\":\n print(\"current_date: \" + current_date + \", value: \" + str(row[2]))\n else:\n print(\"Error code: \" + str(r.status_code))\n\n if how_many == int(threshold):\n break\n else:\n sleep(2)\n\n\n\n","sub_path":"ProgettoVallati/producer_real_time.py","file_name":"producer_real_time.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192891404","text":"\"\"\"Tests for the distance module.\"\"\"\n\nfrom auvsi_suas.models import distance\nfrom django.test import TestCase\n\n\nclass TestHaversine(TestCase):\n \"\"\"Tests the haversine code correctness.\"\"\"\n def assertCloseEnough(self,\n distance_actual,\n distance_received,\n threshold=0.003048): # 10 feet in km\n \"\"\"Determines whether the km distances given are close enough.\"\"\"\n self.assertLessEqual(abs(distance_actual - distance_received),\n threshold)\n\n def evaluate_inputs(self, input_output_list):\n \"\"\"Evaluates a list of inputs and outputs.\"\"\"\n for (lon1, lat1, lon2, lat2, distance_actual) in input_output_list:\n distance_received = distance.haversine(lon1, lat1, lon2, lat2)\n self.assertCloseEnough(distance_actual, distance_received)\n\n def test_zero_distance(self):\n \"\"\"Tests various latitudes and longitudes which have zero distance.\"\"\"\n self.evaluate_inputs([\n # (lon1, lat1, lon2, lat2, dist_actual)\n (0, 0, 0, 0, 0),\n (1, 1, 1, 1, 0),\n (-1, -1, -1, -1, 0),\n (1, -1, 1, -1, 0),\n (-1, 1, -1, 1, 0),\n (76, 42, 76, 42, 0),\n 
(-76, 42, -76, 42, 0),\n        ])  # yapf: disable\n\n    def test_hemisphere_distances(self):\n        \"\"\"Tests distances in each hemisphere.\"\"\"\n        self.evaluate_inputs([\n            # (lon1, lat1, lon2, lat2, dist_actual)\n            (-73, 40, -74, 41, 139.6886345468666),\n            (73, 40, 74, 41, 139.6886345468667),\n            (73, -40, 74, -41, 139.6886345468667),\n            (-73, -40, -74, -41, 139.68863454686704),\n        ])  # yapf: disable\n\n    def test_competition_distances(self):\n        \"\"\"Tests distances representative of competition amounts.\"\"\"\n        self.evaluate_inputs([\n            # (lon1, lat1, lon2, lat2, dist_actual)\n            (-76.428709, 38.145306, -76.426375, 38.146146, 0.22446),\n            (-76.428537, 38.145399, -76.427818, 38.144686, 0.10045),\n            (-76.434261, 38.142471, -76.418876, 38.147838, 1.46914),\n        ])  # yapf: disable\n","sub_path":"server/auvsi_suas/models/distance_test.py","file_name":"distance_test.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"152500407","text":"import cv2\nimport dlib\nfrom scipy.spatial import distance\nfrom imutils import face_utils\nimport face\n\nEAR_THRESH = 0.22\n# EYE_AR_CONSEC_FRAMES = 10\n\n\n\ndef eye_aspect_ratio(eye):\n\t# compute the euclidean distances between the two sets of\n\t# vertical eye landmarks (x, y)-coordinates\n\tA = distance.euclidean(eye[1], eye[5])\n\tB = distance.euclidean(eye[2], eye[4])\n\n\t# compute the euclidean distance between the horizontal\n\t# eye landmark (x, y)-coordinates\n\tC = distance.euclidean(eye[0], eye[3])\n\n\t# compute the eye aspect ratio\n\tear = (A + B) / (2.0 * C)\n \n\t# return the eye aspect ratio\n\treturn ear\n\ndef eye_is_closed(shape):\n    (leftEye_s, leftEye_e) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n    (rightEye_s, rightEye_e) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n\n    leftEye = shape[leftEye_s:leftEye_e]\n    rightEye = shape[rightEye_s:rightEye_e]\n\n    ear = (eye_aspect_ratio(leftEye) + eye_aspect_ratio(rightEye)) / 2.0\n\n    isclosed = ear < EAR_THRESH\n    return isclosed\n\n\ndef main():\n    cap = cv2.VideoCapture(0)\n    if not cap.isOpened():\n        print('Error: Unable to initialize Video Source.')\n        return\n    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')\n    COUNTER = BLINK_COUNT = 0\n    while cap.isOpened():\n\n        ok, frame = cap.read()\n\n        if ok:\n\n            face_rects = face.detect_faces(frame)\n\n            for rect in face_rects:\n                shape = predictor(frame, rect)\n                shape = face_utils.shape_to_np(shape)\n\n                if eye_is_closed(shape):\n                    COUNTER += 1\n                else:\n                    if COUNTER != 0:\n                        COUNTER = 0\n                        BLINK_COUNT += 1\n                        print('Blinked')\n\n            print(BLINK_COUNT)\n            cv2.imshow('Output', frame)\n            key = cv2.waitKey(50)\n            if key in [27, ord('q')]:\n                break\n\n    cap.release()\n    cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n    main()","sub_path":"blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"31551940","text":"from net.grinder.script.Grinder import grinder\r\nimport java.util.concurrent as concurrent\r\nfrom threading import RLock\r\n\r\ndata_pool = {}\r\ndata_pool_lock = RLock()\r\n\r\ndef read_pool_data(key):\r\n    queue = None\r\n    data_pool_lock.acquire()\r\n    try:\r\n        if not key in data_pool:\r\n            queue = data_pool[key] = concurrent.LinkedBlockingQueue()\r\n        else:\r\n            queue = data_pool[key]\r\n    finally:\r\n        data_pool_lock.release()\r\n    grinder.getLogger().output('waiting for elements from the queue for ' + key)\r\n    dic = queue.poll(1, 
concurrent.TimeUnit.DAYS)\r\n    grinder.getLogger().output('popped ' + str(dic) + ' from the queue for ' + key)\r\n    return dic\r\n\r\ndef write_pool_data(key, dic):\r\n    queue = None\r\n    data_pool_lock.acquire()\r\n    try:\r\n        if not key in data_pool:\r\n            queue = data_pool[key] = concurrent.LinkedBlockingQueue()\r\n        else:\r\n            queue = data_pool[key]\r\n    finally:\r\n        data_pool_lock.release()\r\n    grinder.getLogger().output('pushing ' + str(dic) + ' into the queue for ' + key)\r\n    queue.add(dic)\r\n\r\n","sub_path":"misc/load-testing/src/pool_data.py","file_name":"pool_data.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"429261809","text":"\nimport random,sys\noptions = [\n    \"rock\",\n    \"paper\",\n    \"scissors\"\n]\n\nplayer_score = 0\ncomputer_score = 0\n\nplayer_choice = \"\"\ncomputer_choice = \"\"\ndef game(player_choice, computer_choice, computer_score, player_score):\n    while True:\n\n        player_choice = input(\" Choose between rock, paper and scissors\").lower().strip()\n\n        if player_choice not in options:\n            print(\"Error, choose between rock, paper or scissors\")\n        else:\n            computer = options[random.randint(0, len(options)-1)]\n            computer_choice = computer\n            print(\"player choice is \" + player_choice + \" and computer_choice is \" + computer_choice)\n\n            # score one point for the winner of this round\n            if player_choice == computer_choice:\n                print(\"Good game is a draw \")\n            elif player_choice == \"rock\" and computer_choice == \"scissors\":\n                print(\"You win\")\n                player_score += 1\n            elif player_choice == \"paper\" and computer_choice == \"scissors\":\n                print(\"Computer win\")\n                computer_score += 1\n            elif player_choice == \"scissors\" and computer_choice == \"paper\":\n                print(\"You win\")\n                player_score += 1\n            elif player_choice == \"rock\" and computer_choice == \"paper\":\n                print(\"Computer win\")\n                computer_score += 1\n            elif player_choice == \"scissors\" and computer_choice == \"rock\":\n                print(\"Computer win\")\n                computer_score += 1\n            elif player_choice == \"paper\" and computer_choice == \"rock\":\n                print(\"You win\")\n                player_score += 1\n            else:\n                print(\"invalid game\")\n            # leave the loop after one valid round so the summary below is reachable\n            break\n\n    if computer_score > player_score:\n        print(\"The score of the game for the computer is \" + str(computer_score) + \" and the score of the game for the player is \" + str(player_score) + \", computer wins\")\n    elif computer_score == player_score:\n        print(\"The score of the game for the computer is \" + str(computer_score) + \" and the score of the game for the player is \" + str(player_score) + \", it is a draw\")\n    else:\n        print(\"The score of the game for the computer is \" + str(computer_score) + \" and the score of the game for the player is \" + str(player_score) + \", you win\")\ndef keepplaying():\n    while True:\n        question = input(\"Do you wanna play Rock, paper and scissors?, Answer yes or no, press q to exit\").lower().strip()\n        if question == \"yes\":\n            game(player_choice, computer_choice, computer_score, player_score)\n            return question\n        if question == \"no\":\n            return question\n        if question == \"q\":\n            sys.exit()\n        print(\"invalid response, try again\")\nkeepplaying()","sub_path":"rsp.py","file_name":"rsp.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"109005047","text":"import io, sys\n\nfrom setuptools import find_packages, setup\n\nwith io.open('README.rst', 'rt', encoding='utf8') as f:\n    readme = 
f.read()\n\nsetup(\n name='virtuatable-rulesets',\n version='0.1.0',\n url='http://flask.pocoo.org/docs/tutorial/',\n license='BSD',\n maintainer='Vincent Courtois',\n maintainer_email='courtois.vincent@outlook.com',\n description='The service to create, update, get or delete rule sets for any tabletop RPG',\n long_description=readme,\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'flask',\n ],\n extras_require={\n 'test': [\n 'pytest',\n 'coverage',\n ],\n },\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384889483","text":"from unittest.mock import patch\nimport unittest, UserManagementSystem\nimport logging\n\nclass TestUserManagementSystem(unittest.TestCase):\n logging.basicConfig(filename=\"C:\\\\Venkatesh\\\\Work\\\\Repos\\\\python-practice\\\\exercises\\\\User Registration\\\\logs\\\\ums_app.log\",\n level = logging.DEBUG)\n logger = logging.getLogger(\"TestUserManagementSystem\")\n\n def test_user_account_creation_with_valid_values(self):\n user_input = [\n 'Venkatesh M',\n '26',\n '989989887',\n 'venkatesh@gmail.com',\n 'venkatesh.m',\n '2'\n ]\n \n ums = UserManagementSystem.UserManagementSystem()\n with patch('builtins.input', side_effect=user_input):\n userAccount = ums.addUser()\n self.logger.debug(\"User Account Details: {ua}\".format(ua=userAccount.toDictObject()))\n self.assertNotEqual(userAccount, None)\n\n def test_user_account_creation_with_invalid_age_value(self):\n user_input = [\n 'Venkatesh M',\n 'test',\n '989989887',\n 'venkatesh@gmail.com',\n 'venkatesh.m',\n '2'\n ]\n\n ums = UserManagementSystem.UserManagementSystem()\n with patch('builtins.input', side_effect=user_input):\n self.assertRaises(ValueError, ums.addUser)","sub_path":"exercises/User Registration/test_UserManagementSystem.py","file_name":"test_UserManagementSystem.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465454316","text":"import numpy as np\nimport tensorflow as tf\nimport keras.backend as K\n\nEPOCHS = 2\nEPSILON = 0.03\nMAX_EPSILON = 16.0\nMOMENTUM = 1.0\nBATCH_SIZE = 10\n\n\ndef iterative_fgsm(reshaped_image, model, shape=(28, 28, 1), num_classes=10):\n eps = 2.0 * MAX_EPSILON / 255.0\n alpha = eps / 12\n momentum = MOMENTUM\n batch_shape = [BATCH_SIZE, shape[0], shape[1], shape[2]]\n\n x = reshaped_image.reshape((-1,) + shape).astype('float32')\n # x = reshaped_image\n\n preds = model.predict(x)\n initial_class = np.argmax(preds)\n\n x_max = tf.clip_by_value(x + eps, -1.0, 1.0)\n x_min = tf.clip_by_value(x - eps, -1.0, 1.0)\n x_adv = x\n grad = tf.zeros(shape=batch_shape)\n for i in range(EPOCHS):\n one_hot_target_class = tf.one_hot(initial_class, num_classes)\n logits = model.predict(x)\n logits = logits.reshape(num_classes,)\n cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class,\n logits,\n label_smoothing=0.0,\n weights=1.0)\n noise = tf.gradients(cross_entropy, x_adv)[0]\n noise = noise / tf.reshape(tf.contrib.keras.backend.std(tf.reshape(noise, [BATCH_SIZE, -1]), axis=1),\n [BATCH_SIZE, 1, 1, 1])\n noise = momentum * grad + noise\n noise = noise / tf.reshape(tf.contrib.keras.backend.std(tf.reshape(noise, [BATCH_SIZE, -1]), axis=1),\n [BATCH_SIZE, 1, 1, 1])\n x_adv = x_adv - alpha * tf.clip_by_value(tf.round(noise), -2, 2)\n x_adv = tf.clip_by_value(x_adv, x_min, x_max)\n\n 
return x_adv\n\n\nfrom keras.models import load_model\nfrom keras.datasets import fashion_mnist\nfrom keras.utils import np_utils\n\nmodel = load_model('my_model.h5')\n# model = model.load_weights('my_load_weights.h5')\n\n(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n\nNUM = 5\nSSIM = []\nx = x_test.reshape(-1, 28, 28, 1).astype('float32')[:NUM]\nx /= 255\ny_test = np_utils.to_categorical(y_test[:NUM], num_classes=10)\nx_adv = np.zeros(x.shape)\n\nfor i in range(x.shape[0]):\n print(str(i), end=' ')\n x_adv[i] = np.copy(iterative_fgsm(x[i], model, (28, 28, 1), 10))\n\nx_adv = np.array(x_adv)\nloss, acc = model.evaluate(x, y_test)\nadv_loss, adv_acc = model.evaluate(x_adv, y_test)","sub_path":"others/iterativeFGSM.py","file_name":"iterativeFGSM.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236543638","text":"import django\nfrom django.test import TestCase\nfrom django.db import models\nfrom trec_eval.trec_wrapper import trec_wrapper\nfrom trec_eval_project.settings import BASE_DIR\nfrom trec.models import Researcher, User, Track, Task, Run\nfrom django.core.files import File\n\nimport os.path\n\n\n\n# Create your tests here.\nclass RunningSiteTests(TestCase):\n fixtures = ['test_db.json']\n\n def setUp(self):\n django.setup()\n\n\n def test_core_pages_load(self):\n self.assertEqual(self.client.get(\"/trec/\").status_code, 200)\n self.assertEqual(self.client.get(\"/trec/users/\").status_code, 200)\n self.assertEqual(self.client.get(\"/trec/tracks/\").status_code, 200)\n self.assertEqual(self.client.get(\"/trec/about/\").status_code, 200)\n self.assertEqual(self.client.get(\"/trec/register/\").status_code, 200)\n self.assertEqual(self.client.get(\"/accounts/login/\").status_code, 200)\n\nclass TrecWrapperTests(TestCase):\n\n def test_trec_eval_returns_MAP_pvalues_rvalues(self):\n data_directory = os.path.join(BASE_DIR, 'pop script data')\n qrel_path = os.path.join(data_directory, 'qrels', 'robust', 'aq.trec2005.qrels.txt')\n run_path = os.path.join(data_directory, 'runs', 'robust', 'aq.trec.bm25.0.50.res.txt')\n\n mapVal, pMap, rMap = trec_wrapper(qrel_path, run_path)\n\n self.assertTrue(mapVal != None)\n self.assertTrue(len(pMap) > 0)\n self.assertTrue(len(rMap) > 0)\n\n\nclass ResearcherTest(TestCase):\n\n def test_run_count_is_positive(self):\n u = User(username='Bob')\n u.save()\n r = Researcher(user=u, number_of_runs= -1)\n r.save()\n self.assertTrue(r.number_of_runs >= 0)\n\nclass RunTest(TestCase):\n\n # check that a new run can be populated with trec data.\n def test_run_is_populated_with_trec_eval_data(self):\n data_directory = os.path.join(BASE_DIR, 'pop script data')\n qrel_path = os.path.join(data_directory, 'qrels', 'robust', 'aq.trec2005.qrels.txt')\n run_path = os.path.join(data_directory, 'runs', 'robust', 'aq.trec.bm25.0.50.res.txt')\n\n qrel_file = File(open(qrel_path))\n run_file = File(open(run_path))\n\n user = User(username=\"tester\")\n user.save()\n\n researcher = Researcher(user=user)\n researcher.save()\n\n track = Track(title='Test track')\n track.save()\n\n task = Task(title=\"test task\", track=track, year=1700, judgements_file=qrel_file)\n task.save()\n run = Run(task=task, researcher=researcher, result_file=run_file)\n run.save() # populate_with_trec_eval_data() assumes run_file has been saved to disk.\n run.populate_with_trec_eval_data()\n run.save()\n\n self.assertTrue(run.map != None)\n self.assertTrue(run.p10 != None)\n 
self.assertTrue(run.p20 != None)\n","sub_path":"trec_eval_project/trec/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"402336797","text":"from setuptools import setup\n\nversion = '1.0.6'\n\nsetup(name='sailthru-client', \n version=version,\n description='Python client for Sailthru API',\n long_description=open('./README.md').read(),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Utilities\",\n \"Programming Language :: Python\",\n \"Operating System :: OS Independent\",\n \"Natural Language :: English\",\n ],\n keywords='sailthru api',\n author='Prajwal Tuladhar',\n author_email='praj@sailthru.com',\n url='https://github.com/sailthru/sailthru-python-client',\n license='MIT License',\n packages=['sailthru'],\n include_package_data=True,\n zip_safe=True)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"266895471","text":"import socket\r\ndef connect(s,n):\r\n sock = socket.socket()\r\n sock.connect((s, n))\r\n return sock\r\ndef turn(n,c,s): \r\n t=str(n)+' '+str(c)\r\n s.send(t.encode())\r\n data = s.recv(1024)\r\n print (data)\r\n data=s.recv(1024)\r\n print (data)\r\ns=connect('192.168.25.21',8942)\r\nfor n in range(11,22):\r\n turn(n,1,s)\r\ninput()","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553775920","text":"# -*- coding: utf-8 -*-\n\nimport xlwt\nimport json\n\ndef toxls():\n path = r\"C:\\Users\\asus\\Desktop\\city.txt\"\n with open(path, \"r\", encoding=\"utf-8\") as file:\n content = file.read()\n dictory = json.loads(content)\n workbook = xlwt.Workbook()\n table = workbook.add_sheet(\"Sheet1\")\n\n #枚举写入内容\n for row, id in enumerate(dictory):\n table.write(row, 0, id)\n table.write(row, 1, dictory[id])\n\n pathExcel = path.replace(\"txt\", \"xls\")\n workbook.save(pathExcel)\n\nif __name__ == \"__main__\":\n toxls()","sub_path":"the_0015th_problem.py","file_name":"the_0015th_problem.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"313905423","text":"# Author: Alex Gezerlis\n# Numerical Methods in Physics with Python (CUP, 2020)\n\nimport numpy as np\n\ndef paulimatrices():\n sigx = np.array([0.,1,1,0]).reshape(2,2)\n sigy = np.array([0.,-1j,1j,0]).reshape(2,2)\n sigz = np.array([1.,0,0,-1]).reshape(2,2)\n return sigx, sigy, sigz\n\ndef kron(U,V):\n n = U.shape[0]\n p = V.shape[0]\n W = np.zeros((n*p,n*p), dtype=np.complex64)\n for i in range(n):\n for k in range(n):\n for j in range(p):\n for l in range(p):\n W[p*i+j,p*k+l] = U[i,k]*V[j,l]\n return W\n\nif __name__ == '__main__':\n sigx, sigy, sigz = paulimatrices()\n allones = np.ones((3,3))\n kronprod = kron(sigx,allones); print(kronprod.real)\n","sub_path":"first_edition/codes/kron.py","file_name":"kron.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"410303966","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nDIR_TASK = os.path.basename(os.getcwd())\nDIR_LIB = os.path.abspath(os.path.join(os.path.dirname(__file__),\"../\"))\nDIR_TASK = 
os.path.dirname(os.path.abspath(__file__))\n\nimport json, csv, time, string, itertools, copy, yaml\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nimport math\nimport re\n\nCONFIG_FILE_NAME = '010.01_config'\nconfig = yaml.load( stream = file( DIR_TASK + '\\\\' + CONFIG_FILE_NAME + '.yml', 'r'))\n#yaml.dump( config, file( DIR_TASK + '\\\\config.yml', 'w') )\n\nsys.path.append( DIR_LIB )\n\nfrom lib.router import Router\nrouter = Router( )\n\n# --------------------------------------------------------------------------\ntoday = dt.datetime.now().strftime(\"%Y-%m-%d--%H-%M\") \n\n\n#STEP: modify version?\n\nconfigVersion = config['version']\nconfig['version'] = round( float(configVersion) + .1, 1 ) if config['options']['increment-version'] == True else configVersion\n\n\n#STEP: load source files\nsource = config['source']['source_quotes']\nfileIdx = 0\nsourcePathFile = router.getRoute( source['route'] ) + source['dir'] + source['files'][fileIdx]\ndf_showQuotes = pd.read_csv( filepath_or_buffer = sourcePathFile , sep=\";\", quoting= 3, decimal=',' )\n\nsource = config['source']['source_team']\nfileIdx = 0\nsourcePathFile = router.getRoute( source['route'] ) + source['dir'] + source['files'][fileIdx]\nrawFile = open( sourcePathFile , 'r')\ncoachesTeamsObj = json.load( rawFile )\ncoachesNames = coachesTeamsObj.keys() # ['Michael Patrick', 'Mark', 'Michi & Smudo', 'Yvonne' ]\n\nsource = config['source']['source_buzzers']\nfileIdx = 0\nsourcePathFile = router.getRoute( source['route'] ) + source['dir'] + source['files'][fileIdx]\nrawFile = open( sourcePathFile , 'r')\nparticipantsObj = json.load( rawFile )\n\nsource = config['source']['source_deezer']\nfileIdx = 0\nsourcePathFile = router.getRoute( source['route'] ) + source['dir'] + source['files'][fileIdx]\ndeezerSongs = pd.read_csv( filepath_or_buffer = sourcePathFile , sep=\";\", quoting= 3 )\n\ndef getRawSourceLines( _idx ):\n source = config['source']['source_finals']\n fileIdx = _idx\n sourcePathFile = router.getRoute( source['route'] ) + source['dir'] + source['files'][fileIdx]\n rawFile = open( sourcePathFile , 'r')\n fileLinesRaw = [line.rstrip('\\n') for line in rawFile]\n return fileLinesRaw\n # fileLines = filter(None, blindsFileLinesRaw)\n\n\ndef lineSplitColumns( _dataLine ):\n regResults = re.search('^(\\d)-\\s*(\\d+).\\s*(\\D+):\\s+\\\"\\s*(\\D+)\\\"\\D+-\\s+(\\D+)', _dataLine)\n if regResults is not None:\n r_showId = regResults.group(1)\n r_turn = regResults.group(2)\n r_participant = regResults.group(3)\n r_songTitle = regResults.group(4)\n r_songArtist = regResults.group(5)\n return [ r_showId, r_turn, r_participant, r_songTitle, r_songArtist ]\n return [] \n \ndef searchParticipantCoach( _participantName ):\n # globals: coachesTeamsObj, coachesNames\n nameEncoded = _participantName.decode('utf-8')\n for coachName in coachesNames:\n teamList = coachesTeamsObj[ coachName ]\n search = [ itemTeam for itemTeam in teamList if itemTeam['name'] == nameEncoded ]\n if len(search) > 0:\n return coachName\n \ndef getParticipantInfo( _participantName ):\n # globals: participantsObj\n nameEncoded = _participantName.decode('utf-8')\n search = [ partTeam for partTeam in participantsObj if partTeam['name'] == nameEncoded ]\n return search\n\ndef findSongInDeezer( _songTitle ):\n\n df_testRow = deezerSongs[ deezerSongs['song'].str.lower() == _songTitle.lower() ]\n try:\n df_deezerRow = df_testRow.iloc[0]\n test = df_testRow.iloc[0]['artist']\n except:\n songTitleWords = _songTitle.lower().split(\" \")\n regexParts = [ 
'(?:@' + word + '@)' for word in songTitleWords ]\n strRegex = '|'.join(regexParts)\n\n deezerSongs['affinity'] = 0\n\n def regex_filter( _row ):\n #com: create a empty space around each word\n _value = _row['song']\n newValueWidthSpaces = '@' + '@@'.join( _value.lower().split(' ') ) + '@'\n searchResult = re.findall( strRegex, newValueWidthSpaces ) # res = re.search( strRegex, _value)\n _row['affinity'] = len( searchResult )\n return _row\n \n #df_rowsResultSearch = deezerSongs[ deezerSongs.apply( regex_filter, axis=1 ) ]\n df_testRow = deezerSongs.apply( regex_filter, axis=1 )\n df_deezerRow = df_testRow.iloc[ df_testRow['affinity'].idxmax() ]\n\n\n # print ( 0 if math.isnan( float(df_deezerRow['year']) ) else int(df_deezerRow['year']) ),\n return [\n df_deezerRow['artist'] ,\n int(df_deezerRow['year'] ) if str(df_deezerRow['year']).isdigit() else 0,\n ( 0 if math.isnan( (df_deezerRow['deezer-quote']) ) else float(df_deezerRow['deezer-quote']) ),\n ( '' if str(df_deezerRow['genre']) == 'nan' else str(df_deezerRow['genre']) ),\n df_deezerRow['lang'],\n ] \n \nepisodes = 0\nlanguages = []\nblindsData = []\n\n# HELP: filter list of lines by text\n## test1 = [ part for part in blindsFileLines if 'Julian Coles' in part ]\n\n# print findSongInDeezer( 'Breakfest in America' )\n# sys.exit(0)\n\nshowLinesExtend = getRawSourceLines(0)\n\n#COM: remove empty lines\nblackList = [ ' ', '' ]\nremoveEmptyLines = [ item for item in showLinesExtend if item not in blackList ]\n\ncandidateSongList = []\n\n#COM: join artist and song lines in one\niterator = iter(removeEmptyLines)\nfor idxLine, line in enumerate( iterator ):\n candidateSongList.append( [line, next(iterator), next(iterator), next(iterator)] )\n\nlanguages = []\nshowData = []\n\nfor idxCS, candidateSongItem in enumerate( candidateSongList ):\n \n participant_name = candidateSongItem[0]\n\n #song_title = candidateSongItem[1].split(' von ')[0].replace('\"','')\n \n\n showDataObject = { }\n \n showDataObject['participant_name'] = participant_name #.encode('utf-8')\n showDataObject['coach_name'] = searchParticipantCoach( participant_name )\n\n df_row = df_showQuotes.loc[ (df_showQuotes['participant'] == participant_name) ]\n quoteObj = {\n 'procent': float( df_row['rate_procent'].values[0] ),\n 'team': df_row['coach'].values[0],\n 'is_winner': True if df_row['coach'].values[0]=='+' else False,\n }\n showDataObject['quotes'] = quoteObj \n\n\n participantInfo = getParticipantInfo( participant_name )\n if len( participantInfo ) > 0 :\n showDataObject['participant_gender'] = participantInfo[0]['gender']\n #WARN: age format in multiple particpants\n # multi -> \"participant_age\": \"29, 34 UND 44\", \n # single -> \"participant_age\": 28, \n showDataObject['participant_age'] = participantInfo[0]['age']\n showDataObject['buzzer_count'] = participantInfo[0]['buzzer_count']\n showDataObject['buzzer_coaches_names'] = participantInfo[0]['buzzer_coaches']\n else:\n showDataObject['buzzer_count'] = 0\n\n\n #COM: In final was multiple songs for each participant\n bufferSong = []\n\n def getSongData( _songId ): # 1,2, 3\n \n song_title = candidateSongItem[ _songId ].split(' von ')[0].split('\"')[1]\n deezerData = findSongInDeezer( song_title )\n\n bufferSongObj = {}\n bufferSongObj[ 'song_title' ] = song_title\n bufferSongObj['song_artist'] = deezerData[0]\n bufferSongObj['song_year'] = deezerData[1]\n bufferSongObj['song_deezer-quote'] = deezerData[2]\n bufferSongObj['song_genre'] = deezerData[3]\n bufferSongObj['song_lang'] = deezerData[4]\n \n if( 
deezerData[4] not in languages ):\n languages.append( deezerData[4] )\n\n return bufferSongObj\n\n bufferSong.append( getSongData(1) )\n bufferSong.append( getSongData(2) )\n bufferSong.append( getSongData(3) )\n showDataObject[\"songs\"] = bufferSong\n\n showData.append( showDataObject )\n\ndf_years = deezerSongs['year'].dropna()\ndf_years = df_years[ df_years.apply(lambda x: x.isdigit())] \n\nreportData = {\n 'auditions_candidates': len( showData ),\n 'candidates_data': showData,\n 'coaches_keys': coachesNames,\n 'languages_keys': languages,\n 'show-type':'half-final',\n 'songs_deezer_quotes':{\n 'min': float(deezerSongs['deezer-quote'].min()),\n 'max': float(deezerSongs['deezer-quote'].max()),\n },\n 'songs_years':{\n 'min': int(df_years.min()),\n 'max': int(df_years.max()),\n },\n}\n\n\n#STEP: output-file\n\noutputPath = router.getRoute( config['target']['route'] ) + config['target']['dir'] \noutputFilePath = outputPath + config['target']['file'].replace(\"$VERSION$\", str( config['version'] ) )\n\n#com: create output folder\nif not os.path.exists( outputPath ):\n os.makedirs( outputPath )\n\nwith open( outputFilePath , 'w') as outfile:\n json.dump( reportData , outfile , indent=2, ensure_ascii=False)\n\n#STEP: update config file\n\nyaml.dump( config, file( DIR_TASK + '\\\\' + CONFIG_FILE_NAME + '.yml', 'w'), indent=2, default_flow_style=False )","sub_path":"tasks/010_half-final_candidates/010.01_main.py","file_name":"010.01_main.py","file_ext":"py","file_size_in_byte":8631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324359267","text":"import setuptools\r\n\r\nwith open(\"jobpy/_version.py\") as f:\r\n __version__ = f.read()\r\n\r\nwith open(\"README.md\", \"r\", encoding=\"utf8\") as fh:\r\n long_description = fh.read()\r\n\r\nsetuptools.setup(\r\n name=\"jobpy\", # Replace with your own username\r\n version=__version__[15:-1], # Grabas the version from _version.py\r\n author=\"Fabian Rodriguez\",\r\n license = \"MIT\",\r\n author_email=\"fabian.rodrez@gmail.com\",\r\n description=\"A package built to facilitate job search and match your skills the best job.\",\r\n long_description= long_description,\r\n long_description_content_type=\"text/markdown\",\r\n keywords = ['jobs', 'jobpy', 'job tool'],\r\n install_requires=[ # I get to this in a second\r\n 'pandas',\r\n 'beautifulsoup4',\r\n 'requests',\r\n ],\r\n url=\"https://github.com/rodrez/jobpy\",\r\n packages=setuptools.find_packages(),\r\n classifiers=[\r\n \"Development Status :: 3 - Alpha\",\r\n \"Topic :: Software Development :: Build Tools\",\r\n \"Programming Language :: Python :: 3\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n ],\r\n # python_requires='>=3.7',\r\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580196465","text":"from typing import Any\n\nimport proto\n\nclass FeedItemTargetErrorEnum(proto.Message):\n class FeedItemTargetError(proto.Enum):\n UNSPECIFIED = 0\n UNKNOWN = 1\n MUST_SET_TARGET_ONEOF_ON_CREATE = 2\n FEED_ITEM_TARGET_ALREADY_EXISTS = 3\n FEED_ITEM_SCHEDULES_CANNOT_OVERLAP = 4\n TARGET_LIMIT_EXCEEDED_FOR_GIVEN_TYPE = 5\n TOO_MANY_SCHEDULES_PER_DAY = 6\n CANNOT_HAVE_ENABLED_CAMPAIGN_AND_ENABLED_AD_GROUP_TARGETS = 7\n DUPLICATE_AD_SCHEDULE = 8\n DUPLICATE_KEYWORD = 9\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n 
ignore_unknown_fields: bool = ...,\n ) -> None: ...\n","sub_path":"google-stubs/ads/googleads/v13/errors/types/feed_item_target_error.pyi","file_name":"feed_item_target_error.pyi","file_ext":"pyi","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273488203","text":"#!/usr/bin/env python\n# encoding: utf-8\n# @Time:2020/9/27 13:13\n# @Author:JiahangGu\nfrom typing import List\n\n\nclass Solution:\n def minOperations(self, logs: List[str]) -> int:\n s = []\n for path in logs:\n if path == '../':\n if s:\n s.pop()\n elif path == './':\n continue\n else:\n s.append(path)\n return len(s)\n","sub_path":"Weekly Contest/20-9-27/1-5523-crawler-log-folder.py","file_name":"1-5523-crawler-log-folder.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640821333","text":"import ast\n\nfrom odd.artifact import Confidence, Issue, Location, PythonModule\nfrom odd.plugin import Plugin\nfrom odd.ast_utils import ASTNodePosition, iter_name, iter_node_type\n\n\nclass TranslateFormattedString(Plugin):\n _handles = {\"python_module\"}\n _emits = {\"issue\"}\n\n def on_python_module(self, python_module: PythonModule):\n for call in iter_node_type(python_module.node, ast.Call):\n if next(iter_name(call.func, reverse=True), None) != \"_\":\n continue\n\n if (\n len(call.args) == 1\n and isinstance(call.args[0], ast.BinOp)\n and isinstance(call.args[0].op, ast.Mod)\n ):\n yield Issue(\n \"formatted_string_translated\",\n \"`_` called on formatted string\",\n python_module.addon,\n locations=[\n Location(\n python_module.path,\n ASTNodePosition.from_node(call).start_pos_col_1,\n )\n ],\n confidence=Confidence.LOW,\n categories=[\"correctness\"],\n )\n","sub_path":"odd_bunch/plugin/translate_formatted_string.py","file_name":"translate_formatted_string.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"484438022","text":"import numpy as np\r\nfrom pylab import *\r\nfrom sklearn import svm, datasets\r\n\r\n#Create fake income/age clusters for N people in k clusters\r\ndef createClusteredData(N, k):\r\n np.random.seed(5)\r\n pointsPerCluster = float(N)/k\r\n X = []\r\n y = []\r\n for i in range (k):\r\n incomeCentroid = np.random.uniform(20000.0, 200000.0)\r\n ageCentroid = np.random.uniform(20.0, 70.0)\r\n for j in range(int(pointsPerCluster)):\r\n X.append([np.random.normal(incomeCentroid, 10000.0), np.random.normal(ageCentroid, 2.0)])\r\n y.append(i)\r\n X = np.array(X)\r\n y = np.array(y)\r\n return X, y\r\n\r\n(X, y) = createClusteredData(100, 5)\r\n\r\nplt.figure(figsize=(8, 6))\r\nplt.scatter(X[:, 0], X[:, 1], c=y.astype(np.float))\r\n# plt.show()\r\n\r\n# we'll use linear SVC to partition our graph into clusters\r\nC = 1.0\r\nsvc = svm.SVC(kernel='linear', C=C).fit(X, y)\r\n\r\n# rendering the regions of each cluster as distinct colors\r\ndef plotPredictions(clf):\r\n xx, yy = np.meshgrid(np.arange(0, 250000, 10),\r\n np.arange(10, 70, 0.5))\r\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\r\n\r\n plt.figure(figsize=(8, 6))\r\n Z = Z.reshape(xx.shape)\r\n plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)\r\n plt.scatter(X[:, 0], X[:, 1], c=y.astype(np.float))\r\n plt.show()\r\n\r\n\r\nplotPredictions(svc)\r\n\r\n# predict for a given point\r\nprint(svc.predict([[200000, 40]]))\r\nprint(svc.predict([[50000, 
65]]))","sub_path":"ML/SVC.py","file_name":"SVC.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"599985625","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport autoslug.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Picture',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('file', models.ImageField(upload_to=b'pictures')),\n ('name', models.CharField(max_length=100, null=True, blank=True)),\n ('thumbnail', models.ImageField(max_length=500, null=True, upload_to=b'pictures/thumbs', blank=True)),\n ('slug', autoslug.fields.AutoSlugField(populate_from=b'name', editable=False)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"image/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"120130307","text":"import sys\nsys.path.insert(1, '/home/mdaquin/code/ingraph/')\nimport kwaku.pagetograph as ptg\nfrom ingraph.ingraph import InGraph\n\n\n\nconfig = {\n \"seeds\": [\"https://www.insight-centre.org/research-staff\"],\n \"follow_links\":[\n {\"selector\": \".views-field-field-family-name a\",\n \"attribute\": \"href\"},\n {\"selector\": \".pager__item a\",\n \"attribute\": \"href\"} \n ],\n \"nodes\": [\n {\"selector\": \".pane-page-content\",\n \"ID\": \"h2\",\n \"attributes\": [\n {\"selector\": \".l-user-profile-bottom p span span\",\n \"attribute\": \"biography\"}\n ],\n \"relations\": [\n {\"selector\": \".field--name-field-user-job-title .field__items .field__item\",\n \"relation\": \"role\"},\n {\"selector\": \".field--name-field-user-insight-institude .field__items .field__item\",\n \"relation\": \"affiliation\"\n }\n ]\n },\n {\"selector\": \".view-publications li\",\n \"ID\": \".views-field-title a\",\n \"attributes\": [\n {\"selector\": \".views-field-title a\",\n \"attribute\": \"title\"\n },\n {\"selector\": \".date-display-single\",\n \"attribute\": \"date\"\n }\n ],\n \"relations\": [\n {\"selector\": \".views-field-field-pub-journal .field-content\",\n \"relation\": \"journal\"\n },\n {\"selector\": \".views-label-field-pub-conference .field-content\",\n \"relation\": \"conference\"\n },\n {\"selector\": \".views-field-field-pub-display-authors p\",\n \"relation\": \"authorlist\"\n },\n {\"selector\": \".views-field-field-pub-display-authors p a\",\n \"relation\": \"author\"\n } \n ] \n }\n ]\n }\n\ngraphid = \"test_ptg_graph\"\nes_url = \"http://127.0.0.1:9200/\"\n\ngraph = InGraph(graphid, es_url)\n\n# create the graph\ngraph.create_graph(directed=True, labelled=True, weighted=False, multi=True)\n\nprint (ptg.crawl(config, graph, turl=\"https://www.insight-centre.org/users/francisco-pena\"))\n\n\n","sub_path":"test_build.py","file_name":"test_build.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"496578271","text":"## 3 Ejercicio Crear un Formulario que usando el control Treeview muestre la una lista con los nombre de\n## Ciudades Argentinas y su código postal ( por lo menos 5 ciudades ) . 
\n\nfrom tkinter import *\nfrom tkinter import ttk\n\n\nclass Aplicacion():\n\n def __init__(self):\n raiz = Tk()\n tree = ttk.Treeview(raiz)\n#Columnas\n tree[\"columns\"]=(\"one\",\"two\")\n tree.column(\"#0\", width=100)\n tree.column(\"one\", width=100 )\n tree.column(\"two\", width=100)\n tree.heading(\"#0\", text=\"Id\")\n tree.heading(\"one\", text=\"Ciudad\")\n tree.heading(\"two\", text=\"Codigo Postal\")\n#Insert\n tree.insert(\"\" , 0, text=\"id\", values=(\"Rosario\",\"2000\"))\n tree.insert(\"\" , 1, text=\"id\", values=(\"Santa Fe\",\"3000\"))\n tree.insert(\"\" , 2, text=\"id\", values=(\"Arroyo Seco\",\"2128\"))\n tree.insert(\"\" , 3, text=\"id\", values=(\"Villa Constitucion\",\"2919\"))\n tree.insert(\"\" , 4, text=\"id\", values=(\"Rafaela\",\"2300\"))\n#Loop\n tree.pack()\n raiz.mainloop()\n\ndef main():\n mi_app = Aplicacion()\n return mi_app\n\nif __name__ == '__main__':\n main()","sub_path":"practico_04/ejercicio03.py","file_name":"ejercicio03.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306638810","text":"import os\nimport time\nimport pdb\nimport sys\nimport fcntl\n\n'''\n4 type of events\n1) Current --> \"same\"\n2) Bad Canary - only 1 change --> \"bcanary\"\n3) Good Canary - only 1 change --> \"gcanary\"\n4) Bad Canary -- rollback -- 3 new containers --> \"rollback\"\n5) Good Canary -- deploy -- 2 new containers --> \"deploy\"\n'''\n\nusermap = {}\n\nmodifiedNames = {}\n\nfilepath = '/arlogs/userlist'\n\nlastTime = 0\n\n#defaultEventType = 'same'\n\ndef modifyFile(filepath,username,eventType):\n defaultEventType = 'same'\n\n lines = []\n\n with open(filepath, mode='r') as f:\n while True:\n try:\n fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n lines = f.readlines()\n break\n except IOError as e:\n print ('Temporary locked or some other error: ',e)\n time.sleep(0.1) \n finally:\n fcntl.flock(f, fcntl.LOCK_UN)\n\n d = {}\n #for line in lines:\n # print('line is:',line)\n d = dict([line.split() for line in lines]) \n print ('old dict is: ',d)\n \n if username in d:\n d[username] = eventType\n else: \n d[username] = defaultEventType\n \n print ('new dict is: ',d)\n\n writeToFile = \"\"\n lengthOfDict = len(d)\n iter = 1\n for key,value in d.items():\n if iter==lengthOfDict:\n writeToFile+=key+\" \"+value\n else:\n writeToFile+=key+\" \"+value+\"\\n\"\n iter += 1\n\n with open(filepath, mode='w') as f:\n while True:\n try:\n fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n f.write(writeToFile)\n break\n except IOError as e:\n print ('Temporary locked or some other error: ',e)\n time.sleep(0.1) \n finally:\n fcntl.flock(f, fcntl.LOCK_UN)\n\n\nif __name__ == \"__main__\": \n if len(sys.argv) != 3:\n print ('Something went wrong with the arguments, exiting ..')\n sys.exit(0)\n\n username = sys.argv[1]\n eventType = sys.argv[2]\n \n modifyFile(filepath,username,eventType)\n","sub_path":"writeFile.py","file_name":"writeFile.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"350625737","text":"# setup variables required for package\nimport os\npath_package = os.path.dirname(os.path.realpath(__file__))\npath_package_config = os.path.join(path_package, \"config\")\n\n\n# this function is located here to host the flask sever\ndef create_app(test_config=None):\n from mspypeline.flask_scripts import create_app_helper\n return create_app_helper(test_config)\n\n\n# flatten 
package imports for the core package\nfrom .version import __version__\nfrom .plotting_backend import plotly_plots, matplotlib_plots\nfrom .modules import *\nfrom .core import *\nfrom .file_reader import *\nfrom .file_reader.MQReader import MQReader\n# import for \"from package import *\"\n__all__ = [\n \"create_app\",\n \"path_package\",\n \"path_package_config\",\n \"__version__\",\n \"plotly_plots\",\n \"matplotlib_plots\",\n \"MQReader\"\n]\n__all__.extend(core.__all__)\n__all__.extend(modules.__all__)\n__all__.extend(file_reader.__all__)\n\nif __name__ == \"__main__\":\n msparser = MSPParser()\n UIHandler(**msparser.args_dict)\n","sub_path":"mspypeline/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"428714140","text":"import tensorflow as tf\n\ndef bn_transform(x):\n batch_mean, batch_var = tf.nn.moments(x,[ 0])\n z_hat = (x - batch_mean) / tf.sqrt(batch_var + epsilon)\n gamma = tf.Variable(tf.ones([ 100]))\n beta = tf.Variable(tf.zeros([ 100]))\n bn = gamma * z_hat + beta\n y = tf.nn.sigmoid(bn)\n return y","sub_path":"demo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630450652","text":"__author__ = \"Luke Liu\"\n#encoding=\"utf-8\"\nimport numpy as np\nimport cv2\n\nimg = cv2.imread(\"images/test_01.png\",0)\nheight,width = img.shape[:2]\n\n\nmask =np.zeros(img.shape,dtype='uint8')\nmask[100:500,300:400] = 255\n\nafter_mask = cv2.bitwise_and(img,img,mask=mask)\n# total 非零点的个数\ntotal = cv2.countNonZero(after_mask)\nprint(total)\ncv2.imshow(\"\",after_mask)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"OpenCV实现自动答题卡评分/tstst.py","file_name":"tstst.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"154953324","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport base64\nfrom Crypto.Cipher import AES\nfrom os import urandom\n\nfrom oslo.config import cfg\n\nfrom heat.openstack.common import log as logging\n\n\nauth_opts = [\n cfg.StrOpt('auth_encryption_key',\n default='notgood but just long enough i think',\n help=\"Encryption key used for authentication info in database\")\n]\n\ncfg.CONF.register_opts(auth_opts)\n\nlogger = logging.getLogger(__name__)\n\n\ndef encrypt(auth_info):\n if auth_info is None:\n return None, None\n iv = urandom(AES.block_size)\n cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)\n res = base64.b64encode(iv + cipher.encrypt(auth_info))\n return 'heat_decrypt', res\n\n\ndef heat_decrypt(auth_info):\n if auth_info is None:\n return None\n auth = base64.b64decode(auth_info)\n iv = auth[:AES.block_size]\n cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)\n res = cipher.decrypt(auth[AES.block_size:])\n return res\n","sub_path":"heat/common/crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"145617255","text":"from reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.cidfonts import UnicodeCIDFont\nfrom config import *\npdfmetrics.registerFont(UnicodeCIDFont('STSong-Light'))\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom report.item_frequency_report.sql_selection import *\npdfmetrics.registerFont(TTFont('hei', 'SIMHEI.TTF'))\nfrom reportlab.lib import colors\nfrom reportlab.platypus import SimpleDocTemplate, Image, Table\n\nelements = []\n\ndata = [['Code', 'Product Name', 'Supplier', 'Freq.', 'Picture']]#,'Link']]\nfor i in range(len(frequency_list)):\n print(i)\n item_name = frequency_list[i][\"ItemName\"]\n for j in range(len(item_name)):\n if j>30 and item_name[j]==\" \":\n item_name=item_name[:j]+\"\\n\"+item_name[j+1:]\n break\n for j in range(len(item_name)):\n if j>60 and item_name[j]==\" \":\n item_name=item_name[:j]+\"\\n\"+item_name[j+1:]\n break\n for j in range(len(item_name)):\n if j>90 and item_name[j]==\" \":\n item_name=item_name[:j]+\"\\n\"+item_name[j+1:]\n break\n supplier = frequency_list[i][\"Supplier\"].replace(\" \",\"\\n\")\n if frequency_list[i][\"Picture\"] != \"\":\n data.append([frequency_list[i][\"Code\"],item_name,supplier,\n frequency_list[i][\"Frequency\"],Image(frequency_list[i][\"Picture\"],100,100)#,\n # frequency_list[i][\"Link\"]\n ])\n else:\n data.append([frequency_list[i][\"Code\"],item_name,supplier,\n frequency_list[i][\"Frequency\"],frequency_list[i][\"Picture\"]#,\n # frequency_list[i][\"Link\"]\n ])\nt = Table(data, colWidths=[80,250,70,50,120,100],style=[\n ('FONTNAME', (0, 0), (-1, -1), 'hei'),\n ('GRID', (0, 0), (-1, -1), 2, colors.black),\n ('BOX', (0, 0), (-1, -1), 2, colors.black),\n])\n\nelements.append(t)\n# print(elements)\ndoc = SimpleDocTemplate(root_path+'report/item_frequency_report.pdf')\ndoc.build(elements)\n","sub_path":"report/item_frequency_report/generate_report.py","file_name":"generate_report.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"380944764","text":"\n\nfrom xai.brain.wordbase.nouns._municipality import _MUNICIPALITY\n\n#calss header\nclass _MUNICIPALITIES(_MUNICIPALITY, ):\n\tdef __init__(self,): \n\t\t_MUNICIPALITY.__init__(self)\n\t\tself.name = 
\"MUNICIPALITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"municipality\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_municipalities.py","file_name":"_municipalities.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"89599138","text":"# 2109. Adding Spaces to a String\n\n# vwc 272\n# 2021/12/20\n#\n# Runtime: 820 ms, faster than 83.33% of Python3 online submissions for Adding Spaces to a String.\n# Memory Usage: 51.5 MB, less than 16.67% of Python3 online submissions for Adding Spaces to a String.\n\n# two pointer\n# append ' ' if pointer a == pointer b\n\nclass Solution:\n def addSpaces(self, s: str, spaces: List[int]) -> str:\n ans, j = \"\", 0\n for i in range(len(s)):\n if j < len(spaces) and i == spaces[j]:\n ans += ' '\n j += 1\n ans += s[i]\n return ans\n","sub_path":"2109. Adding Spaces to a String.py","file_name":"2109. Adding Spaces to a String.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"312193564","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\nfrom models.item import ItemModel\nfrom models.store import StoreModel\n\n#Item becomes a class with the Resource class properties due to inheritance\n#This is a resource that is only allowed to be accessed via GET\n#404: Not found\n#200: Most popular HTTP status code for OK\nclass Item(Resource):\n\n #This will parse the data before the function ever uses it and also allows us to put some\n #safeguards and restrictions on how the data is being passed down.\n #This request will stop, if the json payload does not have the correct 'price' format\n #The json payload could have multiple fields, but it will only take 'price'\n parser = reqparse.RequestParser()\n parser.add_argument('price',\n type=float,\n required=True,\n help='This field cannot be left blank!')\n parser.add_argument('store_id',\n type=int,\n required=True,\n help='Every item needs a store id.')\n\n #Forces authentication before we reach the get method, will call the 'identity()' method from security\n @jwt_required()\n def get(self, name):\n #THIS IS ALL THE SET UP NEEDED TO RETRIEVE AN ITEM FROM THE DB\n try:\n #Returns an item object and not a dictionary, we must make it a json\n item = ItemModel.find_by_name(name)\n except:\n return {'message':'An error occurred finding an item!'}\n\n if item is not None:\n return item.json()\n\n return {'message':'Item was not found!'}\n\n\n\n #201: HTTP status code that stands for an item being created.\n #400: HTTP status code that stands for Bad Request\n #We want to make sure we have unique items\n def post(self, name):\n #Check if the item is already in the database\n if ItemModel.find_by_name(name) is not None:\n return {'message':\"Item '{}' already exists!\".format(name)}, 400\n\n #Request will take the data from the body of an HTTP request and convert it into a dictionary\n # data will have the fields needed to create an item; data['price'] for example\n #depricated by reqparse: data = request.get_json()\n data = Item.parser.parse_args()\n\n #Create a JSON of the item\n #Depricated: item = {'name':name, 'price':data['price']}\n item = ItemModel(name, data['price'], data['store_id'])\n\n if StoreModel.find_by_id(data['store_id']):\n item.save_to_db()\n return item.json(), 201\n else:\n return {'message':'Store does not exists!'}\n\n\n\n\n #Will delete an item 
from the list by filtering out the name of the item to be removed from the list\n def delete(self, name):\n #Finds item by its name\n item = ItemModel.find_by_name(name)\n\n if item is not None:\n # item calls its delete function to delete itself\n item.delete_from_db()\n return {'message': 'Item succesfully deleted!'}\n\n return {'message':'Item was not found!'}\n\n\n #Will update an item if the specified name already exists, or update it if it doesn't\n def put(self,name):\n\n #Request will take the data from the body of an HTTP request and convert it into a dictionary\n # data will have the fields needed to create an item; data['price'] for example\n #depricated by reqparse: data = request.get_json()\n data = Item.parser.parse_args()\n\n try:\n #if item exists, find it and store it into variable item\n item = ItemModel.find_by_name(name)\n except:\n return {'message': 'An error ocurred finding the item!'} , 500\n\n if item is None:\n #if item is None, create a new instance of ItemModel that will have an insert function\n item = ItemModel(name, **data)\n\n else:\n #If item does exists, we can manipulate its values\n item.price = data['price']\n item.store_id = data['store_id']\n\n #Because item is unique, sqlAlchemy will reconginize it and update it\n item.save_to_db()\n return item.json()\n\n\nclass ItemList(Resource):\n def get(self):\n return {'items': [x.json() for x in ItemModel.find_all()]}\n #return {'items': [list(map(lambda x: x.json(), ItemModel.query.all() ))]}\n","sub_path":"resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"93397687","text":"#!/usr/bin/python\nfrom operator import itemgetter\nimport sys\n\n\n\ndata_file=open(\"/home/adi-sin/Desktop/2.txt\", \"w\")\n\n\n\n\ncurrent_word = None\n#current_count = 0\nword = None\ntext = '\"full_text\":'\ndictn = []\n\n# input comes from STDIN\nfor line in sys.stdin:\n # remove leading and trailing whitespace\n line = line.strip()\n\n # parse the input we got from mapper.py\n word = line.split(',')\n\n for sent in word:\n\n if text in sent:\n #sent.replace(text,'')\n\n print(sent[13:])\n dictn.append(sent[13:])\n\n \n\n \n \t\nop_tw = set(dictn) \t \n\nfor item in op_tw:\n data_file.write(\"%s\\n\" % item) \n\n\n \n\n \n \n\n\n\n \n\n\n\n","sub_path":"executablecode/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321196477","text":"# -*- coding: utf-8 -\n#\n# This file is part of gaffer. 
See the NOTICE for more information.\n\nimport argparse\nimport fnmatch\ntry:\n    import configparser\nexcept ImportError:\n    import ConfigParser as configparser\nimport os\nimport sys\n\nfrom .http_handler import HttpHandler, HttpEndpoint\nfrom .manager import Manager, FlappingInfo\nfrom .pidfile import Pidfile\nfrom .sig_handler import SigHandler\nfrom .util import daemonize\nfrom .webhooks import WebHooks\n\nENDPOINT_DEFAULTS = dict(\n    uri = None,\n    backlog = 128,\n    ssl_options = {})\n\nPROCESS_DEFAULTS = dict(\n    group = None,\n    args = None,\n    env = {},\n    uid = None,\n    gid = None,\n    cwd = None,\n    detach = False,\n    shell = False,\n    os_env = False,\n    numprocesses = 1,\n    start = True)\n\nclass DefaultConfigParser(configparser.ConfigParser):\n    \"\"\" object overriding ConfigParser to return default values instead\n    of raising an error if needed \"\"\"\n\n    def dget(self, section, option, default=None):\n        if not self.has_option(section, option):\n            return default\n        return self.get(section, option)\n\n    def dgetint(self, section, option, default=None):\n        if not self.has_option(section, option):\n            return default\n        return self.getint(section, option)\n\n    def dgetboolean(self, section, option, default=None):\n        if not self.has_option(section, option):\n            return default\n        return self.getboolean(section, option)\n\n\nclass Server(object):\n    \"\"\" Server object used for gafferd \"\"\"\n\n    def __init__(self, config_path):\n        self.apps, self.processes = self.get_config(config_path)\n        self.manager = Manager()\n\n    def run(self):\n        self.manager.start(apps=self.apps)\n\n        # add processes\n        for name, cmd, params in self.processes:\n            self.manager.add_process(name, cmd, **params)\n\n        # run the main loop\n        self.manager.run()\n\n    def read_config(self, config_path):\n        cfg = DefaultConfigParser()\n        with open(config_path) as f:\n            cfg.readfp(f)\n        cfg_files_read = [config_path]\n\n        # load included config files\n        includes = []\n        for include_file in cfg.dget('gaffer', 'include', '').split():\n            includes.append(include_file)\n\n        for include_dir in cfg.dget('gaffer', 'include_dir', '').split():\n            for root, dirnames, filenames in os.walk(include_dir):\n                for filename in fnmatch.filter(filenames, '*.ini'):\n                    cfg_file = os.path.join(root, filename)\n                    includes.append(cfg_file)\n\n        cfg_files_read.extend(cfg.read(includes))\n\n        return cfg, cfg_files_read\n\n    def get_config(self, config_file):\n        cfg, cfg_files_read = self.read_config(config_file)\n\n        # you can setup multiple endpoints in the config\n        endpoints_str = cfg.dget('gaffer', 'http_endpoints', '')\n        endpoints_names = endpoints_str.split(\",\")\n\n        endpoints = []\n        processes = []\n        webhooks = []\n        for section in cfg.sections():\n            if section.startswith('endpoint:'):\n                name = section.split(\"endpoint:\", 1)[1]\n                if name in endpoints_names:\n                    kwargs = ENDPOINT_DEFAULTS.copy()\n\n                    for key, val in cfg.items(section):\n                        if key == \"bind\":\n                            kwargs['uri'] = val\n                        elif key == \"backlog\":\n                            kwargs['backlog'] = cfg.dgetint(section, key, 128)\n                        elif key == \"certfile\":\n                            kwargs['ssl_options'][key] = val\n                        elif key == \"keyfile\":\n                            kwargs['ssl_options'][key] = val\n\n                    if not kwargs['ssl_options']:\n                        kwargs['ssl_options'] = None\n                    if kwargs.get('uri') is not None:\n                        endpoints.append(HttpEndpoint(**kwargs))\n            elif section.startswith('process:'):\n                name = section.split(\"process:\", 1)[1]\n                cmd = cfg.dget(section, 'cmd', '')\n                if cmd:\n                    params = PROCESS_DEFAULTS.copy()\n                    for key, val in cfg.items(section):\n                        if key == \"group\":\n                            params[key] = val\n                        elif key == \"args\":\n                            params[key] = val\n                        elif key.startswith('env:'):\n                            envname = key.split(\"env:\", 1)[1]\n                            params['env'][envname] = val\n                        elif key == 'uid':\n                            params[key] = val\n                        elif key == 'gid':\n                            params[key] = val\n                        elif key == 'cwd':\n                            params[key] = val\n                        elif key == 'detach':\n                            params[key] = cfg.dgetboolean(section, key,\n                                False)\n                        elif key == 'shell':\n                            params[key] = cfg.dgetboolean(section, key,\n                                False)\n                        elif key == 'os_env':\n                            params[key] = cfg.dgetboolean(section, key,\n                                False)\n                        elif key == 'numprocesses':\n                            params[key] = cfg.dgetint(section, key, 1)\n                        elif key == 'start':\n                            params[key] = cfg.dgetboolean(section, key,\n                                True)\n                        elif key == 'flapping':\n                            # flapping values are passed in order on one\n                            # line\n                            values_str = val.split(None)\n                            try:\n                                values = [float(val) for val in values_str]\n                                params['flapping'] = FlappingInfo(*values)\n                            except ValueError:\n                                pass\n                        elif key == \"redirect_output\":\n                            params[key] = [v.strip() for v in val.split(\",\")]\n                        elif key == \"redirect_input\":\n                            params[key] = cfg.dgetboolean(section, key,\n                                False)\n                        elif key == \"graceful_timeout\":\n                            params[key] = cfg.dgetint(section, key, 10)\n\n                    processes.append((name, cmd, params))\n            elif section == \"webhooks\":\n                for key, val in cfg.items(section):\n                    webhooks.append((key, val))\n\n        if not endpoints:\n            # we create a default endpoint\n            endpoints = [HttpEndpoint()]\n\n        apps = [SigHandler(),\n                WebHooks(hooks=webhooks),\n                HttpHandler(endpoints=endpoints)]\n\n        return apps, processes\n\ndef run():\n    parser = argparse.ArgumentParser(description='Run some watchers.')\n    parser.add_argument('config', help='configuration file')\n\n    parser.add_argument('--daemon', dest='daemonize', action='store_true',\n        help=\"Start gaffer in the background\")\n    parser.add_argument('--pidfile', dest='pidfile')\n\n    args = parser.parse_args()\n\n    if args.daemonize:\n        daemonize()\n\n    pidfile = None\n    if args.pidfile:\n        pidfile = Pidfile(args.pidfile)\n\n        try:\n            pidfile.create(os.getpid())\n        except RuntimeError as e:\n            print(str(e))\n            sys.exit(1)\n\n    s = Server(args.config)\n\n    try:\n        s.run()\n    except KeyboardInterrupt:\n        pass\n    finally:\n        if pidfile is not None:\n            pidfile.unlink()\n\n    sys.exit(0)\n\nif __name__ == \"__main__\":\n    run()\n","sub_path":"gaffer/gafferd.py","file_name":"gafferd.py","file_ext":"py","file_size_in_byte":7896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"64223545","text":"import glob\nimport time\nimport os\nimport zipfile\n\nfrom selenium import webdriver\n\npairs = ['eurusd', 'gbpusd'] #'usdjpy', 'usdchf', 'usdcad', 'audusd', 'nzdusd']\nmonths = ['8']\nyears = ['2016']\ndestination_path = os.path.join(os.path.expanduser('~'), 'git/working/projects/wolf/histdata.com/data/csv')\ndownloads_path = os.path.join(os.path.expanduser('~'), 'Downloads')\n\nfp = webdriver.FirefoxProfile()\nfp.set_preference(\"browser.download.folderList\",2)\nfp.set_preference(\"browser.download.manager.showWhenStarting\",False)\nfp.set_preference(\"browser.download.dir\", \"/home/git/working/projects/wolf/histdata.com/data\")\nfp.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \"application/octet-stream\")\n\ndriver = webdriver.Firefox(firefox_profile=fp)\n\nfor p in pairs:\n    for m in months:\n        for y in years:\n            u = \"http://www.histdata.com/download-free-forex-historical-data/?/ascii/tick-data-quotes/\" + p + \"/\" + y + \"/\" + m\n            print(\"scraping \" + u)\n\n            driver.get(u)\n            time.sleep(5)\n            element = driver.find_element_by_id(\"a_file\")\n            element.click()\n            
time.sleep(20)\n\ndriver.quit()\n\nos.chdir(downloads_path)\nfilenames = glob.glob('HISTDATA_COM_ASCII_*')\nfor f in filenames:\n    ticks_file = os.path.join(downloads_path, f)\n    with zipfile.ZipFile(ticks_file, \"r\") as z:\n        for name in z.namelist():\n            if name.endswith('.csv'):\n                z.extract(name, destination_path)\n    os.remove(ticks_file)\n","sub_path":"histdata.com/src/1.scrape.py","file_name":"1.scrape.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"410181505","text":"from tensorflow import keras\nfrom numpy import expand_dims\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\nimport glob \nimport numpy as np\nfrom os import listdir,makedirs\nfrom os.path import isfile,join\nfrom numpy import *\nfrom PIL import Image\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nimport random\nimport cv2\nimport os \n\npath_2=\"/Users/UDAY KUMAR/Desktop/major_project/CataractDetection-master/dataset\"\n\nlocal_path=path_2+\"/1_normal\"\nfiles = [f for f in listdir(local_path) if isfile(join(local_path,f))]\n\nrandom.shuffle(files) \nfiles=files[0:100]\nfor i in files:\n\toriginalImage = cv2.imread(local_path+\"/\"+i)\n\tdstPath = join(path_2+\"/training_data\",i)\n\tcv2.imwrite(dstPath,originalImage)\n# print(len(files))\nlocal_path=path_2+\"/2_cataract\"\nfiles_1 = [f for f in listdir(local_path) if isfile(join(local_path,f))]\nfor i in files_1:\n\tfiles.append(i)\n\toriginalImage = cv2.imread(local_path+\"/\"+i)\n\tdstPath = join(path_2+\"/training_data\",i)\n\tcv2.imwrite(dstPath,originalImage)\n\nlocal_path=path_2+\"/2_glaucoma\"\nfiles_2 = [f for f in listdir(local_path) if isfile(join(local_path,f))]\nfiles_2=files_2[0:100]\nfor i in files_2:\n\tfiles.append(i)\n\toriginalImage = cv2.imread(local_path+\"/\"+i)\n\tdstPath = join(path_2+\"/training_data\",i)\n\tcv2.imwrite(dstPath,originalImage)\n\nlocal_path=path_2+\"/3_retina_disease\"\nfiles_3 = [f for f in listdir(local_path) if isfile(join(local_path,f))]\nfor i in files_3:\n\tfiles.append(i)\n\toriginalImage = cv2.imread(local_path+\"/\"+i)\n\tdstPath = join(path_2+\"/training_data\",i)\n\tcv2.imwrite(dstPath,originalImage)\n\nlisting=os.listdir(path_2)\n# for image in files:\n# \timg = cv2.imread(os.path.join(path_2,image))\n# \timlist.append(img.flatten())\nimmatrix=np.array([np.array(cv2.imread(os.path.join(path_2,image))).flatten()\n\tfor image in files])\nnum_samples=400\n\n\nlabel=np.ones((num_samples,),dtype=int)\nlabel[0:100]=0\nlabel[100:200]=1\nlabel[200:300]=2\nlabel[300:]=3\ndata,label=shuffle(immatrix,label,random_state=2)\nchannels = 3\n\ndataset = np.ndarray(shape=(len(files), channels,200,200),\n                     dtype=np.float32)\ni=0\nfor _file in files:\n\tif i<100:\n\t\timg = load_img(path_2+\"/1_normal\"+ \"/\" + _file)\n\t\t# print(img)\n\t\tx = img_to_array(img) \n\t\t#print(x)\n\t\tx = x.reshape((3,200,200))\n\t\tdataset[i]=x\n\telif i>=100 and i<200:\n\t\timg = load_img(path_2+\"/2_cataract\"+ \"/\" + _file)\n\t\t# print(img)\n\t\tx = img_to_array(img) \n\t\t# print(x)\n\t\tx = x.reshape((3,200,200))\n\t\tdataset[i]=x\n\telif i>=200 and i<300:\n\t\timg = load_img(path_2+\"/2_glaucoma\"+ \"/\" + _file)\n\t\t# print(img)\n\t\tx = img_to_array(img) \n\t\t# print(x)\n\t\tx = x.reshape((3,200,200))\n\t\tdataset[i]=x\n\telse:\n\t\timg = load_img(path_2+\"/3_retina_disease\"+ \"/\" + _file)\n\t\t# print(img)\n\t\tx = img_to_array(img) \n\t\t# print(x)\n\t\tx = x.reshape((3,200,200))\n\t\tdataset[i]=x\n\ti+=1\nfrom sklearn.model_selection import train_test_split\n# print(dataset)\n#Splitting \nX_train, X_test, y_train, y_test = train_test_split(dataset,label, test_size=0.2, random_state=33)\n\n\n# print(len(X_train))\n# print(len(X_test))\nbatch_size=32\n\nnb_classes=4\n\nnb_epoch=20\nimg_rows,img_cols=200,200\n\nimg_channels=3\n\nnb_filters=32\n\nnb_pool=2\n\nnb_conv=3\n\n\n# X,y=(training_data[0],training_data[1])\n\n# print(len(training_data[0]))\n# X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=4)\n# print(X_train.shape)\n# # plt.imshow(X_train[0].reshape(300,400))\n# # print(len(X_train))\n# X_train=np.array([i for i in X_train]).reshape(-1,32,10, 3) \n# y_train = [i for i in y_train] \n# X_test = np.array([i for i in X_test]).reshape(-1,32,10, 3) \n# y_test = [i for i in y_test] \n# # image = array(img).reshape(1, 64,64,3)\n\n\n\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import RMSprop\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, Conv2D, MaxPooling2D\nfrom keras.callbacks import CSVLogger\n# from livelossplot.keras import PlotLossesCallback\n# import efficientnet.keras as efn\nmodel = Sequential()\n\ninput_shape = (200,200, 3)\nmodel.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))\nmodel.add(Conv2D(32, (3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\nmodel.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(128, (3, 3), padding='same', activation='relu'))\nmodel.add(Conv2D(128, (3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(256, (3, 3), padding='same', activation='relu'))\nmodel.add(Conv2D(256, (3, 3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n              optimizer=RMSprop(lr=0.0001),\n              metrics=['accuracy'])\n\n\n\ntraining_data_generator = ImageDataGenerator(\n    rescale=1./255,\n    shear_range=0.1,\n    zoom_range=0.1,\n    horizontal_flip=True,\n    validation_split=0.2)\nvalidation_data_generator = ImageDataGenerator(rescale=1./255)\ntest_data_generator = ImageDataGenerator(rescale=1./255)\n\n# IMAGE_WIDTH=200\n# IMAGE_HEIGHT=200\nIMAGE_SIZE = 200\nIMAGE_WIDTH, IMAGE_HEIGHT = IMAGE_SIZE, IMAGE_SIZE\nEPOCHS = 20\nBATCH_SIZE = 32\nTEST_SIZE = 30\ntraining_data_dir=path_2+\"/training_data\"\n\ntrain_datagen = ImageDataGenerator(rescale=1./255,\n    shear_range=0.2,\n    zoom_range=0.2,\n    horizontal_flip=True,\n    validation_split=0.2) # set validation split\n\ntrain_generator = train_datagen.flow_from_directory(\n    training_data_dir,\n    target_size=(img_rows, img_cols),\n    batch_size=batch_size,\n    class_mode='categorical',\n    subset='training') # set as training data\n\nvalidation_generator = train_datagen.flow_from_directory(\n    training_data_dir, # same directory as training data\n    target_size=(img_rows, img_cols),\n    batch_size=batch_size,\n    class_mode='categorical',\n    subset='validation') # set as validation data\n\n\n\nmodel.fit_generator(\n    train_generator,\n    steps_per_epoch = train_generator.samples // BATCH_SIZE,\n    validation_data = validation_generator, \n    validation_steps = validation_generator.samples // BATCH_SIZE,\n    epochs = EPOCHS)","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"77960830","text":"def mytest(server='0.0.0.0:8500'):\n    import numpy as np\n    import grpc\n    import tensorflow as tf\n    from tensorflow_serving.apis import predict_pb2\n    from tensorflow_serving.apis import prediction_service_pb2_grpc\n    channel = grpc.insecure_channel(server)\n    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n    input_shape = [128, 224, 224, 3]\n    for _ in range(10):\n        request = predict_pb2.PredictRequest()\n        request.model_spec.name = 'mymodel'\n        request.model_spec.signature_name = 'predict'\n        request.inputs['input'].CopyFrom(\n            tf.contrib.util.make_tensor_proto(\n                # Synthetic inputs.\n                np.random.uniform(size=input_shape).astype(np.float32),\n                shape=input_shape))\n        response = stub.Predict(request)\n        predicted_label = np.array(response.outputs['classes'].int64_val)\n        print('prediction result: ' + str(predicted_label))\n","sub_path":"tensorflow_serving/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"195951347","text":"import os\nimport logging\n\n\nch = logging.StreamHandler()\n\n# Route log records to separate files by level\n# DEBUG level goes to the debug file\nfh_debug = logging.FileHandler(os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs'),\n                                            'binance_gateio_vet_usdt_v10.log'))\nfh_debug.setLevel(logging.DEBUG)\n\n# INFO level goes to the info file\n# fh_info = logging.FileHandler('logs/logging_info.log')\n# fh_info.setLevel(logging.INFO)\n\n# ERROR level goes to the error file\n# fh_error = logging.FileHandler('logs/logging_error.log')\n# fh_error.setLevel(logging.ERROR)\n\n# CRITICAL level would be recorded to a critical file\n\n\ndef create_logger(level=logging.DEBUG, record_format=None):\n    \"\"\"Create a logger according to the given settings\"\"\"\n    # if record_format is None:\n    #     # record_format = \"%(asctime)s\\t%(levelname)s\\t%(module)s.%(funcName)s\\t%(threadName)s\\t%(message)s\"\n    #     record_format = \"%(asctime)s\\t%(levelname)s\\t%(message)s\"\n    # These settings can go in basicConfig directly, or be configured in detail where used\n    # logging.basicConfig(filename='new.log',\n    #                     filemode='a')\n    # Instantiate a logger\n    logger = logging.getLogger(__name__)\n    logger.setLevel(level)  # accepts an upper-case level name or a constant such as logging.DEBUG\n    ch.setLevel(level)\n    formatter = logging.Formatter(record_format)\n    ch.setFormatter(formatter)\n    fh_debug.setFormatter(formatter)\n    # fh_debug.filter(logging.DEBUG)\n    # fh_info.setFormatter(formatter)\n    # fh_info.filter(logging.INFO)\n    # fh_error.setFormatter(formatter)\n    # fh_error.filter(logging.ERROR)\n    logger.addHandler(fh_debug)\n    # logger.addHandler(fh_info)\n    # logger.addHandler(fh_error)\n    logger.addHandler(ch)\n    return logger\n\n\nlogger = create_logger()\n\n\n","sub_path":"trade_rate_info/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"328019009","text":"\"\"\"\nTest `rlmusician.environment.environment` module.\n\nAuthor: Nikolay Lysenko\n\"\"\"\n\n\nfrom typing import List\n\nimport numpy
as np\nimport pytest\n\nfrom rlmusician.environment import PianoRollEnv\n\n\nclass TestPianoRollEnv:\n \"\"\"Tests for `PianoRollEnv` class.\"\"\"\n\n @pytest.mark.parametrize(\n \"env, actions, expected\",\n [\n (\n # `env`\n PianoRollEnv(\n n_semitones=5,\n n_roll_steps=5,\n observation_decay=0.5,\n n_draws_per_roll_step=2,\n scoring_coefs={'absence_of_outer_notes': 1},\n scoring_fn_params={},\n rendering_params={}\n ),\n # `actions`\n [2, 2, 1, 1, 1, 3, 3],\n # `expected`\n np.array([0, 0.75, 0.125, 1.5, 0])\n )\n ]\n )\n def test_observation(\n self, env: PianoRollEnv, actions: List[int], expected: np.ndarray\n ) -> None:\n \"\"\"Test that `step` method returns proper observation.\"\"\"\n env.reset()\n for action in actions:\n observation, reward, done, info = env.step(action)\n assert not done\n np.testing.assert_equal(observation, expected)\n\n @pytest.mark.parametrize(\n \"env, actions, expected\",\n [\n (\n # `env`\n PianoRollEnv(\n n_semitones=5,\n n_roll_steps=5,\n observation_decay=0.5,\n n_draws_per_roll_step=2,\n scoring_coefs={'absence_of_outer_notes': 1},\n scoring_fn_params={},\n rendering_params={}\n ),\n # `actions`\n [2] + [0 for _ in range(7)],\n # `expected`\n -1\n )\n ]\n )\n def test_reward(\n self, env: PianoRollEnv, actions: List[int], expected: float\n ) -> None:\n \"\"\"Test that `step` method returns proper reward.\"\"\"\n env.reset()\n for action in actions:\n observation, reward, done, info = env.step(action)\n assert done\n assert reward == expected\n\n @pytest.mark.parametrize(\n \"env, expected\",\n [\n (\n # `env`\n PianoRollEnv(\n n_semitones=5,\n n_roll_steps=5,\n observation_decay=0.5,\n n_draws_per_roll_step=2,\n scoring_coefs={'absence_of_outer_notes': 1},\n scoring_fn_params={},\n rendering_params={}\n ),\n # `expected`\n np.zeros((5,))\n )\n ]\n )\n def test_reset(self, env: PianoRollEnv, expected: np.ndarray) -> None:\n \"\"\"Test `reset` method.\"\"\"\n observation = env.reset()\n np.testing.assert_equal(observation, expected)\n assert env.current_episode_step == 0\n assert env.current_roll_step == 0\n assert env.n_draws_at_current_roll_step == 0\n","sub_path":"tests/environment/test_environment.py","file_name":"test_environment.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"14997394","text":"#-*- coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom TuningDynamicsDecorator import *\nfrom OrientationTuningCurve import OrientationTuningCurve\n\nclass OrientationTuningDynamics(TuningDynamicsDecorator):\n\t'''\n\tRepresents, processes, loads and saves the orientation tuning dynamics\n\t'''\n\n\tdef getName(self):\n\t\treturn \"OriTunDyn\"\n\n\n\t# processing parameters\n\n\t__processingMethod = \"swindale\"\n\n\tdef getProcessingMethod(self):\n\t\t'''\n\t\tReturns current processing method for a PSTH\n\t\t'''\n\t\treturn self.__processingMethod\n\n\tdef _setProcessingMethod(self, value):\n\t\tif value in OrientationTuningCurve.getAvailableMethods():\n\t\t\tself.__processingMethod = value\n\t\telse:\n\t\t\traise ValueError(\"Processing method not supported\")\n\n\t__tuningWidthLevel = 2.0/3\n\n\tdef getTuningWidthLevel(self):\n\t\t'''\n\t\tReturns a current tuning width leve;\n\t\t'''\n\t\treturn self.__tuningWidthLevel\n\n\tdef _setTuningWidthLevel(self, value):\n\t\t'''\n\t\tSets an appropriate tuning width level\n\t\t'''\n\t\tvalue = float(value)\n\t\tif value>0.0 and value<1.0:\n\t\t\tself.__tuningWidthLevel = 
value\n\t\telse:\n\t\t\traise ValueError(\"Tuning width level must have a value between 0 and 1\")\n\n\tdef __str__(self):\n\t\tS = super(OrientationTuningDynamics, self).__str__()\n\t\tS += \"\\nProcessing method: \"+self.getProcessingMethod()\n\t\tS += \"\\nTuning width level: {0}\".format(self.getTuningWidthLevel())\n\t\treturn S\n\n\n\n\t# processing results\n\n\t__preferredOrientations = None\n\t__tuningWidths = None\n\t__selectivities = None\n\n\tdef getPreferredOrientations(self):\n\t\t'''\n\t\tReturns preferred orientations for all time bins\n\t\t'''\n\t\tif self.__preferredOrientations is None:\n\t\t\traise AttributeError(\"Please, process the data to do this\")\n\t\treturn self.__preferredOrientations\n\n\tdef getTuningWidths(self):\n\t\t'''\n\t\tReturns tuning widths\n\t\t'''\n\t\tif self.__tuningWidths is None:\n\t\t\traise AttributeError(\"Please, process the data to do this\")\n\t\treturn self.__tuningWidths\n\n\tdef getSelectivities(self):\n\t\t'''\n\t\tReturns selectivities\n\t\t'''\n\t\tif self.__selectivities is None:\n\t\t\traise AttributeError(\"Please, process the data to do this\")\n\t\treturn self.__selectivities\n\n\tdef _finalProcessingDetails(self):\n\t\ttv = self.getTimeValues()\n\t\tself.__preferredOrientations = np.zeros(tv.size)\n\t\tself.__tuningWidths = np.zeros(tv.size)\n\t\tself.__selectivities = np.zeros(tv.size)\n\t\tind = 0\n\t\tfor t in tv:\n\t\t\ttc = self.getTuningCurve(t)\n\t\t\tself.__preferredOrientations[ind] = tc.getPreferredOrientation()\n\t\t\tself.__tuningWidths[ind] = tc.getTuningWidth(self.getTuningWidthLevel())\n\t\t\tself.__selectivities[ind] = tc.getSelectivity()\n\t\t\tind += 1\n\n\tdef _envelopeCurve(self, curve):\n\t\tcurve = OrientationTuningCurve(curve)\n\t\tcurve.setProcessingMethod(self.getProcessingMethod())\n\t\treturn curve\n\n\tdef getParameters(self):\n\t\treturn dict(processing_method = self.getProcessingMethod(),\n\t\t\twidth_level = str(self.getTuningWidthLevel()))\n\n\tdef setParameters(self, d):\n\t\tself._setProcessingMethod(d['processing_method'])\n\t\tself._setTuningWidthLevel(d['width_level'])\n\n\n\n\n\t# arithmetic\n\n\tdef _getBaseClass(self):\n\t\treturn OrientationTuningDynamics\n\n\tdef _getAdditionResultClass(self):\n\t\treturn AdditionOrientationTuningDynamics\n\n\tdef _getSubstractionResultClass(self):\n\t\treturn SubstractionOrientationTuningDynamics\n\n\tdef _getMultiplicationResultClass(self):\n\t\treturn MultiplicationOrientationTuningDynamics\n\n\n\t# plotting\n\n\tdef plotPreferredOrientation(self, ax = None):\n\t\tif ax is None:\n\t\t\tax = plt.axes()\n\t\t\tax.grid()\n\t\tax.get_xaxis().set_ticklabels([])\n\t\tax.set_ylabel(u\"сдвиг ориент., град.\")\n\t\tax.set_title(u\"предпочитаемая ориентация\")\n\t\tax.set_ylim([-90, 90])\n\t\tticks = np.arange(-90, 90.2, 22.5)\n\t\tax.get_yaxis().set_ticks(ticks)\n\t\ttheta0 = self.getPreferredOrientations()[0]\n\t\tax.plot(self.getTimeValues(), self.getPreferredOrientations()-theta0, \"bo-\")\n\t\treturn ax\n\n\tdef plotTuningWidth(self, ax = None):\n\t\tif ax is None:\n\t\t\tax = plt.axes()\n\t\t\tax.grid()\n\t\tax.get_xaxis().set_ticklabels([])\n\t\tax.set_ylabel(u\"град.\")\n\t\tax.set_ylim([0, 180])\n\t\tax.set_title(u\"ширина настройки\")\n\t\tax.plot(self.getTimeValues(), self.getTuningWidths(), \"bo-\")\n\t\treturn ax\n\n\tdef plotSelectivity(self, ax = None):\n\t\tif ax is None:\n\t\t\tax = plt.axes()\n\t\t\tax.grid()\n\t\tax.set_xlabel(u\"время, мс\")\n\t\tax.set_ylim([0, 1])\n\t\tax.set_title(u\"селективность\")\n\t\tax.plot(self.getTimeValues(), 
self.getSelectivities(), \"bo-\")\n\t\treturn ax\n\n\n\tdef plotAll(self, fig = None):\n\t\t'''\n\t\tplots all the information about orientation tuning dynamics\n\t\t'''\n\t\tif fig is None:\n\t\t\tfig = plt.figure(figsize = (100.0, 100.0))\n\t\t\tfig.suptitle(self.getFullname())\n\t\tax1 = fig.add_subplot(311)\n\t\tplt.grid()\n\t\tself.plotPreferredOrientation(ax1)\n\t\tax2 = fig.add_subplot(312)\n\t\tplt.grid()\n\t\tself.plotTuningWidth(ax2)\n\t\tax3 = fig.add_subplot(313)\n\t\tplt.grid()\n\t\tself.plotSelectivity(ax3)\n\t\t# fig.tight_layout()\n\t\treturn (ax1, ax2, ax3)\n\n\n\t# distance between two tuning curves\n\n\tdef _getDistance(self, other):\n\t\tfrom OrientationTuningData import OrientationTuningData\n\t\tpo1 = self.getPreferredOrientations()\n\t\tpo2 = other.getPreferredOrientations()\n\t\ttv = self.getTimeValues().size\n\t\tpo = np.zeros(tv)\n\t\tsum = 0\n\t\tfor ind in range(tv):\n\t\t\toriDif = OrientationTuningData.getOrientationDifference(po1[ind], po2[ind])\n\t\t\tsum += oriDif*oriDif\n\t\treturn np.sqrt(sum)\n\n\n\n\n\n\n\n\n\n###################################### OrientationTuningDynamics ##############################################\n\nclass NativeOrientationTuningDynamics(OrientationTuningDynamics, NativeTuningDynamicsDecorator):\n\t'''\n\tAchieves, represents, processes, loads and saves the orientation tuning dynamics\n\t'''\n\n\tdef setProcessingMethod(self, value):\n\t\t'''\n\t\tSets a current processing method for a PSTH\n\t\t'''\n\t\tself._setProcessingMethod(value)\n\n\tdef setTuningWidthLevel(self, value):\n\t\t'''\n\t\tSets an appropriate tuning width level\n\t\t'''\n\t\tself._setTuningWidthLevel(value)\n\nclass AdditionOrientationTuningDynamics(AdditionTuningDynamics, OrientationTuningDynamics):\n\tpass\n\nclass SubstractionOrientationTuningDynamics(SubstractionTuningDynamics, OrientationTuningDynamics):\n\tpass\n\nclass MultiplicationOrientationTuningDynamics(MultiplicationTuningDynamics, OrientationTuningDynamics):\n\tpass","sub_path":"kozhukhov/extracellular/OrientationTuningDynamics.py","file_name":"OrientationTuningDynamics.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432649195","text":"class Book:\n booklist=[{'booknum':1,'title':'murder','publisher':'kfq','author':'hwang','description':'how to kill a person','ea':60},\n {'booknum':2,'title':'falling airplane','publisher':'England','author':'Johnson','description':'why?why?why?why?why?','ea':100},\n {'booknum':3,'title':'paper lady','publisher':'France','author':'GM','description':'romantic','ea':10}]\n idx = 3\n same = 0\n\n def firstinput(self):\n choice=input('''\n 다음 중에서 하실 일을 골라주세요 :\n L - 책 목록 \n A - 책 등록\n U - 책 정보 수정\n B - 책 대여\n D - 책 삭제\n Q - 프로그램 종료\n ''').upper() \n return choice\n\n def exe(self, choice):\n if choice=='L':\n self.blist()\n elif choice=='A':\n self.add()\n elif choice=='U':\n self.update()\n elif choice=='B':\n self.borrow()\n elif choice=='D': \n self.delete() \n elif choice=='Q':\n quit() \n\n def blist(self):\n print(\"책 목록\")\n print(\"{:3} {:20} {:15} {:10} {:40} {:10}\".format('no','title','publisher','author','description','ea'))\n for i in self.booklist:\n print(\"{:>3} {:20} {:15} {:10} {:40} {:<10}\".format(i['booknum'],i['title'],i['publisher'],i['author'],i['description'],i['ea']))\n print()\n\n def add(self):\n print(\"책 등록\")\n while True:\n same = 0 \n book = {'booknum':'', 'title':'', 'publisher':'','author':'','description':'','ea':''}\n 
book['booknum'] = int(self.idx+1)\n            book['title'] = str(input(\"책 제목을 입력하세요. : \"))\n\n            for i in self.booklist:\n                if i['title'] == book['title']:\n                    print(\"같은 이름의 책이 있습니다.\")\n                    same = 1\n                    while True:\n                        add = input(\"추가 수량을 입력하세요. \")\n                        if add.isdecimal():\n                            i['ea'] += int(add)\n                            break\n                        else:\n                            print(\"숫자를 입력하세요.\")\n                    break\n            if same == 0:\n                book['publisher'] = str(input(\"출판사를 입력하세요. : \"))\n                book['author'] = str(input(\"저자를 입력하세요. : \"))\n                book['description'] = str(input(\"설명을 입력하세요. : \"))\n                while True:\n                    book['ea'] = str(input(\"재고를 입력하세요. : \"))\n                    if book['ea'].isdecimal():\n                        book['ea'] = int(book['ea'])\n                        break\n                    else:\n                        print(\"숫자를 입력하세요.\")\n\n                self.booklist.append(book)\n                self.idx+=1\n\n            print(\"{:3} {:20} {:15} {:10} {:40} {:10}\".format('no','title','publisher','author','description','ea'))\n            for i in self.booklist:\n                print(\"{:>3} {:20} {:15} {:10} {:40} {:<10}\".format(i['booknum'],i['title'],i['publisher'],i['author'],i['description'],i['ea']))\n            print()\n            break\n\n    def update(self):\n        print(\"고객 정보 수정\")\n        print(\"{:3} {:20}\".format('no','title'))\n        for i in self.booklist:\n            print(\"{:>3} {:20}\".format(i['booknum'],i['title']))\n        print()\n\n        while True:\n            updatebook = input(\"수정하려는 책의 제목을 입력하세요. : \")\n            page = -1\n            for i in range(0, len(self.booklist)):\n                if self.booklist[i]['title'] == updatebook:  # i-th entry, key value (email)\n                    page = i\n            if page == -1:\n                print(\"목록에 없는 책입니다.\")\n                break\n            updatemenu = input(\"\"\"\n            다음 중 수정하실 정보를 입력하세요.\n            publisher, author, description\n            (수정할 정보가 없으면 exit 입력)\n            \"\"\")\n            if updatemenu in ('publisher', 'author', 'description'):\n                self.booklist[page][updatemenu] = input(\"수정할 {}을 입력하세요. : \".format(updatemenu))\n                for i in self.booklist:\n                    print(i['title'],end=\" \")\n                print()\n                break\n            elif updatemenu == 'exit':\n                break\n            else:\n                print(\"존재하지 않는 정보입니다.\")\n                break\n\n    def borrow(self):\n        print(\"{:3} {:20} {:10}\".format('no','title', 'ea'))\n        for i in self.booklist:\n            print(\"{:>3} {:20} {:<10}\".format(i['booknum'],i['title'],i['ea']))\n        print()\n\n        while True:\n            updatebook = input(\"대여하려는 책의 제목을 입력하세요. : \")\n            page = -1\n            for i in range(0, len(self.booklist)):\n                if self.booklist[i]['title'] == updatebook:  # i-th entry, key value (email)\n                    page = i\n            if page == -1:\n                print(\"목록에 없는 책입니다.\")\n                break\n            while True:\n                borrow = str(input(\"대여할 수량을 입력하세요. : \"))\n                if borrow.isdecimal():\n                    borrow = int(borrow)\n                    imsi = self.booklist[page]['ea'] - borrow\n                else:\n                    print(\"숫자를 입력하세요.\")\n                    continue\n\n                if imsi < 0:\n                    print(\"대여할 수량이 재고보다 많습니다. 다시 입력하세요.\")\n                else:\n                    self.booklist[page]['ea'] = imsi\n                    break\n            if imsi >= 0:\n                break\n        \n        print(\"{:3} {:20} {:10}\".format('no','title', 'ea'))\n        for i in self.booklist:\n            print(\"{:>3} {:20} {:<10}\".format(i['booknum'],i['title'],i['ea']))\n        print()\n\n    def delete(self):\n        print(\"책 삭제\")\n        print(\"{:3} {:20}\".format('no','title'))\n        for i in self.booklist:\n            print(\"{:>3} {:20}\".format(i['booknum'],i['title']))\n        print()\n\n        delete = input(\"삭제하려는 책의 제목을 입력하세요. 
: \")\n delok = 0\n for i in range(0, len(self.booklist)):\n if self.booklist[i]['title'] == delete:\n print(\"{} 책의 정보가 삭제되었습니다.\".format(self.booklist[i]['title']))\n del self.booklist[i]\n delok = 1\n if delok == 1:\n break\n if delok == 0:\n print(\"목록에 없는 책입니다.\")\n\n def __init__(self):\n while True:\n self.exe(self.firstinput())\n\nBook()","sub_path":"book/book_class.py","file_name":"book_class.py","file_ext":"py","file_size_in_byte":7101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"136101991","text":"import logging\nimport azure.functions as func\n\nfrom .calculate import calculate_confidence\nfrom .get_tecdoc import get_tecdec\nfrom .middleware import Middleware\nfrom bson.json_util import dumps\nimport time\n\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info('Python HTTP trigger function processed a request.')\n error_flag_1 = False\n\n try:\n req_body = req.get_json()\n except ValueError:\n error_flag_1 = True\n error_msg_1 = 1015 #\"Could not parse request body.\"\n else:\n middleware = Middleware() \n sanity_check = middleware.check_all_fields(req_body)\n '''\n if middleware_out[0]:\n sanity_check = middleware.check_all_fields(req_body)\n else:\n sanity_check = True, middleware_out[1]\n '''\n if error_flag_1 :\n return func.HttpResponse(str({\"ErrorCodes\":[error_msg_1]}),status_code= 400)\n\n elif sanity_check[0]:\n return func.HttpResponse(str({'ErrorCodes':sanity_check[1]}),status_code= 400) \n \n else:\n t1 = time.time()\n try:\n output = calculate_confidence(get_tecdec(), req_body['Mileage'], 20000)\n except Exception as e:\n if(e.args[0] ==100):\n return func.HttpResponse(str({\"ErrorCodes\":[1013]}),status_code=603) \n else:\n return func.HttpResponse(str({\"ErrorCodes\":[1014]}),status_code=603) \n t2 = time.time()\n if(output[0]):\n return func.HttpResponse(str({\"ErrorCodes\":[1016]}),status_code=602) \n else:\n response = {\"RequestID\": req_body[\"RequestId\"],\n \"TimeElapsed \" : t2-t1,\n \"NoOfPredictions\": len(output[1]),\n \"ConfidenceThreshold\":5,\n \"DefectPredictions\": output[1] \n }\n return func.HttpResponse(dumps(response),status_code=200) \n\n#return func.HttpResponse(\"db connection error\",status_code=603) \n \n\n\n\n","sub_path":"get_prediction/__init__old.py","file_name":"__init__old.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"608910410","text":"import os\nimport uuid\nfrom flask import (Blueprint, render_template, request,\n redirect, url_for, session)\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, FileField, SubmitField, IntegerField\nfrom werkzeug.utils import secure_filename\nfrom flask3.utils import get_data, update_data\n\nproducts = Blueprint('products', __name__,\n template_folder='templates',\n static_folder='static',\n static_url_path='/products_blueprint/static')\n\nDATA_STORAGE = 'products_blueprint/product_data.json'\n\n\n@products.route('/product')\ndef get_all_products_page():\n data = get_data(DATA_STORAGE)\n\n if request.args:\n selected_products = []\n\n for product in data:\n for key, value in request.args.items():\n if not product.get(key) == value:\n break\n else:\n selected_products.append((product[\"id\"], product[\"name\"]))\n else:\n selected_products = [(product[\"id\"], product[\"name\"])\n for product in data]\n return render_template('all_products.html',\n 
selected_products=selected_products)\n\n\n@products.route('/product/<product_id>')\ndef get_product_page(product_id):\n    session[product_id] = 'clicked'\n    data = get_data(DATA_STORAGE)\n    product_data = [x for x in data if x[\"id\"] == product_id][0]\n    return render_template('product.html', product_data=product_data)\n\n\n@products.route('/product/add_product', methods=['GET', 'POST'])\ndef get_add_product_page():\n    form = AddNewProduct()\n    if request.method == 'POST':\n        filename = secure_filename(form.image.data.filename)\n        file_path = f'{os.getcwd()}/products_blueprint/static/{filename}'\n        form.image.data.save(file_path)\n        form_data = {\n            'id': str(uuid.uuid1()),\n            'name': form.name.data,\n            'description': form.description.data,\n            'img_name': form.image.data.filename,\n            'price': str(form.price.data)\n        }\n        update_data(DATA_STORAGE, form_data)\n        return redirect(url_for('products.get_all_products_page'))\n    return render_template('add_product.html', form=form)\n\n\nclass AddNewProduct(FlaskForm):\n    name = StringField('Name')\n    description = StringField('Description')\n    image = FileField()\n    price = IntegerField('Price')\n    submit = SubmitField('Add product')\n","sub_path":"flask3/products_blueprint/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"265457899","text":"\r\nfrom tkinter import *\r\nimport sys\r\n\r\ntop = Tk()\r\n\r\ndef helloCallBack():\r\n    print(\"Hello %s\" % (T.get()))\r\n\r\nlabel = Label(text = \"What would you like to do?\")\r\nB = Button(top, text =\"Hello\", command = helloCallBack, width =10)\r\n\r\ntop.title(\"Not Skype\")\r\n\r\nmenubar = Menu(top)\r\nmenubar.add_command(label=\"File\")\r\n\r\nmenubar.add_command(label=\"Quit\",command=sys.exit)\r\ntop.config(menu=menubar)\r\nfilemenu = Menu(menubar, tearoff=0)\r\nfilemenu.add_command(label=\"Open\")\r\nmenubar.add_cascade(label=\"File\", menu=filemenu)\r\n \r\nT = Entry(top)\r\nlabel.pack()\r\nT.pack()\r\nB.pack()\r\ntop.mainloop()\r\n","sub_path":"python script 2.py","file_name":"python script 2.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"105672048","text":"\n\n# class header\nclass _UNCLE():\n\tdef __init__(self,): \n\t\tself.name = \"UNCLE\"\n\t\tself.definitions = [u\"the brother of someone's mother or father, or the husband of someone's aunt or uncle: \", u'any male adult that you know who is older than you: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_uncle.py","file_name":"_uncle.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"437830846","text":"\n\n\n\n\"\"\"\n\n\nAdding Items to a Set\n\nWe can add elements to a set by using add() method. 
\nAgain as discussed there is no specific index attached to the newly added element.\n\n\"\"\"\n\nDays=set([\"Mon\",\"Tue\",\"Wed\",\"Thu\",\"Fri\",\"Sat\"])\n \nDays.add(\"Sun\")\nprint(Days)\n\n\n\"\"\"\n\nWhen the above code is executed, it produces the following result.\n\nset(['Wed', 'Sun', 'Fri', 'Tue', 'Mon', 'Thu', 'Sat'])\n\n\"\"\"\n\n\n\n\n\n","sub_path":"Python3_Data_Structure/08_Set/03_Set.py","file_name":"03_Set.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203958859","text":"\"\"\"myproyect URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom rest_framework import routers\nfrom django.urls import path\nfrom . import views\nurlpatterns = [\n path('tasks', views.tasks, name='tasks'),\n path('inserttask', views.insert_task, name='inserttask'),\n path('updatetask', views.update_task, name='updatetask'),\n path('gettask/', views.get_task, name='gettask'),\n path('deletetask/', views.delete_task, name='deletetask'),\n path('validserver', views.validserver, name='validserver'),\n\n]\n\n# from django.urls import include, path\n# from rest_framework import routers\n# from . import views\n#\n# router = routers.DefaultRouter()\n# router.register(r'tasks', views.tasks, basename='tasks')\n#\n# # Wire up our API using automatic URL routing.\n# # Additionally, we include login URLs for the browsable API.\n# urlpatterns = [\n# path('', include(router.urls)),\n# path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n# ]","sub_path":"python/myproyect/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412449764","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport playlist.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('playlist', '0004_auto_20150806_1331'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='playlist',\n name='desc',\n field=models.CharField(max_length=100, blank=True, verbose_name='Description', null=True),\n ),\n migrations.AlterField(\n model_name='playlist',\n name='name',\n field=models.CharField(max_length=30, verbose_name='Name *'),\n ),\n migrations.AlterField(\n model_name='songfile',\n name='song',\n field=models.FileField(blank=True, null=True, upload_to=playlist.models.newpath),\n ),\n migrations.AlterUniqueTogether(\n name='playlist',\n unique_together=set([]),\n ),\n ]\n","sub_path":"playlist/migrations/0005_auto_20150807_1902.py","file_name":"0005_auto_20150807_1902.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"443662057","text":"from __future__ import unicode_literals\nimport json\n\nfrom django.contrib.auth.models import User\nfrom 
filer.models.filemodels import File\nfrom filer.fields.file import FilerFileField\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom ojuser.models import GroupProfile\nfrom django.core.cache import cache\nfrom bojv4.settings import BASE_DIR\n\n# from filer.fields.file import FilerFileField\n\n\nclass ProblemTag(models.Model):\n    name = models.CharField(max_length=50)\n\n\nclass Problem(models.Model):\n\n    FORBIDDEN_TIMEOUT = 24 * 3600 * 60\n    HITS_LIMIT = 30\n    SUBMIT_INTERVAL = 300\n    \n\n    title = models.CharField(max_length=50, default='Untitled')\n    time_limit = models.IntegerField(default=1000)\n    memory_limit = models.IntegerField(default=65536)\n    code_length_limit = models.IntegerField(default=65536)\n    desc = models.TextField(default='None')\n    is_checked = models.BooleanField(default=False)\n    superadmin = models.ForeignKey(User)\n    created_time = models.DateTimeField(auto_now_add=True)\n    last_updated_time = models.DateTimeField(auto_now=True)\n    # allowed_lang = models.ManyToManyField('ojuser.Language', related_name='problems')\n    groups = models.ManyToManyField(GroupProfile, blank=True, related_name='problems')\n    tags = models.ManyToManyField(ProblemTag, blank=True, related_name='problems')\n    is_spj = models.BooleanField(default=False)\n\n    def __unicode__(self):\n        return self.title\n\n    def get_absolute_url(self):\n        return reverse('problem:problem-detail', kwargs={'pk': self.pk})\n\n    def view_by_user(self, user):\n        for g in self.groups.all():\n            if user.has_perm('ojuser.view_groupprofile', g):\n                return True\n        return False\n\n    @classmethod\n    def is_forbid(cls, user):\n        forbid_key = 'forbid_submit_' + user.username\n        if cache.get(forbid_key):\n            return True\n        return False\n    \n    def forbid(self, user):\n        cache_key = str(self.pk) + \"_forbid_\" + user.username\n        hits = cache.get(cache_key)\n        if not hits:\n            cache.set(cache_key, 1, self.SUBMIT_INTERVAL)\n            return False\n        forbid_key = 'forbid_submit_' + user.username\n        if cache.get(forbid_key):\n            return True\n        cache.incr(cache_key, 1)\n        if hits > self.HITS_LIMIT:\n            cache.set(forbid_key, 1, self.FORBIDDEN_TIMEOUT)\n            return True\n        return False\n    \n    def description(self):\n        if not hasattr(self, '_desc'):\n            try:\n                self._desc = json.loads(self.desc)\n            except:\n                self._desc = {'desc': self.desc, 'sample_in': '', 'sample_out': ''}\n        return self._desc['desc']\n\n    def sample_in(self):\n        if not hasattr(self, '_desc'):\n            try:\n                self._desc = json.loads(self.desc)\n            except:\n                self._desc = {'desc': self.desc, 'sample_in': '', 'sample_out': ''}\n        return self._desc['sample_in']\n\n    def sample_out(self):\n        if not hasattr(self, '_desc'):\n            try:\n                self._desc = json.loads(self.desc)\n            except:\n                self._desc = {'desc': self.desc, 'sample_in': '', 'sample_out': ''}\n        return self._desc['sample_out']\n\n    def check_data(self):\n        data_info = self.datainfo\n        in_set = set()\n        out_set = set()\n        mp = {}\n        self.cases.all().delete()\n        for f in data_info.all():\n            data = f.data\n            path = data.path\n            filename = path[path.rfind('/') + 1:]\n            mp[filename] = data\n            if path.endswith('.in'):\n                if filename in in_set:\n                    return False\n                in_set.add(filename)\n            elif path.endswith('.out'):\n                if filename in out_set:\n                    return False\n                out_set.add(filename)\n        if len(in_set) != len(out_set):\n            return False\n        cases = []\n        for x in in_set:\n            case = ProblemCase()\n            case.problem = self\n            case.input_data = mp[x]\n            case.position = len(cases)\n            outfile = x[:-len('.in')] + '.out'\n            if outfile in out_set:\n                case.output_data = mp[outfile]\n            else:\n                
return False\n case.gen_sample_data()\n cases.append(case)\n for cas in cases:\n cas.save()\n return True\n\n def get_problem_data(self):\n resp = []\n p_count = 0\n for cas in self.cases.all():\n in_data = cas.input_data.path\n out_data = cas.output_data.path\n resp.append({\n 'in': in_data,\n 'out': out_data,\n 'position': p_count\n })\n p_count += 1\n return resp\n\n def get_position_data(self, position):\n if not self.cases or self.cases.count() <= position:\n return None, None\n case = self.cases.all()[position]\n return case.sample_in, case.sample_out\n\n def get_score(self, position):\n if not isinstance(position, int) or position >= self.cases.count():\n raise Exception(\"param 'position' is invalid.\")\n return self.cases.all()[position].score\n\n @property\n def score(self):\n res = 0\n for c in self.cases.all():\n res += c.score\n return res\n\n class Meta:\n permissions = (\n ('view_problem', 'Can view problem'),\n )\n\n\ndef upload_dir(instance, filename):\n return 'documents/{0}/{1}'.format(instance.problem.pk, str(filename))\n\n\nclass ProblemDataInfo(models.Model):\n problem = models.ForeignKey(Problem, related_name=\"datainfo\")\n data = models.OneToOneField(File, null=True, blank=True, related_name=\"datainfo\")\n\n def __unicode__(self):\n return str(self.problem.pk) + \" \" + str(self.pk)\n\n\nclass ProblemCase(models.Model):\n problem = models.ForeignKey(Problem, related_name=\"cases\")\n input_data = models.OneToOneField(File, null=True, blank=True, related_name=\"incase\")\n output_data = models.OneToOneField(File, null=True, blank=True, related_name=\"outcase\")\n sample_in = models.CharField(max_length=256, blank=True, null=True)\n sample_out = models.CharField(max_length=256, blank=True, null=True)\n score = models.IntegerField(default=0)\n position = models.IntegerField(default=0)\n info = models.TextField(blank=True)\n\n def __unicode__(self):\n return str(self.problem.pk) + \":\" + str(self.pk)\n\n @property\n def input_name(self):\n path = self.input_data.path\n return path[path.rfind('/') + 1:]\n\n @property\n def output_name(self):\n path = self.output_data.path\n return path[path.rfind('/') + 1:]\n\n @staticmethod\n def get_data_from_file(path, limit=200):\n data = ''\n with open(path, 'r') as f:\n data = f.read(limit)\n return data\n\n def gen_sample_data(self):\n self.sample_in = self.get_data_from_file(self.input_data.path, 200)\n self.sample_out = self.get_data_from_file(self.output_data.path, 200)\n\n\n\n","sub_path":"problem/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533141023","text":"# !/usr/bin/python\n# coding: utf-8\n\nfrom NativeWifiApi import *\n\n\ndef get_wireless_interfaces():\n \"\"\"\n 枚举无线接口上的WirelessInterface\n\n :return: 包含Interface的list\n \"\"\"\n interfaces_list = []\n handle = WlanOpenHandle() # 获得wlan句柄\n wlan_ifaces = WlanEnumInterfaces(handle) # 获得当前能用的wlan\n\n data_type = wlan_ifaces.contents.InterfaceInfo._type_ # 数组单个元素的数据类型\n num = wlan_ifaces.contents.NumberOfItems # 接口数量\n ifaces_pointer = addressof(wlan_ifaces.contents.InterfaceInfo)\n wlan_interface_info_list = (data_type * num).from_address(ifaces_pointer) # 根据初始地址来找到所有InterfaceInfo\n for wlan_interface_info in wlan_interface_info_list:\n wlan_iface = Interface(wlan_interface_info) # 构建成Interface对象\n interfaces_list.append(wlan_iface)\n WlanFreeMemory(wlan_ifaces)\n WlanCloseHandle(handle)\n return interfaces_list\n\n\ndef 
get_wireless_available_network_list(wireless_interface):\n \"\"\"\n 获得当前无线环境中的可用无线信息\n\n :param wireless_interface: wlan句柄\n \"\"\"\n networks = []\n handle = WlanOpenHandle() # 获得wlan句柄\n network_list = WlanGetAvailableNetworkList(handle, wireless_interface.guid) #\n data_type = network_list.contents.Network._type_\n num = network_list.contents.NumberOfItems\n network_pointer = addressof(network_list.contents.Network)\n networks_list = (data_type * num).from_address(network_pointer)\n for network in networks_list:\n networks.append(Network(network))\n WlanFreeMemory(networks_list)\n WlanCloseHandle(handle)\n return networks\n\n\ndef get_wireless_profiles(wireless_interface):\n \"\"\"\n 获取WirelessProfile对象的list\n\n :param wireless_interface: Interface对象\n :return: 包含Profile对象的list\n \"\"\"\n profiles = []\n handle = WlanOpenHandle() # 获得wlan句柄\n profile_list = WlanGetProfileList(handle, wireless_interface.guid)\n data_type = profile_list.contents.ProfileInfo._type_ # ProfileInfo的数据类型\n num = profile_list.contents.NumberOfItems # ProfileInfo的个数\n profile_info_pointer = addressof(profile_list.contents.ProfileInfo)\n profiles_list = (data_type * num).from_address(profile_info_pointer)\n xml_data = None # 确保xml为空\n for profile in profiles_list:\n xml_data = WlanGetProfile(handle, wireless_interface.guid, profile.ProfileName)\n profiles.append(Profile(profile, xml_data.value))\n WlanFreeMemory(xml_data)\n WlanFreeMemory(profiles_list)\n WlanCloseHandle(handle)\n return profiles\n","sub_path":"PyWiWi/WindowsWifi/WifiApi.py","file_name":"WifiApi.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"208610839","text":"#!/usr/bin/python3\n#### Othello Shell\n#### Updated by P. Gabor Jan 28, 2018\n#### Originally by P. 
White 2016-2018\n\nimport random, sys, os\nimport re\nimport time\nimport multiprocessing\nimport subprocess\n\n\ndef findIdx(lst, pattern):\n # returns the first index where the rgex matches, else -1\n for i, v in enumerate(lst):\n if re.search(pattern, v): return i\n return -1\n\n\ndef findIdxs(lst, pattern):\n # returns the indeces where the rgex matches, else []\n return [i for i, v in enumerate(lst) if re.search(pattern, v)]\n\n\ndef findFile(*fileSpec):\n mypath = os.getcwd()\n for fs in fileSpec:\n if fs == \"random\": return {\"random\"}\n if fs[-3:]==\".py\": # If explicit filename is provided, then we must mean that one without capitalization\n if os.path.isfile(fs): return {fs}\n else: # If unique prefix is provided\n fs = fs.lower()\n for (dirpath, dirnames, filenames) in os.walk(mypath):\n setOfScripts = {*filenames} # Gets all files in the path\n break\n setOfScripts = {s for s in setOfScripts if s[:len(fs)].lower()==fs and s[-3:]==\".py\"}\n if len(setOfScripts)==1: return setOfScripts\n return set()\n\n\ndef rngLim(c, d, n):\n if (abs(d) - 1) % (n-1): # if diagonal direction\n return n-max(n-1-c%n if (d-1)%n else c%n, n-1-c//n if d<0 else c//n)\n return (n if d>0 else 1) - (d // abs(d)) * (c%n if d%n else c//n)\n\n\ndef legalMoves(othelloBoard, token):\n moves = {}\n for idx in [idx for idx,tkn in enumerate(othelloBoard) if tkn==dot]:\n for dir, lim in dirrng[idx]:\n for p in range(idx+dir,lim,dir):\n if othelloBoard[p]==\".\": break\n if othelloBoard[p]==token:\n if p==idx+dir: break\n if idx not in moves: moves[idx] = set()\n moves[idx].update(range(idx+dir,p,dir))\n break\n return moves\n \n\ndef makeMove(board, token, mv, affects):\n # affects are the positions of enemy tokens which flip\n for i in affects:\n board = board[:i] + token + board[i+1:]\n return board[:mv] + token + board[mv+1:]\n\n\ndef showBoard(board, mvNum, players, tokens, player, token, mv):\n if token:\n print(\"Game {}, Move {}; Player {} as {} vs. Player {} as {}\".format(\n gameNum, mvNum, players[0], tokens[0], players[1], tokens[1]))\n print(\"Token {} moves to {} => X={} vs. O={}\".format(token, mv, board.count('X'), board.count('O')))\n print(\"\\n\".join([str(i+1) + \" \" + \" \".join(board[i*sL:i*sL+sL]) for i in range(sL)]))\n print(\"\\n A B C D E F G H\\n\")\n\n\n\n\n\ndef parseArgs():\n # determine the board (defaults to standard starting board)\n # determine the token to play. Defaults to X or O as board.count('.') unless only one side has a legal move\n # determine the contestants - one must be specified.\n # the second defaults to BaselineRandom, if it exists, else to first player\n # rounds to play, defaults to 1 (ie. 
2 games)\n # secondsPerMove specified by prefixing a number with an s, defaults to 5\n args = sys.argv[1:]\n idxBrd = findIdx(args, \"^[xXoO.]$\")\n board = \".\"*27 + \"OX......XO\" + \".\"*27\n if idxBrd>=0:\n board = args[idxBrd].upper()\n del args[idxBrd]\n\n idxToken = findIdx(args, \"^[xXoO]$\")\n token = \"XO\"[board.count('.') % 2] if idxToken < 1 else args[idxToken].upper()\n if not legalMoves(board, token): token = \"XO\"[\"OX\".find(token)] # {'O':'X', 'X':'O'}[token]\n\n idxContestants = findIdxs(args, \"^(.{3,}|[^sS.0-9]|[sS][^0-9.])\")\n help = \"Usage: contest [board] contestant1pfx [contestant2pfx] [rndsToPlay=1] [s+secsPerMove]\"\n if not idxContestants or len(idxContestants)>2: exit(help)\n\n scripts = findFile(args[idxContestants[0]])\n if not len(scripts): exit(\"Contestant1 not found\\n\" + help)\n if len(scripts)>2: exit(\"Contestant1 prefix not unique: {}\\n{}\".format(scripts, help))\n contestant1 = scripts.pop()\n\n if len(idxContestants)==1:\n contestant2 = \"random\" # \"BaselineRandom.py\" if os.path.isfile(\"BaselineRandom.py\") else contestant1\n else:\n scripts = findFile(args[idxContestants[1]])\n if not len(scripts): exit(\"Contestant2 not found\\n\" + help)\n if len(scripts)>2: exit(\"Contestant2 prefix not unique: {}\\n{}\".format(scripts, help))\n contestant2 = scripts.pop()\n\n idxRounds = findIdx(args, \"^(\\\\d+[.]?\\\\d*|[.]\\\\d+)$\")\n gameCt = 2 if idxRounds < 0 else int(2*float(args[idxRounds])+.5)\n\n idxSecsPerMove = findIdx(args, \"^[sS](\\\\d+[.]?\\\\d*|[.]\\\\d+)$\")\n secsPerMove = 5 if idxSecsPerMove < 0 else float(args[idxSecsPerMove][1:])\n\n return board, token, [\"A-\"+contestant1, \"B-\"+contestant2], gameCt, secsPerMove\n\n\ndef getMove(board, playerScript, token, secsPerMove):\n # Preparation\n timedOut = False\n errOut, actualOut = \"\", \"\"\n # myargs = [ sys.executable , \"-u\", '\"{}\"'.format(playerScript) , board, token, \">out.txt\", \"2>err.txt\" ]\n # myargs = [ sys.executable , \"-u\", '\"{}\"'.format(playerScript) , board, token, \">{}\".format(OUTFILE), \"2>{}\".format(ERRFILE) ]\n\n running = multiprocessing.Value('i', 1)\n best_shared = multiprocessing.Value('i', -99)\n brd = '?'*11 + '??'.join([board[i:i+8] for i in range(0,64,8)]) + '?'*11\n brd = brd.replace('X', '@').replace('O', 'o')\n\n po = multiprocessing.Process(target=dctContestants[playerScript].best_strategy, \\\n args=(brd, \"@o\"[\"XO\".find(token)], best_shared, running))\n t1 = time.time()\n po.start()\n if po.is_alive():\n print (\"Putting time limit on move for {} \".format(playerScript), end='')\n po.join(secsPerMove)\n running.value = 0\n time.sleep(0.1)\n timedOut = po.is_alive()\n po.terminate()\n time.sleep(0.1)\n move = best_shared.value\n print (\"The raw best value found was '{}'\".format(move))\n if move > 0: move = 8 * (move // 10) + (move % 10) - 1 - 8\n print(\"In %4.2f secs, got move = %i\" % (time.time() - t1, move))\n\n if po.is_alive():\n# timedOut = True\n# errOut = \"Timed out\"\n if os.name != \"posix\":\n print (\"Initiating TASKKILL of {}\".format(po.pid))\n pok = subprocess.Popen(\"TASKKILL /F /PID {} /T\".format(po.pid))\n pok.wait(20) # waiting for TASKKILL to finish\n else:\n print(\"Process still alive at termination attempt\")\n\n if timedOut and not errOut: errOut = txtTimedOut\n print (\"playerScript: {} as {} ==> {}\".format(playerScript, token, move))\n return move, errOut, actualOut\n\n\n\n\n\ndef playGame(board, contestants, tokens, secsPerMove):\n # returns board, moveTranscript, mostRecentMove, errMsg, actualOut\n 
\n playerNum = 0 # index of player to move\n moves = [] # transcript of game\n lm = legalMoves(board, tokens[playerNum]) # possible moves\n\n while lm: # While a move is possible ...\n print (\"About to get move for {} ({}) from among {}\".format(tokens[playerNum], contestants[playerNum], {*lm.keys()}))\n# print (\"Contestant dictionary: {}\".format(dctContestants))\n if contestants[playerNum][2:] == \"random\":\n mv, msg, actual = random.choice([*lm]), \"\", \"\"\n else:\n mv, msg, actual = getMove(board, contestants[playerNum], tokens[playerNum], secsPerMove) # get a move\n if msg[:len(txtTimedOut)]==txtTimedOut:\n moves.append(-2)\n if msg==txtTimedOut: msg=\"\"\n\n mvCt = len({*moves}-{-1,-2})\n if msg or mv not in lm: # If error or move invalid\n # Report failure\n return [board, moves, mv, msg or \"Illegal move attempt of {}\".format(mv), actual]\n board = makeMove(board, tokens[playerNum], mv, lm[mv]) # Make the move\n moves.append(mv) # Record it\n\n\n showBoard(board, mvCt+1, contestants, tokens, contestants[playerNum], tokens[playerNum], mv)\n\n playerNum = 1-playerNum # Switch sides\n lm = legalMoves(board, tokens[playerNum])\n\n if not lm: # If no possible moves\n plyr = contestants[playerNum]\n playerNum = 1-playerNum # Player must pass\n lm = legalMoves(board, tokens[playerNum])\n if lm:\n moves.append(-1) # provided opponent can move\n print (\"\\nPlayer {} must pass.\\n\".format(plyr))\n\n return [board, moves, mv, \"\", actual] # game over\n\n\n\n\n\n\ndef main():\n global theX, theO, dot, sL, txtTimedOut, dirs, dirrng, dctContestants\n theX, theO, dot, sL = \"X\", \"O\", \".\", 8 # sL = side length\n txtTimedOut = \"Timed out\"\n\n\n startTime = time.time()\n pyver = re.sub(' [^Z]*', \"\", sys.version)\n print (\"{} running under python version {}\".format(os.path.basename(__file__), pyver))\n\n\n #### initialization\n # the set of directions in which one can go for making moves\n dirs = [{h+v for h in [-1,0,1] for v in [-sL,0,sL] for b in [c+h+v+h+v] \\\n if (b>=0)*(b=0)}-{0} for c in range(sL*sL)]\n # the direction together with the boundary of where one must check for bracketing (used in legalMoves)\n dirrng = [[(dir,idx+rngLim(idx,dir,sL)*dir) for dir in setOfDirs] for idx,setOfDirs in enumerate(dirs)]\n\n\n\n board, token, contestants, gameCt, secsPerMove = parseArgs()\n playerIdx = random.choice([0, 1]) # index of the player to go first\n aRes, tokens = [], [token, \"XO\"[\"OX\".find(token)]]\n# primaryTknCt, secondaryTknCt, gamesWon, gamesLost, worst2\n\n dctContestants = {cntstnt: \"\" for cntstnt in contestants}\n i = 0\n for cntstnt in dctContestants:\n if cntstnt[2:]==\"random\": continue\n imprt = \"import {} as con{}\\ndctContestants['{}'] = con{}.Strategy()\".format(cntstnt[2:][:-3], i, cntstnt, i)\n print (\"About to import\\n\" + imprt)\n exec(imprt)\n i += 1\n\n\n # Conduct the contest here\n global gameNum\n for gameNum in range(1,gameCt+1):\n # primaryPlayer is contestants[0]\n # secondaryPlayer is contestants[1]\n # player to play first is contestants[playerIdx]\n ## primaryPlayer token is \"XOX\"[tokens[0]==\"O\"+playerIdx]\n # primaryPlayer token is tokens[playerIdx]\n\n # returns board, moveTranscript, mostRecentMove, errMsg, actualOut\n print (\"About to start game\")\n res = playGame(board, contestants[::1-2*playerIdx], tokens, secsPerMove)\n# aRes.append(res + [\"XOX\"[(token==\"O\")+playerIdx], gameNum])\n aRes.append(res + [tokens[playerIdx], gameNum])\n# tokens = tokens[::-1]\n playerIdx = 1 - playerIdx\n\n # Compute the stats here\n ERRIDX, 
GAMENMIDX, PRIMARYTOKENIDX = 3, 6, 5\n aErr = [res for res in aRes if res[ERRIDX]]\n if aErr:\n print (aErr)\n print (\"{} error{} detected:\".format(len(aErr), \"s were\" if len(aErr) else \" was\"))\n for res in aErr:\n primaryTkn = res[PRIMARYTOKENIDX]\n print (\"\\nGame {}; {} as {} vs. {} as {}\".format(\n res[GAMENMIDX], contestants[0], primaryTkn, contestants[1], \"XO\"[\"OX\".find(primaryTkn)]))\n res1ct = len(res[1]) + 2*res[1].count(-1) - res[1].count(-2)\n tknToMove = \"XO\"[((token==\"O\") + res1ct) % 2]\n print (\"Board/tkn to move: {} {}\".format(res[0], tknToMove))\n showBoard(res[0], \"\", \"\", \"\", \"\", \"\", \"\")\n print (\"History: {}\\nLegal moves: {}\\n\".format(\" \".join([str(rs) for rs in res[1]]), {*legalMoves(res[0], tknToMove)}))\n if res[2]>=0: print(\"Move attempt: {}\".format(res[2]))\n print(\"Err msg:\\n \" + \"\\n \".join(res[ERRIDX].splitlines()))\n exit()\n\n\n scores = [(tknCt - (sL*sL - tknCt - res[0].count(dot)), idx) for idx, res in enumerate(aRes) \\\n for tknCt in [res[0].count(res[PRIMARYTOKENIDX])]]\n rest = [*range(len(scores))]\n if len(aRes)>20:\n scores.sort()\n worst = {scores[idx][1] for idx in range(3)}\n best = {scores[-idx-1][1] for idx in range(3)}\n rest = {*range(len(scores))} - worst - best\n rest = (sorted([*rest]) + [*best] + [*worst])[-20:]\n\n for idx in rest:\n res = aRes[idx]\n primaryTkn = res[PRIMARYTOKENIDX]\n ptc = res[0].count(primaryTkn)\n print (\"\\nGame {}; {} as {} vs. {} as {} ==> {} to {}\".format(\n res[GAMENMIDX], contestants[0], primaryTkn, contestants[1],\n \"XO\"[\"OX\".find(primaryTkn)], ptc, sL*sL - ptc - res[0].count(dot)))\n showBoard(res[0], \"\", \"\", \"\", \"\", \"\", \"\")\n print (\"History: {}\\n\".format(\" \".join([str(rs) for rs in res[1]])))\n\n\n won = sum([tknCt > sL*sL - tknCt - res[0].count(dot) for res in aRes for tknCt in [res[0].count(res[PRIMARYTOKENIDX])]])\n lost = sum([tknCt < sL*sL - tknCt - res[0].count(dot) for res in aRes for tknCt in [res[0].count(res[PRIMARYTOKENIDX])]])\n smry = \"Games won: {}; tied: {}; lost: {}\\n\".format(won, len(aRes)-won-lost, lost)\n tknCt = sum(res[0].count(res[PRIMARYTOKENIDX]) for res in aRes)\n dotCt = sum(res[0].count(dot) for res in aRes)\n nmyCt = sL*sL*len(aRes) - tknCt - dotCt\n tmtCt = sum(res[1].count(-2) for res in aRes)\n pct = \"{}\".format(100*tknCt / (tknCt + nmyCt))[:6]\n smry += \"Primary token count: {}; Enemy token count: {} ==> {}%\".format(tknCt, nmyCt, pct)\n if tmtCt: smry += \"\\nTimeouts: {}\\n\".format(tmtCt)\n print(smry)\n\n\n print (\"\\nScores: {}\".format(scores))\n\n\nif __name__ == '__main__':\n multiprocessing.freeze_support()\n main()\n\n","sub_path":"Artificial Intelligence/Semester 1/Othello/contestor.py","file_name":"contestor.py","file_ext":"py","file_size_in_byte":13143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319943682","text":"from django.db import models\nfrom .hwbi_outputs import DomainOut\n\n\nclass Domain(models.Model):\n \"\"\"HWBI Domain Model\"\"\"\n domainID = models.TextField(max_length=10, primary_key=True)\n domainName = models.TextField(max_length=25, null=True, blank=True)\n name = models.TextField(max_length=20)\n min = models.IntegerField\n max = models.IntegerField\n score = 0.0\n weight = 1.0\n\n def get_dict(self):\n dct = {}\n dct['domainID'] = self.domainID\n dct['description'] = self.domainName\n dct['domainName'] = self.name\n dct['min'] = self.min\n dct['max'] = self.max\n dct['unit'] = 'domain score'\n dct['type'] = 
'number'\n dct['score'] = self.score\n dct['weight'] = self.weight\n return dct\n\n def get_domain_out(self):\n domain_out = DomainOut()\n domain_out.domainID = self.domainID\n domain_out.description = self.domainName\n domain_out.name = self.name\n domain_out.score = self.score\n domain_out.weight = self.weight\n return domain_out\n\n def get_input_metadata(self):\n dct = {'name' : self.name, 'description' : self.domainName, 'value' : self.weight}\n return dct\n\n","sub_path":"models/domains.py","file_name":"domains.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520636034","text":"\nfrom datetime import datetime, timedelta\n\ndef round_to_nearest_hour(dt) -> datetime:\n \"\"\"\n This takes in a datetime (dt) and round it to the nearest hour\n \n ### Returns\n\n datetime\n\n ### Example\n >>> from datetime import datetime\n >>> dt = datetime(2021, 3, 6, 23, 3, 43, 123)\n >>> print(str(dt))\n\n 2021-03-06 23:03:43.000123\n >>> dt = round_to_nearest_hour(dt)\n >>> print(str(dt))\n\n 2021-03-06 23:00:00 \n \"\"\"\n dt_new = datetime(\n dt.year, \n dt.month, \n dt.day, \n dt.hour, \n 0, \n 0, \n 0\n )\n\n if dt.minute >= 30:\n #round up\n dt_new += timedelta(hours=1)\n\n return dt_new","sub_path":"datetime_helper.py","file_name":"datetime_helper.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"181801746","text":"from typing import Any, Dict, List\n\nfrom telegram.client import AsyncResult, Telegram\n\n\nclass Tdlib(Telegram):\n def download_file(\n self, file_id, priority=16, offset=0, limit=0, synchronous=False,\n ):\n result = self.call_method(\n \"downloadFile\",\n params=dict(\n file_id=file_id,\n priority=priority,\n offset=offset,\n limit=limit,\n synchronous=synchronous,\n ),\n block=False,\n )\n result.wait()\n\n def reply_message(\n self, chat_id: int, reply_to_message_id: int, text: str\n ) -> AsyncResult:\n data = {\n \"@type\": \"sendMessage\",\n \"chat_id\": chat_id,\n \"reply_to_message_id\": reply_to_message_id,\n \"input_message_content\": {\n \"@type\": \"inputMessageText\",\n \"text\": {\"@type\": \"formattedText\", \"text\": text},\n },\n }\n\n return self._send_data(data)\n\n def send_doc(self, file_path: str, chat_id: int) -> AsyncResult:\n data = {\n \"@type\": \"sendMessage\",\n \"chat_id\": chat_id,\n \"input_message_content\": {\n \"@type\": \"inputMessageDocument\",\n \"document\": {\"@type\": \"inputFileLocal\", \"path\": file_path},\n },\n }\n return self._send_data(data)\n\n def send_audio(self, file_path: str, chat_id: int) -> AsyncResult:\n data = {\n \"@type\": \"sendMessage\",\n \"chat_id\": chat_id,\n \"input_message_content\": {\n \"@type\": \"inputMessageAudio\",\n \"audio\": {\"@type\": \"inputFileLocal\", \"path\": file_path},\n },\n }\n return self._send_data(data)\n\n def send_photo(self, file_path: str, chat_id: int) -> AsyncResult:\n data = {\n \"@type\": \"sendMessage\",\n \"chat_id\": chat_id,\n \"input_message_content\": {\n \"@type\": \"inputMessagePhoto\",\n \"photo\": {\"@type\": \"inputFileLocal\", \"path\": file_path},\n },\n }\n return self._send_data(data)\n\n def send_video(\n self,\n file_path: str,\n chat_id: int,\n width: int,\n height: int,\n duration: int,\n ) -> AsyncResult:\n data = {\n \"@type\": \"sendMessage\",\n \"chat_id\": chat_id,\n \"input_message_content\": {\n \"@type\": \"inputMessageVideo\",\n \"width\": width,\n 
\"height\": height,\n \"duration\": duration,\n \"video\": {\"@type\": \"inputFileLocal\", \"path\": file_path},\n },\n }\n return self._send_data(data)\n\n def send_voice(\n self, file_path: str, chat_id: int, duration: int, waveform: int\n ):\n data = {\n \"@type\": \"sendMessage\",\n \"chat_id\": chat_id,\n \"input_message_content\": {\n \"@type\": \"inputMessageVoiceNote\",\n \"duration\": duration,\n \"waveform\": waveform,\n \"voice_note\": {\"@type\": \"inputFileLocal\", \"path\": file_path},\n },\n }\n return self._send_data(data)\n\n def edit_message_text(self, chat_id: int, message_id: int, text: str):\n data = {\n \"@type\": \"editMessageText\",\n \"message_id\": message_id,\n \"chat_id\": chat_id,\n \"input_message_content\": {\n \"@type\": \"inputMessageText\",\n \"text\": {\"@type\": \"formattedText\", \"text\": text},\n },\n }\n return self._send_data(data)\n\n def toggle_chat_is_marked_as_unread(\n self, chat_id: int, is_marked_as_unread: bool\n ) -> AsyncResult:\n data = {\n \"@type\": \"toggleChatIsMarkedAsUnread\",\n \"chat_id\": chat_id,\n \"is_marked_as_unread\": is_marked_as_unread,\n }\n return self._send_data(data)\n\n def toggle_chat_is_pinned(\n self, chat_id: int, is_pinned: bool\n ) -> AsyncResult:\n data = {\n \"@type\": \"toggleChatIsPinned\",\n \"chat_id\": chat_id,\n \"is_pinned\": is_pinned,\n }\n return self._send_data(data)\n\n def set_chat_nottification_settings(\n self, chat_id: int, notification_settings: dict\n ):\n data = {\n \"@type\": \"setChatNotificationSettings\",\n \"chat_id\": chat_id,\n \"notification_settings\": notification_settings,\n }\n return self._send_data(data)\n\n def view_messages(\n self, chat_id: int, message_ids: list, force_read: bool = True\n ) -> AsyncResult:\n data = {\n \"@type\": \"viewMessages\",\n \"chat_id\": chat_id,\n \"message_ids\": message_ids,\n \"force_read\": force_read,\n }\n return self._send_data(data)\n\n def open_message_content(\n self, chat_id: int, message_id: int\n ) -> AsyncResult:\n data = {\n \"@type\": \"openMessageContent\",\n \"chat_id\": chat_id,\n \"message_id\": message_id,\n }\n return self._send_data(data)\n\n def forward_messages(\n self,\n chat_id: int,\n from_chat_id: int,\n message_ids: List[int],\n as_album: bool = False,\n send_copy: bool = False,\n remove_caption: bool = False,\n options: Dict[str, Any] = {},\n ) -> AsyncResult:\n data = {\n \"@type\": \"forwardMessages\",\n \"chat_id\": chat_id,\n \"from_chat_id\": from_chat_id,\n \"message_ids\": message_ids,\n \"as_album\": as_album,\n \"send_copy\": send_copy,\n \"remove_caption\": remove_caption,\n \"options\": options,\n }\n return self._send_data(data)\n","sub_path":"tg/tdlib.py","file_name":"tdlib.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"119204024","text":"from PyQt5.QtGui import QColor, QImage, QPixmap, QPen, QPainter, QFont, QStandardItemModel\r\n\r\n\r\ndef dibuja_region(painter, primer_punto_x, primer_punto_y, segundo_punto_x, segundo_punto_y, color, px, offset_X=0, offset_Y=0):\r\n\r\n pen = QPen()\r\n pen.setColor(QColor(color))\r\n pen.setWidth(px)\r\n painter.setPen(pen)\r\n painter.drawRect(primer_punto_x-offset_X, primer_punto_y-offset_Y,\r\n segundo_punto_x-primer_punto_x, 
segundo_punto_y-primer_punto_y)\r\n","sub_path":"GUI/GUI_dibuja_region.py","file_name":"GUI_dibuja_region.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31203931","text":"import bpy\n\nfrom .. import gp_draw\n\n\nclass BGE_OT_fence_clear(bpy.types.Operator):\n bl_idname = \"bge.fence_clear\"\n bl_label = \"Clear Fences\"\n bl_description = \"Clears all drawn fences in the scene\"\n bl_options = {'REGISTER', 'UNDO'}\n\n @classmethod\n def poll(cls, context):\n return True\n\n def execute(self, context):\n gp_draw.clear()\n return {'FINISHED'}\n","sub_path":"addons/bundle_exporter/operators/op_fence_clear.py","file_name":"op_fence_clear.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"84650840","text":"from __future__ import print_function\nimport pandas as pd\nfrom os.path import splitext\nimport string\nimport re\nimport argparse\nfrom collections import Counter, defaultdict\nimport numpy as np\n\n__author__ = 'david_torrejon'\n\n\"\"\"\nTODO\n\nimplement the option to download data direct from snli webpage + uncompress...\nhttp://nlp.stanford.edu/projects/snli/snli_1.0.zip\n\nCLEAN the code\n\nAS OF 25/02 the coocurrence matrix works properly but its useless the text that is given to be generated.\n\n\"\"\"\n\n\n\"\"\"\n# all the functions here are prepared to deal with snli corpus provided, hence a list of strings, not a whole text.\n\nSimple Implementation of the GloVe model:\nJeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. GloVe: Global Vectors for Word Representation.\nhttp://nlp.stanford.edu/projects/glove/\n\nX -> co-ocurrence matrix\nXij = number of times j appears in context of i\nXi = SUMk(Xik)\nPij = P(j|i)= Xij/Xi Probability of j appear in i\n\nContext -> sentence\n\"\"\"\n\n'''\n\"sentence1_parse\": \"(ROOT (S (NP (CD Two) (NNS women)) (VP (VBP are) (VP (VBG embracing) (SBAR (IN while) (S (NP (VBG holding)) (VP (TO to) (VP (VB go) (NP (NNS packages)))))))) (. .)))\", \"sentence2\": \"The sisters are hugging goodbye while holding to go packages after just eating lunch.\", \"sentence2_binary_parse\": \"( ( The sisters ) ( ( are ( ( hugging goodbye ) ( while ( holding ( to ( ( go packages ) ( after ( just ( eating lunch ) ) ) ) ) ) ) ) ) . ) )\", \"sentence2_parse\": \"(ROOT (S (NP (DT The) (NNS sisters)) (VP (VBP are) (VP (VBG hugging) (NP (UH goodbye)) (PP (IN while) (S (VP (VBG holding) (S (VP (TO to) (VP (VB go) (NP (NNS packages)) (PP (IN after) (S (ADVP (RB just)) (VP (VBG eating) (NP (NN lunch))))))))))))) (. .)))\"}\n\"sentence2_parse\": \"(ROOT (S (NP (CD Two) (NN woman)) (VP (VBP are) (VP (VBG holding) (NP (NNS packages)))) (. .)))\"}\n\"sentence2_parse\": \"(ROOT (S (NP (DT The) (NNS men)) (VP (VBP are) (VP (VBG fighting) (PP (IN outside) (NP (DT a) (NNS deli))))) (. 
.)))\"}\n'''\n\n\nparser = argparse.ArgumentParser(description='Get File to process with GloVe')\nparser.add_argument('-f', metavar='file_name', type=str, nargs=1, help='name of the file')\n\n#some parameters\ndefault_folder = 'snli_1.0/'\nndimensions = 300\nzero = 0.0\n#not_useful tokens\nnon_useful_tokens = ['a', 'the', 'in', 'the', 'and', 'on', 'with', 'of', 'to', 'an', 'at',\n 'is', 'while']\n\n#functions\ndef test_init():\n glove_init()\n\ndef tokenize_sentence(sentence):\n regex = re.compile('[%s]' % re.escape(string.punctuation))\n sentence = regex.sub('', sentence).lower()\n tokenized_sentence = sentence.split(\" \")\n return tokenized_sentence\n\ndef token_count(corpus, ndim):\n tokens_dict = defaultdict(int) #empty dict faster access than checking if its in a list\n word2idx = {}\n idx = 1\n for sentence in corpus:\n tokens_sentence = tokenize_sentence(sentence)\n for token in tokens_sentence:\n #print (token)\n if token not in non_useful_tokens:\n tokens_dict[token]+=1\n if token not in word2idx:\n word2idx[token] = idx\n #print (token, idx)\n idx +=1\n\n #tokens_list = sorted(tokens_dict.items(), key=itemgetter(1), reverse=True)\n c = Counter(tokens_dict)\n common_tokens = c.most_common()\n print (common_tokens[:10])\n #print (sum(c.values()))\n # return 300 most common tokens\n # can return all and after matrix pick 300 better?\n return common_tokens, word2idx, idx\n\ndef convert_sentence_index(sentence, word2id):\n sindex = []\n tokens_sentence = tokenize_sentence(sentence)\n for token in tokens_sentence:\n if token not in non_useful_tokens:\n sindex.append(word2id[token])\n return sindex\n\ndef get_coocu_matrix(corpus, word2idx, nrows, test=True):\n # not proper for computations...would be better a np matrix but to represent... word -> index?\n cooc_m = np.zeros((nrows+1, nrows+1))\n #prob_m = np.zeros((nrows+1, nrows+1))\n print ('Matrix of size',cooc_m.size,'created...')\n print ('Generating coocurrence matrix...')\n \"\"\"\n 1 2 3 SUM-> first row and first column have indexes of words\n 1 x y z x+y+z\n 2 x1 y1 z1 x1+y1+z1\n 3 x2 y2 z2 x2+y2+z2\n easy to compute the probabilities...\n \"\"\"\n for sentence in corpus:\n idx_sent = convert_sentence_index(sentence, word2idx)\n for idx_s in idx_sent:\n cooc_m[idx_s][0]=idx_s\n for idx_w in idx_sent:\n cooc_m[0][idx_w]=idx_w\n cooc_m[idx_s][idx_w]+=1\n\n #compute probabilities\n first_row = True\n for row in cooc_m:\n if not first_row:\n row[nrows] = (np.sum(row))-row[0]\n first_row = False\n print('Counting done...')\n print('Generating probabilities...')\n\n #copyto(dst, src)\n prob_m = np.zeros((nrows+1, nrows+1))\n\n for i in range(1, nrows+1):\n if(i%500==0):\n print('Updated',i,'probabilities...')\n for j in range(nrows+1):\n if j > 0:\n prob_m[i][j] = cooc_m[i][j]/cooc_m[i][nrows]\n else:\n prob_m[i][j] = cooc_m[i][j]\n\n if test:\n value_test = word2idx['policeman']\n \n array_test = prob_m[value_test]\n top_n_values = -15 # needs to be negative to pick the last sorted values from np.array\n top_ten_ndarray = array_test.argsort()[top_n_values:][::-1]\n\n for key, value in word2idx.items():\n if value == value_test:\n print ('looking coocurrences for: ', key, 'with id', value)\n\n top_ten = top_ten_ndarray.tolist()\n\n top_ten.remove(value_test)\n\n for element in top_ten:\n for key, value in word2idx.items():\n if value == element:\n print (key,'(',value,') with probability:', prob_m[value_test][value])\n pass\n\n\n print('Probabilities generated')\n\ndef glove_init():\n try:\n args = parser.parse_args()\n file_name = 
default_folder + args.f[0]\n print(\"Reading file\", file_name, \"...\")\n file_path, file_ext = splitext(file_name)\n with open(file_name, 'rb') as f:\n data = f.readlines()\n data_json_str = \"[\" + ','.join(data) + \"]\"\n data_df = pd.read_json(data_json_str)\n print(file_ext,\"loaded...\")\n # sentence1_parse sentence2_parse, sentence1, sentence2\n keep_columns = ['sentence1','sentence2','sentence1_parse','sentence2_parse']\n data_df = data_df[keep_columns]\n sentences = data_df['sentence1'].tolist() + data_df['sentence2'].tolist()\n '''\n I dont know whether to delete 2 sentences from sentence 1, because there are\n 3 sentences meaning the same for every pack of sentencnes\n s1-s2 E, s1-s2 N, s1-s2 C, where s1 is always same\n '''\n # build dictionary?\n print(\"Counting word appearances...\")\n tokens, word2index, nidx = token_count(sentences, ndimensions)\n coocurrence_matrix = get_coocu_matrix(sentences, word2index, nidx)\n\n except BaseException as e:\n print (e)\n\n\ntest_init()\n","sub_path":"glove.py","file_name":"glove.py","file_ext":"py","file_size_in_byte":7142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358667865","text":"# Constants\nACCT_ADMIN = 'A'\nACCT_KILLED = 'K'\nACCT_LOCKED = 'L'\nACCT_MONSTER = 'M'\nACCT_PLAYER = 'P'\nACCT_PMARE = 'S'\nACCT_ADMIN_EXPIRED = 'E'\nACCT_PLAYER_EXPIRED = 'X'\n\n# Type to text\nSTATUS_DESCRIPTIONS = {\n ord(ACCT_ADMIN[0]): 'admin',\n ord(ACCT_KILLED[0]): 'killed',\n ord(ACCT_LOCKED[0]): 'locked',\n ord(ACCT_MONSTER[0]): 'agent',\n ord(ACCT_PLAYER[0]): 'player',\n ord(ACCT_PMARE[0]): 'pmare',\n ord(ACCT_ADMIN_EXPIRED[0]): 'expired_admin',\n ord(ACCT_PLAYER_EXPIRED[0]): 'expired_player',\n}","sub_path":"underlight_api/utils/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155192835","text":"__all__ = ['models', 'results', 'pcmds', 'data', 'radii_am', 'radii_kpc',\n 'dmods', 'regions']\n\ntry:\n import pcmdpy_gpu as ppy\nexcept:\n import pcmdpy as ppy\nimport numpy as np\nimport pandas as pd\nfrom os.path import expanduser, isfile\n\nmodels = {}\nresults = {}\npcmds = {}\ndata = {}\nresults_dir = expanduser('~/pCMDs/pixcmd/paper2/results/')\ndata_dir = expanduser('~/pCMDs/pixcmd/data/')\ndf_radii = pd.read_csv(results_dir + 'regions_radii.csv', index_col=0)\nradii_am = {}\nradii_kpc = {}\nregions = {}\n\ndmods = {\n 'NGC3377': 30.18,\n 'NGC4993': 33.05,\n 'M87': 30.9,\n 'M87v2': 30.9,\n 'M49': 31.12,\n 'DF2': 31.505,\n 'M31': 24.44,\n 'M31d': 24.44,\n 'M51': 29.67\n}\nm51_rpix = {\n 'a': 1745,\n 'b': 2776,\n 'c': 2471,\n 'd': 2709,\n 'e': 678\n}\nm31_rpix = {\n 'e': 6250,\n 'd': 3550,\n 'c': 2050,\n 'b': 1350,\n 'a': 700\n}\nm31d_rpix = {\n 'a': 21397,\n 'b': 23423,\n 'c': 25217\n}\n\nbase_models = {}\nbase_models[1] = ppy.galaxy.CustomGalaxy(\n ppy.metalmodels.SingleFeH(),\n ppy.dustmodels.SingleDust(),\n ppy.sfhmodels.NonParam(),\n ppy.distancemodels.VariableDistance()\n)\nbase_models[2] = ppy.galaxy.CustomGalaxy(\n ppy.metalmodels.SingleFeH(),\n ppy.dustmodels.SingleDust(),\n ppy.sfhmodels.NonParam(),\n ppy.distancemodels.FixedDistance()\n)\nbase_models[3] = ppy.galaxy.CustomGalaxy(\n ppy.metalmodels.FixedWidthNormMDF(0.3),\n ppy.dustmodels.SingleDust(),\n ppy.sfhmodels.NonParam(),\n ppy.distancemodels.VariableDistance()\n)\nbase_models[4] = ppy.galaxy.CustomGalaxy(\n ppy.metalmodels.SingleFeH(),\n ppy.dustmodels.SingleDust(),\n 
ppy.sfhmodels.TauModel(),\n ppy.distancemodels.VariableDistance()\n)\nbase_models[5] = ppy.galaxy.CustomGalaxy(\n ppy.metalmodels.SingleFeH(),\n ppy.dustmodels.SingleDust(),\n ppy.sfhmodels.SSPModel(),\n ppy.distancemodels.VariableDistance()\n)\nbase_models[6] = base_models[1].copy()\nbase_models[7] = base_models[1].copy()\nbase_models[8] = base_models[5].copy()\nbase_models[9] = base_models[1].copy()\nbase_models[10] = base_models[2].copy()\nbase_models[11] = ppy.galaxy.CustomGalaxy(\n ppy.metalmodels.ClosedBoxMDF(),\n ppy.dustmodels.SingleDust(),\n ppy.sfhmodels.NonParam(),\n ppy.distancemodels.VariableDistance()\n)\nbase_models[12] = base_models[4].copy()\nbase_models[13] = base_models[9].copy()\nbase_models[14] = base_models[9].copy()\n\n\ndef add_set(galaxy, mnum, region, key,\n colors='z_gz', model=None,\n dataname=None):\n g_orig = galaxy.replace('v2', '').replace('M31d', 'M31')\n g_dir = galaxy.replace('d','').lower()\n if dataname is not None:\n data_file = data_dir + f'{g_dir}/pcmds/{dataname}.pcmd'\n else:\n data_file = data_dir + f'{g_dir}/pcmds/{g_orig}_{colors}_{region}.pcmd'\n res_file = results_dir + f'{galaxy}_r{region}_m{mnum}.csv'\n live_file = res_file.replace('.csv', '_live.csv')\n pcmd_file = res_file.replace('.csv', '.pcmd')\n if not isfile(res_file):\n print(f'Skipping {key}')\n return\n regions[key] = region\n models[key] = model or base_models[mnum].copy()\n try:\n results[key] = ppy.results.ResultsPlotter(\n res_file, live_file=live_file, dmod_true=dmods[galaxy],\n gal_model=models[key], model_is_truth=False)\n except Exception as e:\n print('Error loading ', key)\n print(e)\n return\n ks = [d for d in data.keys() if key.replace(f'_m{mnum}', '') in d]\n if len(ks) > 0:\n data[key] = data[ks[0]]\n elif key not in data:\n data[key] = np.loadtxt(data_file, unpack=True)\n if g_orig in df_radii:\n radii_am[key] = df_radii[g_orig][region] * 0.05 / 60.\n radii_kpc[key] = radii_am[key] * (np.pi/(180.*60.)) * 1e3 * ppy.distancemodels.dmod_to_mpc(dmods[galaxy])\n try:\n pcmds[key] = np.loadtxt(pcmd_file, unpack=True)\n except:\n pass\n\n \ndef add_set_v2(galaxy, mnum, region, key, data_name, model=None):\n data_file = data_dir + f'{galaxy.lower()}/pcmds/{data_name}.pcmd'\n res_file = results_dir + f'{galaxy}_{region}_m{mnum}.csv'\n live_file = res_file.replace('.csv', '_live.csv')\n pcmd_file = res_file.replace('.csv', '.pcmd')\n regions[key] = region\n models[key] = model or base_models[mnum].copy()\n try:\n results[key] = ppy.results.ResultsPlotter(\n res_file, live_file=live_file, dmod_true=dmods[galaxy],\n gal_model=models[key], model_is_truth=False)\n except Exception as e:\n print('Error loading ', key)\n print(e)\n return\n ks = [d for d in data.keys() if key.replace(f'_m{mnum}', '') in d]\n if len(ks) > 0:\n data[key] = data[ks[0]]\n else:\n data[key] = np.loadtxt(data_file, unpack=True)\n try:\n pcmds[key] = np.loadtxt(pcmd_file, unpack=True)\n except:\n pass\n\n\ndef load_model(m, all_quads=False):\n print('M87')\n for let, rs in zip(['a','b','c'], [[204,201,202,203], [128,125,126,127], [44,41,42,43]]):\n for i, r in enumerate(rs):\n if (i > 0) and not all_quads:\n continue\n add_set('M87', m, r, f'M87_{let}{i+1}_m{m}', colors='I_VI')\n\n print('M87v2')\n for let, rs in zip(['a','b','c'], [[204,201,202,203], [128,125,126,127], [44,41,42,43]]):\n for i, r in enumerate(rs):\n if (i > 0) and not all_quads:\n continue\n add_set('M87v2', m, r, f'M87v2_{let}{i+1}_m{m}', colors='I_gI')\n\n print('M49')\n for let, rs in zip(['a','b','c'], [[204,201,202,203], 
[124,121,122,123], [40,37,38,39]]):\n for i, r in enumerate(rs):\n if (i > 0) and not all_quads:\n continue\n add_set('M49', m, r, f'M49_{let}{i+1}_m{m}')\n\n print('NGC3377')\n for let, rs in zip(['a','b','c'], [[173,174,175,176], [97,98,99,100], [41,42,43,44]]):\n for i, r in enumerate(rs):\n if (i > 0) and not all_quads:\n continue\n if r == 175:\n continue\n add_set('NGC3377', m, r, f'NGC3377_{let}{i+1}_m{m}')\n\n print('NGC4993')\n for let, rs in zip(['a','b','c'], [[203,204,201,202], [143,144,141,142], [83,84,81,82]]):\n for i, r in enumerate(rs):\n if (i > 0) and not all_quads:\n continue\n if r == 201:\n continue\n model = (base_models[5].copy() if m==6 else None)\n add_set('NGC4993', m, r, f'NGC4993_{let}{i+1}_m{m}', model=model)\n\n print('M31 Bulge')\n for i, let in enumerate(['e','d','c','b','a']):\n k = f'M31_{let}_m{m}'\n add_set('M31', m, i+1, k,\n dataname=f'm31_bulge_r{i+1}')\n radii_am[k] = m31_rpix[let] * 0.05/60.\n radii_kpc[k] = radii_am[k] * (np.pi/(180.*60.)) * 1e3 * ppy.distancemodels.dmod_to_mpc(dmods['M31'])\n\n # if m in [7]:\n # print('M31 Disk')\n # for i, let in enumerate(['a','b','c']):\n # k = f'M31d_{let}_m{m}'\n # add_set('M31d', m, i+1, k,\n # dataname=f'm31_disk_r{i+1}')\n # radii_am[k] = m31d_rpix[let] * 0.05/60.\n # radii_kpc[k] = radii_am[k] * (np.pi/(180.*60.)) * 1e3 * ppy.distancemodels.dmod_to_mpc(dmods['M31'])\n\n # print('M51')\n # for i, let in enumerate(['a','b','c','d','e']):\n # k = f'M51_{let}_m{m}'\n # model = base_models[m].copy()\n # model.dust_model = ppy.dustmodels.FixedWidthLogNormDust(0.1)\n # add_set('M51', m, i+1, k, colors='I_BI', model=model)\n # radii_am[k] = m51_rpix[let] * 0.05/60.\n # radii_kpc[k] = radii_am[k] * (np.pi/(180.*60.)) * 1e3 * ppy.distancemodels.dmod_to_mpc(dmods['M51'])\n \n\n#load_M87()\n#load_M87v2()\n#load_M49()\n#load_NGC3377()\n#load_NGC4993()\n#load_M31()\n#load_M51()\n","sub_path":"paper2/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"650331506","text":"class Solution:\n # @return a string\n def convertToTitle(self, num):\n num -= 1\n cnt = 26\n res = []\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n while num >= 0:\n res.append(letters[num % cnt])\n num = (num / cnt) - 1\n return ''.join(res[::-1])\n","sub_path":"excel_sheet_column_title.py","file_name":"excel_sheet_column_title.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"463332775","text":"import math\n\ndef getPrimes(limit):\n\tif limit < 2:\n\t\treturn []\n\n\t#odd = [i for i in xrange(3, limit, 2)]\n\tprimeMap = {i: True for i in range(3, limit, 2)}\n\t\n\tprimeList = [2]\n\tstop = math.floor(math.sqrt(limit))\n\tfor i in range(3, limit, 2):\n\t\tkey = i**2\n\t\twhile key < limit:\n\t\t\tprimeMap[key] = False\n\t\t\tkey += (i*2)\n\t\tif i > stop:\n\t\t\tbreak\n\n\tfor key in primeMap:\n\t\tif primeMap[key]:\n\t\t\tprimeList.append(key)\n\treturn primeList\n\ndef gcd(a,b):\n\tif a == 0 or b == 0:\n\t\treturn abs(a-b)\n\n\twhile a != 0 and b != 0:\n\t\tif a > b:\n\t\t\ta %= b\n\t\telif b >= a:\n\t\t\tb %= a\n\n\tif a > b:\n\t\treturn a\n\telse:\n\t\treturn b","sub_path":"tools3.py","file_name":"tools3.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520083799","text":"#\n# Copyright 2017 by Delphix\n#\n# Licensed under 
the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#\n# This class has been automatically generated from:\n# /delphix-quota-policy.json\n#\n# Do not edit this file manually!\n#\n\nfrom delphixpy.v1_9_0.web.vo.Policy import Policy\nfrom delphixpy.v1_9_0 import common\n\nclass __Undef(object):\n def __repr__(self):\n return \"undef\"\n\n_UNDEFINED = __Undef()\n\nclass QuotaPolicy(Policy):\n \"\"\"\n *(extends* :py:class:`v1_9_0.web.vo.Policy` *)* This policy limits the\n maximum amount of space an object (group or database) can use.\n \"\"\"\n def __init__(self, undef_enabled=True):\n super(QuotaPolicy, self).__init__()\n self._type = (\"QuotaPolicy\", True)\n self._warn_alert_time = (self.__undef__, True)\n self._crit_alert_time = (self.__undef__, True)\n self._size = (self.__undef__, True)\n\n API_VERSION = \"1.9.0\"\n\n @classmethod\n def from_dict(cls, data, dirty=False, undef_enabled=True):\n obj = super(QuotaPolicy, cls).from_dict(data, dirty, undef_enabled)\n obj._warn_alert_time = (data.get(\"warnAlertTime\", obj.__undef__), dirty)\n if obj._warn_alert_time[0] is not None and obj._warn_alert_time[0] is not obj.__undef__:\n assert isinstance(obj._warn_alert_time[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._warn_alert_time[0]))\n common.validate_format(obj._warn_alert_time[0], \"date\", None, None)\n obj._crit_alert_time = (data.get(\"critAlertTime\", obj.__undef__), dirty)\n if obj._crit_alert_time[0] is not None and obj._crit_alert_time[0] is not obj.__undef__:\n assert isinstance(obj._crit_alert_time[0], basestring), (\"Expected one of [u'string'], but got %s\" % type(obj._crit_alert_time[0]))\n common.validate_format(obj._crit_alert_time[0], \"date\", None, None)\n obj._size = (data.get(\"size\", obj.__undef__), dirty)\n if obj._size[0] is not None and obj._size[0] is not obj.__undef__:\n assert isinstance(obj._size[0], float), (\"Expected one of [u'number'], but got %s\" % type(obj._size[0]))\n common.validate_format(obj._size[0], \"None\", None, None)\n return obj\n\n def to_dict(self, dirty=False):\n dct = super(QuotaPolicy, self).to_dict(dirty)\n\n def dictify(obj):\n if isinstance(obj, list):\n return [dictify(o) for o in obj]\n elif hasattr(obj, \"to_dict\"):\n return obj.to_dict()\n else:\n return obj\n if \"warn_alert_time\" == \"type\" or (self.warn_alert_time is not self.__undef__ and not (dirty and not self._warn_alert_time[1])):\n dct[\"warnAlertTime\"] = dictify(self.warn_alert_time)\n if \"crit_alert_time\" == \"type\" or (self.crit_alert_time is not self.__undef__ and not (dirty and not self._crit_alert_time[1])):\n dct[\"critAlertTime\"] = dictify(self.crit_alert_time)\n if \"size\" == \"type\" or (self.size is not self.__undef__ and not (dirty and not self._size[1])):\n dct[\"size\"] = dictify(self.size)\n return dct\n\n def dirty(self):\n return self.from_dict(self.to_dict(dirty=False), dirty=True)\n\n def force_dirty(self):\n self._warn_alert_time = (self._warn_alert_time[0], True)\n self._crit_alert_time = (self._crit_alert_time[0], True)\n self._size 
= (self._size[0], True)\n\n def is_dirty(self):\n return any([self._warn_alert_time[1], self._crit_alert_time[1], self._size[1]])\n\n def __eq__(self, other):\n if other is None:\n return False\n if not isinstance(other, QuotaPolicy):\n return False\n return super(QuotaPolicy, self).__eq__(other) and \\\n self.warn_alert_time == other.warn_alert_time and \\\n self.crit_alert_time == other.crit_alert_time and \\\n self.size == other.size\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n return common.generate_repr_string(self)\n\n @property\n def warn_alert_time(self):\n \"\"\"\n Last time a warning alert was generated.\n\n :rtype: ``basestring``\n \"\"\"\n return self._warn_alert_time[0]\n\n @warn_alert_time.setter\n def warn_alert_time(self, value):\n self._warn_alert_time = (value, True)\n\n @property\n def crit_alert_time(self):\n \"\"\"\n Last time a critical alert was generated.\n\n :rtype: ``basestring``\n \"\"\"\n return self._crit_alert_time[0]\n\n @crit_alert_time.setter\n def crit_alert_time(self, value):\n self._crit_alert_time = (value, True)\n\n @property\n def size(self):\n \"\"\"\n Size of the quota, in bytes.\n\n :rtype: ``float``\n \"\"\"\n return self._size[0]\n\n @size.setter\n def size(self, value):\n self._size = (value, True)\n\n","sub_path":"src/main/resources/delphixpy/v1_9_0/web/vo/QuotaPolicy.py","file_name":"QuotaPolicy.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"517657442","text":"# -*- coding: utf-8 -*-\n\n\n#push_message = \"\\x1b[1m[\\x1b[93mPUSH\\x1b[0m\\x1b[1m]\\x1b[0m\"\npush_message = \"[PUSH]\"\n#pop_message = \"\\x1b[1m[\\x1b[92mPOP_\\x1b[0m\\x1b[1m]\\x1b[0m\"\npop_message = \"[POP_]\"\n\n\nclass Stack(object):\n \"\"\"docstring for Stack.\"\"\"\n def __init__(self):\n super(Stack, self).__init__()\n self.stack = []\n\n def push(self, name, linein, attrs={}, log=False):\n data = {\n \"name\" : name,\n \"linein\" : linein,\n \"lineout\" : -1,\n \"attrs\" : attrs\n }\n self.stack.append(data)\n if log: print(push_message, self.stack[-1])\n\n def pop(self, name, lineout, log=False):\n if len(self.stack) != 0:\n if self.stack[-1][\"name\"] != name:\n print(\"[WARN] First element of the stack is \\\"\" + self.stack[-1][\"name\"] +\"\\\" and not \\\"\" + name + \"\\\"\")\n return None\n else:\n data = self.stack[-1]\n data[\"lineout\"] = lineout\n self.stack = self.stack[:-1]\n if log: print(pop_message, data)\n return data\n else:\n return None\n\n def currentstack(self):\n stack_el_names = [e[\"name\"] for e in self.stack]\n if len(stack_el_names) != 0:\n maxlen = max([len(e) for e in stack_el_names])\n return '\\n'.join([\"|\" + name.center(maxlen, \" \") + \"|\" for name in stack_el_names[::-1]]) + \"\\n|\" + \"_\"*maxlen + \"|\"\n else:\n return \"\\n|_EMPTY_STACK_|\"\n\n\n\n\n\n\nif __name__ == '__main__':\n s = Stack()\n s.push(\"html\", 0)\n s.push(\"head\", 1)\n print(s.pop(\"html\", 3))\n print(s.pop(\"head\", 3))\n","sub_path":"Python/HTML_Parser/lib/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627416515","text":"import random\nimport time\nfrom random import choice, random\nimport random\n\n# play on a 5x5 board both random agents\nAGENT = -1\nCOMP = +1\nboard = [\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n]\n\n\ndef 
evaluate(state):\n if winState(state, COMP):\n #score = +1\n score = 100\n return score\n\n elif winState(state, AGENT):\n #score = -1\n score = -100\n return score\n\n else:\n score = 0\n return score\n\n\ndef winState(state, player):\n win_state_large_board = [\n [state[0][0], state[0][1], state[0][2], state[0][3], state[0][4]],\n [state[1][0], state[1][1], state[1][2], state[1][3], state[1][4]],\n [state[2][0], state[2][1], state[2][2], state[2][3], state[2][4]],\n [state[3][0], state[3][1], state[3][2], state[3][3], state[3][4]],\n [state[4][0], state[4][1], state[4][2], state[4][3], state[4][4]],\n [state[0][0], state[1][0], state[2][0], state[3][0], state[4][0]],\n [state[0][1], state[1][1], state[2][1], state[3][1], state[4][1]],\n [state[0][2], state[1][2], state[2][2], state[3][2], state[4][2]],\n [state[0][3], state[1][3], state[2][3], state[3][3], state[4][3]],\n [state[0][4], state[1][4], state[2][4], state[3][4], state[4][4]],\n [state[0][0], state[1][1], state[2][2], state[3][3], state[4][4]],\n [state[0][4], state[1][3], state[2][2], state[3][1], state[4][0]],\n ]\n\n if [player, player, player, player, player] in win_state_large_board:\n return True\n else:\n return False\n\n\ndef checkWin(state):\n if winState(state, AGENT):\n return True\n elif winState(state, COMP):\n return True\n else:\n return False\n\n\ndef availablePositions(state):\n #spots available for agent or computer to play\n l = []\n for first in enumerate(state):\n for second in enumerate(first[1]):\n if second[1] == 0:\n l.append([first[0], second[0]])\n return l\n\n\ndef validMove(x, y, player):\n if [x, y] in availablePositions(board):\n board[x][y] = player\n return True\n\n if [x, y] not in availablePositions(board):\n return False\n\n\n\ndef loadBoard(state, computerSymbol, agentSymbol):\n chars = {\n -1: agentSymbol,\n +1: computerSymbol,\n 0: ' '\n }\n str_line = '-------------------------'\n\n print('\\n' + str_line)\n for row in state:\n for cell in row:\n symbol = chars[cell]\n print(f'| {symbol} |', end='')\n print('\\n' + str_line)\n\n\ndef ai_turn(computerSymbol, agentSymbol, expanded):\n openPositions = len(availablePositions(board))\n depth = openPositions\n\n if depth == 0 or checkWin(board):\n return\n\n else:\n print(f'Computer turn')\n loadBoard(board, computerSymbol, agentSymbol)\n\n if depth == 25:\n x = choice([0, 1, 2, 3, 4])\n y = choice([0, 1, 2, 3, 4])\n\n else:\n l = []\n\n for first in range(len(board)):\n for second in range(len(board)):\n if board[first][second] == 0:\n l.append((first, second))\n\n move = random.choice(l)\n x, y = move[0], move[1]\n expanded = 1\n\n validMove(x, y, COMP)\n time.sleep(1)\n return expanded\n\n\ndef agent_turn(computerSymbol, agentSymbol, expanded):\n openPositions = len(availablePositions(board))\n depth = openPositions\n\n if depth == 0 or checkWin(board):\n return\n\n else:\n print(f'Agent turn')\n loadBoard(board, computerSymbol, agentSymbol)\n\n if depth == 25:\n x = choice([0, 1, 2, 3, 4])\n y = choice([0, 1, 2, 3, 4])\n\n else:\n l = []\n\n for first in range(len(board)):\n for second in range(len(board)):\n if board[first][second] == 0:\n l.append((first, second))\n\n move = random.choice(l)\n x, y = move[0], move[1]\n expanded = 1\n\n validMove(x, y, AGENT)\n time.sleep(1)\n return expanded\n\n\ndef main():\n start_time = time.time()\n agentSymbol = ''\n computerSymbol = ''\n expanded_comp = 0\n expanded_agent = 0\n expanded = 0\n\n agentSymbol = input('\\nChoose the Agents symbol X or O\\nUser Selected: ').upper()\n\n if agentSymbol == 
'X':\n computerSymbol = 'O'\n\n if agentSymbol == 'O':\n computerSymbol = 'X'\n\n while len(availablePositions(board)) > 0 and not checkWin(board):\n expanded_agent = agent_turn(computerSymbol, agentSymbol, expanded_agent)\n expanded_comp = ai_turn(computerSymbol, agentSymbol, expanded_comp)\n #print(expanded_agent)\n #print(expanded_comp)\n if expanded_agent is None or expanded_comp is None:\n if expanded_agent is None:\n expanded_agent = 0\n if expanded_comp is None:\n expanded_comp = 0\n expanded = expanded + int(expanded_agent) + int(expanded_comp)\n\n # Game over message with number nodes expanded, and time to execute\n if winState(board, COMP):\n print(f'COMPUTER turn')\n loadBoard(board, computerSymbol, agentSymbol)\n print('AGENT LOSES - COMPUTER WINS!')\n print(f'Expanded {expanded} nodes')\n print(\"Execution time in %s seconds\" % (time.time() - start_time))\n\n elif winState(board, AGENT):\n print(f'AGENT turn')\n loadBoard(board, computerSymbol, agentSymbol)\n print('AGENT WON!')\n print(f'Expanded {expanded} nodes')\n print(\"Execution time in %s seconds\" % (time.time() - start_time))\n else:\n loadBoard(board, computerSymbol, agentSymbol)\n print('TIE!')\n print(f'Expanded {expanded} nodes')\n print(\"Execution time in %s seconds\" % (time.time() - start_time))\n\n exit()\n\n\nif __name__ == '__main__':\n print(\"Tic Tac Toe Game Using Random Agent 5x5 board\")\n main()\n","sub_path":"RandomAgentLargeBoard.py","file_name":"RandomAgentLargeBoard.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532554408","text":"def spiralOrder(matrix):\n visited = set()\n arr = []\n d = 0\n \n #get number of cells\n num_rows = len(matrix)\n num_cols = len(matrix[0])\n cells = num_rows * num_cols\n \n cells_visited = 0\n row = 0\n col = 0\n prev_row = 0\n prev_col = 0\n \"\"\"\n 1. keep track of visited cells\n 2. 
keep track of directions\n - Traverse right until the edge or a visited cell\n - Traverse down until edge or visited cell\n - Traverse left until edge or visited cell\n - Traverse up until edge or visited cell\n \"\"\" \n while( cells_visited <= cells ):\n hit_right = col >= num_cols\n hit_bottom = row >= num_rows\n hit_left = col < 0\n hit_top = row < 0\n \n num = matrix[row][col]\n\n if num in visited or hit_right or hit_bottom or hit_left or hit_top:\n d += 1\n if d == 4:\n d = 0\n if num in visited:\n print(f\"row: {row} col: {col}\")\n row = prev_row\n col = prev_col\n else:\n visited.add(num)\n arr.append(num)\n # print(num)\n\n prev_row = row\n prev_col = col\n \n if d == 0: # go right\n if (col < num_cols - 1):\n col += 1\n elif d == 1: # go down\n if row < num_rows - 1:\n row +=1 \n elif d == 2: # go left\n if col > 0:\n col -= 1\n elif d == 3: # go up\n if row > 0:\n row -= 1\n \n cells_visited += 1\n\n\ninput = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9,10,11,12]\n]\n\nspiralOrder(input)","sub_path":"tests/snippets/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142574961","text":"import sys\nimport logging\nfrom heapq import heappush, nsmallest\nimport time\nLogger = logging.getLogger()\n\n\ndef setup_debug_logger():\n handler = logging.StreamHandler()\n Logger.addHandler(handler)\n Logger.setLevel(logging.DEBUG)\n\n\ndef read_lines(file_name):\n with open(file_name) as f:\n return [int(line.strip()) for line in f]\n\n\ndef heap_median_maintanance(stream):\n median_list, heap = [], []\n for i in stream:\n median = find_median(i, heap)\n Logger.debug(\"Stream {0} heap {1} median {2}\".format(i, heap, median))\n median_list.append(median)\n return sum(median_list) % 10000\n\n\ndef find_median(i, heap):\n heappush(heap, i)\n median_index = (len(heap) + 1) / 2\n median = max(nsmallest(median_index, heap))\n return median\n\n\nif __name__ == \"__main__\":\n # setup_debug_logger()\n if len(sys.argv) > 1:\n file_name = sys.argv[1]\n int_stream = read_lines(file_name)\n start_time = time.time()\n median_mod = heap_median_maintanance(int_stream)\n print(\"Median maintainance mod: {0}\".format(median_mod))\n print(\"Median maintainance using Heap: {0:.2f} seconds\".format(\n time.time() - start_time))\n else:\n print(\"Please provide file name!\")\n","sub_path":"part1/6_2_median_maintenance/median_maintenance.py","file_name":"median_maintenance.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"578634234","text":"from okean.roms.inputs import surface\n\ndef gen_blk(year):\n path = 'interim_data/%d' % year\n grid = 'roms_grid.nc'\n fname = 'bulk_interim_%d.nc' % year\n\n args={}\n args['attr'] = {'dataset':'ECMWF ERA-Interim'} # add global atts\n args['model'] = 'roms' # or agrif\n args['tunits'] = 'days since 1970-01-01'\n args['keepor'] = False # also save data at original resolution in file\n\n surface.make_blk_interim(path,grid,fname,**args)\n\n\nif __name__=='__main__':\n import sys\n y=int(sys.argv[1])\n gen_blk(y)\n","sub_path":"pong_py/howto/code/gen_blk_interim.py","file_name":"gen_blk_interim.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"325625371","text":"class ListNode:\n\tdef __init__(self, val=0, next=None):\n\t\tself.val = val\n\t\tself.next = 
next\n\n\tdef __str__(self):\n\t\treturn '{}({})'.format(self.__class__.__name__, self.val)\n\n\nclass Solution:\n\tdef mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n\t\torg = l3 = ListNode(None)\n\n\t\twhile l1 and l2:\n\t\t\tif l1.val < l2.val:\n\t\t\t\tl3.next = l1\n\t\t\t\tl1 = l1.next\n\t\t\telse:\n\t\t\t\tl3.next = l2\n\t\t\t\tl2 = l2.next\n\t\t\tl3 = l3.next\n\n\t\tl3.next = l1 or l2\n\t\treturn org.next\n\n\ndef list_node_to_list(list_node):\n\tlst = []\n\twhile list_node:\n\t\tlst.append(list_node.val)\n\t\tlist_node = list_node.next\n\treturn lst\n\n\nl1 = ListNode(val=0, next=ListNode(val=3, next=ListNode(val=5)))\nl2 = ListNode(val=0, next=ListNode(val=3))\n# l2 = ListNode(val=2, next=ListNode(val=4))\ntmp3 = ListNode(val=0, next=ListNode(val=0, next=ListNode(val=3, next=ListNode(val=3, next=ListNode(5)))))\n\n\nl3 = Solution().mergeTwoLists(l1, l2)\nassert list_node_to_list(tmp3) == list_node_to_list(l3), (list_node_to_list(tmp3), list_node_to_list(l3))\n\n","sub_path":"python3/leetcode/21_merge_two_lists.py","file_name":"21_merge_two_lists.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"255864840","text":"##################################################################\n######################## include.py ############################\n##################################################################\n##### ekate - Enhanced KRATOS for Advanced Tunnel Enineering #####\n##### copyright by CIMNE, Barcelona, Spain #####\n##### and Institute for Structural Mechanics, RUB #####\n##### all rights reserved #####\n##################################################################\n##################################################################\n##################################################################\n##################################################################\nimport sys\nimport os\nkratos_root_path=os.environ['KRATOS_ROOT_PATH']\n##################################################################\n##################################################################\n#importing Kratos modules\nfrom KratosMultiphysics import *\nfrom KratosMultiphysics.BRepApplication import *\nfrom KratosMultiphysics.IsogeometricApplication import *\nkernel = Kernel() #defining kernel\n\nimport geometry_factory\n\nmpatch_export3 = MultiNURBSPatchMatlabExporter()\nbsplines_patch_util = BSplinesPatchUtility()\n\ndef CreatePatch():\n ## generate trajectory curve\n cpoints = []\n cpoints.append([0.0, 0.0, 0.0])\n cpoints.append([0.0, 0.0, 1.0])\n cpoints.append([1.0, 1.0, 2.0])\n\n order = 2\n curve_ptr = geometry_factory.CreateCurve(cpoints, order)\n curve = curve_ptr.GetReference()\n curve.Prefix = \"curve\"\n mpatch_export3.Export(curve, \"curve.m\")\n\n ## generate the local Frenet frame along the trajectory\n npoints = 10\n trans_list = geometry_factory.GenerateLocalFrenetFrame(curve, npoints)\n for trans in trans_list:\n print(trans)\n geometry_factory.ExportLocalFrenetFrameToMatlab(trans_list, \"frame.m\", 2e-1)\n\n ## generate a list of cut section\n rin = 0.5\n rout = 1.0\n start_angle = 45\n end_angle = 90\n axis = \"z\"\n ring_patches = []\n for i in range(0, npoints):\n ring_ptr = geometry_factory.CreateSmallRing([0.0, 0.0, 0.0], axis, rin, rout, start_angle, end_angle)\n ring = ring_ptr.GetReference()\n ring.Id = i+1\n ring.Prefix = \"ring\"\n ring.ApplyTransformation(trans_list[i])\n # mpatch_export3.Export(ring, ring.Name() + \"_def.m\")\n 
ring_patches.append(ring_ptr)\n\n ## create the sweep volume\n order_w = 2\n vol_ptr = bsplines_patch_util.CreateLoftPatchFromList3D(ring_patches, order_w)\n return vol_ptr\n\ndef main():\n vol_ptr = CreatePatch()\n vol = vol_ptr.GetReference()\n vol.Id = 1\n vol.Prefix = \"volume\"\n mpatch_export3.Export(vol, \"sweep_volume.m\")\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"examples/nurbs_multipatch/example_sweep_volume.py","file_name":"example_sweep_volume.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"385465958","text":"import os\r\nos.system(\"pip install deepctr --no-deps\")\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import log_loss, roc_auc_score\r\nfrom deepctr.models import DeepFM\r\nfrom deepctr.utils\timport SingleFeat\r\n\r\nif __name__ == \"__main__\":\r\n\tdata = pd.read_csv('/kaggle/input/train.csv')\r\n\r\n\tsparse_features = [\"F\"+str(it+1) for it in range(24)]\r\n\tdata[sparse_features] = data[sparse_features].fillna('-1', )\r\n\ttarget = ['label']\r\n\t# 1.Label Encoding for sparse features,and do simple Transformation for dense features\r\n\tfor feat in sparse_features:\r\n\t\tlbe = LabelEncoder()\r\n\t\tdata[feat] = lbe.fit_transform(data[feat])\r\n\t# 2.count #unique features for each sparse field,and record dense feature field name\r\n\r\n\tsparse_feature_list = [SingleFeat(feat, data[feat].nunique())\r\n\t\t\t\t\t\t for feat in sparse_features]\r\n\r\n\t# 3.generate input data for model\r\n\ttrain, test = train_test_split(data, test_size=0.2,shuffle = False)\r\n\ttrain_model_input = [train[feat.name].values for feat in sparse_feature_list] \r\n\ttest_model_input = [test[feat.name].values for feat in sparse_feature_list]\r\n\r\n\t# 4.Define Model,train,predict and evaluate\r\n\tmodel = DeepFM({\"sparse\": sparse_feature_list}, final_activation='sigmoid')\r\n\tmodel.compile(\"adam\", \"binary_crossentropy\",\r\n\t\t\t\t metrics=['binary_crossentropy'], )\r\n\tfor i in range(20):\r\n\t\thistory = model.fit(train_model_input, train[target].values,\r\n\t\t\t\t\t\t\tbatch_size=20480, epochs=1, verbose=2, validation_split=0.2, )\r\n\t\tpred_ans = model.predict(test_model_input, batch_size=20480)\r\n\t\tprint(\"test LogLoss\", round(log_loss(test[target].values, pred_ans), 4))\r\n\t\tprint(\"test AUC\", round(roc_auc_score(test[target].values, pred_ans), 4))","sub_path":"SecondYear/MachineLearning/click_through_rate/new_idea2.py","file_name":"new_idea2.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459505290","text":"# @Auther : wuwuwu \n# @Time : 2020/4/11 \n# @File : q18.py\n# @Description : Emboss\n\nimport cv2 as cv\nimport numpy as np\n\ndef BGR2GRAY(img):\n dst = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]\n return dst.astype(np.uint8)\n\ndef embossFilter(img):\n \"\"\"\n Laplacian Filter\n :param img:\n :return:\n \"\"\"\n\n H, W = img.shape\n\n padding = 1\n\n # kernel\n kernel = [[-2., -1., 0.], [-1., 1., 1.], [0., 1., 2.]]\n\n # Zero Padding\n\n dst = np.zeros((H + padding * 2, W + padding * 2), dtype=np.float32)\n dst[padding: H + padding, padding: W + padding] = img.copy().astype(np.float32)\n\n tmp = dst.copy()\n\n for y in range(H):\n for x in range(W):\n dst[y + padding, x + padding] = np.sum(\n 
tmp[y: y + 3, x: x + 3] * kernel\n )\n dst = np.clip(dst, 0, 255)\n\n dst = dst[padding: H + padding, padding: W + padding].copy().astype(np.uint8)\n\n return dst\n\n\nif __name__ == '__main__':\n img = cv.imread(\"lenna.jpg\")\n gray = BGR2GRAY(img)\n dst = embossFilter(gray)\n cv.imshow(\"input\", img)\n cv.imshow(\"output\", dst)\n cv.waitKey(0)\n cv.destroyAllWindows()","sub_path":"opencv/q18.py","file_name":"q18.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"410244004","text":"from Model_class.contact import Contact\nfrom random import randrange\n\n\ndef test_delete_first_contact(app):\n if app.contact.count() == 0:\n app.contact.create(Contact(firstname=\"jadjsda\"))\n old_contacts = app.contact.get_contact_list()\n index = randrange(len(old_contacts))\n app.contact.delete_contact_by_index(index)\n new_contacts = app.contact.get_contact_list()\n assert len(old_contacts) - 1 == len(new_contacts)\n old_contacts[index:index + 1] = []\n assert old_contacts == new_contacts\n","sub_path":"Test/test_delete_contact.py","file_name":"test_delete_contact.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192942268","text":"'''\nThe number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7.\nSimilarly we can work from right to left: 3797, 379, 37, and 3.\n\nFind the sum of the only eleven primes that are both truncatable from left to right and right to left.\n\nNOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.\n'''\n\nfrom math import log\nimport primesieve\n\ndef intlen(n):\n\tif n == 0: return 0\n\treturn int(log(n, 10) + 1)\n\ndef primegen():\n\tit = primesieve.Iterator()\n\twhile True:\n\t\tyield it.next_prime()\n\nprimes = primegen()\nprimes_cache = []\nresults = []\nfor prime in primes:\n\tif prime > 10:\n\t\ttruncatable = True\n\t\tl = intlen(prime)\n\t\tstep = 1\n\t\twhile step < l:\n\t\t\tm = prime % 10**step\n\t\t\tif m not in primes_cache or int((prime - m) / (10**step)) not in primes_cache:\n\t\t\t\ttruncatable = False\n\t\t\t\tbreak\n\t\t\tstep += 1\n\t\tif truncatable:\n\t\t\tresults.append(prime)\n\t\t\tif len(results) >= 11:\n\t\t\t\tbreak\n\t\n\tprimes_cache.append(prime)\n\nprint('Answer:', sum(results))\n","sub_path":"037.py","file_name":"037.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"2591205","text":"from concurrent import futures\n\nimport grpc\n\nimport calculator_pb2\nimport calculator_pb2_grpc\n\nimport calculator as c\n\n\nclass Calculator(calculator_pb2_grpc.CalculatorServicer):\n\n def Calc(self, request, context):\n result = c.calc(\n request.number_1, \n request.number_2, \n request.operation)\n if result is None:\n return calculator_pb2.CalcReply(res='None')\n else:\n return calculator_pb2.CalcReply(res=str(result))\n\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n calculator_pb2_grpc.add_CalculatorServicer_to_server(Calculator(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n server.wait_for_termination()\n\n\nif __name__ == '__main__':\n 
serve()\n","sub_path":"calculator_server/calculator_server.py","file_name":"calculator_server.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"449226579","text":"# Import packages\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass QNetwork(nn.Module):\n \"\"\"Q Network.\"\"\"\n \n def __init__(self, state_size, action_size, seed, use_dueling):\n \"\"\"Initilize the network.\"\"\"\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n # Initialize parameters\n self.state_size = state_size\n self.action_size = action_size\n self.drop_prob = 0.5\n hidden_layers = [512, 128]\n self.use_dueling = use_dueling\n \n # Initialize layers\n self.hidden_layers = nn.ModuleList([nn.Linear(self.state_size, hidden_layers[0])])\n \n # Add a variable number of more hidden layers\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n \n self.action_value = nn.Linear(hidden_layers[-1], self.action_size)\n self.state_value = nn.Linear(hidden_layers[-1], 1)\n \n def forward(self, state):\n \"\"\"Define the forward pass.\"\"\"\n for linear in self.hidden_layers:\n state = F.relu(linear(state))\n \n if self.use_dueling:\n return self.action_value(state) + self.state_value(state)\n \n else:\n return self.action_value(state)\n ","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"626767849","text":"\"\"\"\nโจทย์ : ให้แสดงผลดังนี้ โดยที่รับค่า input ตัว ชื่อจริง ชื่อเล่น เบอร์ เงินที่ตัวเองมี ราคาผัก และราคาที่เหลือ\nMy name is xxxxxxxxx\nNickname is xxxxxxxx\nTel : xxxxxxx\nxxxxx มีเงิน xxxx ได้ซื้อผัก ราคา xxx.xx จะเหลือเงิน xxx.xxเท่าไหร่\n\"\"\"\ndef main():\n name = input() # รับค่าชื่อจริงที่เป็น String\n n_name = input() # รับค่่าชื่อเล่นที่เป็น String\n tel = int(input()) # รับค่าเบอร์ที่เป็น int\n money = int(input()) # รับค่าเงินที่ตัวเองมี เป็นค่า String\n veg = float(input()) # รับค่าเงินที่จ่ายผักทั้งหมด เป็นค่า float\n\n total = float(money-veg) #ค่าเงินที่เหลือ\n\n print(\"My name is %s\" %name)\n print(\"Nickname is %s\" %n_name)\n print(\"Tel : %d\" %tel)\n print(\"%s มีเงิน %d ได้ซื้อผัก ราคา %.2f จะเหลือเงิน %.2f\" %(n_name,money,veg,total))\n\nmain()\n","sub_path":"python/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"281967574","text":"## Do vdw calculation from md trajectory##\n\nfrom __future__ import division, print_function\n\nimport MDAnalysis\nimport numpy as np\n\nimport argparse\ntry:\n u_for_prev = u_for\nexcept:\n pass\nuniv = MDAnalysis.Universe('lam_sc_00.tpr', 'traj.xtc')\n\n\n# (zero-indexed) indices of atoms that \n# Will be alchemically transformed\n# For purposes of getting delta U, \n# We need only consider paired VdW\n# interactions with these atoms and \n# all other atoms\nalc_indices = np.arange(877, 888)\n\natm_indices = np.arange(univ.atoms.n_atoms)\n\n\n\nexcls = {\n 877:(870, 872, 873, 874, 875, 876, 877, 878, \n 879, 880, 884, 888),\n 878:(870, 872, 873, 874, 875, 876, 877, 878, \n 879, 880, 881, 882, 883, 884, 885, 886, 887, 888),\n 879:(872, 874, 875, 876, 877, 878, 879, 880, \n 881, 882, 883, 884, 885, 886, 887),\n 880:(872, 874, 875, 876, 877, 878, 879, 880, \n 881, 882, 
883, 884, 885, 886, 887),\n 881:(874, 878, 879, 880, 881, 882, 883, 884),\n 882:(874, 878, 879, 880, 881, 882, 883, 884),\n 883:(874, 878, 879, 880, 881, 882, 883, 884),\n 884:(872, 874, 875, 876, 877, 878, 879, 880, \n 881, 882, 883, 884, 885, 886, 887),\n 885:(874, 878, 879, 880, 884, 885, 886, 887),\n 886:(874, 878, 879, 880, 884, 885, 886, 887),\n 887:(874, 878, 879, 880, 884, 885, 886, 887)\n}\n\n# 14 pair list for each excluded atom\npairs = {\n 877: (870, 873, 879, 880, 884, 888), # HB3\n 878: (870, 873, 888),\n 879: (872, 875, 876, 877, 881, 882, 883, 885, 886, 887),\n 880: (872, 875, 876, 877, 885, 886, 887),\n 881: (874, 879, 884),\n 882: (874, 879, 884),\n 883: (874, 879, 884),\n 884: (872, 875, 876, 877, 881, 882, 883),\n 885: (874, 879, 880),\n 886: (874, 879, 880),\n 887: (874, 879, 880)\n}\n\n# dictionary of alchemical atom type transofmations.\n# keyed by alchemical index of an atom that is to be transformed\n# valued by tuple (Astate_idx, Bstate_idx)\n# NOTE: This is topology specific!!!\nalc_types = {\n 877: ('DUM_HC', 'HC'),\n 878: ('DUM_CT', 'DUM_CT'),\n 879: ('DUM_HC', 'DUM_HC'),\n 880: ('DUM_CT', 'DUM_CT'),\n 881: ('DUM_HC', 'DUM_HC'),\n 882: ('DUM_HC', 'DUM_HC'),\n 883: ('DUM_HC', 'DUM_HC'),\n 884: ('DUM_CT', 'DUM_CT'),\n 885: ('DUM_HC', 'DUM_HC'),\n 886: ('DUM_HC', 'DUM_HC'),\n 887: ('DUM_HC', 'DUM_HC')\n}\n\ntype_lookup = {\n 'N3': 0,\n 'H': 1,\n 'CT': 2,\n 'HP': 3,\n 'HC': 4,\n 'C': 5,\n 'O': 6,\n 'N': 7,\n 'H1': 8,\n 'SH': 9,\n 'HS': 10,\n 'OH': 11,\n 'HO': 12,\n 'CA': 13,\n 'HA': 14,\n 'O2': 15,\n 'CC': 5,\n 'NB': 16,\n 'CR': 17,\n 'H5': 18,\n 'NA': 7,\n 'CW': 17,\n 'H4': 19,\n 'DUM_HC': 20,\n 'DUM_CT': 21,\n 'DUM': 22,\n 'OW_spc': 23,\n 'HW_spc': 20\n}\n#format: 24 atom types, atomtype i is a tuple with (sigma, eps)\natmtypes = [\n(3.25000e-01, 7.11280e-01), # N3\n(1.06908e-01, 6.56888e-02), # H\n(3.39967e-01, 4.57730e-01), # CT\n(1.95998e-01, 6.56888e-02), # HP\n(2.64953e-01, 6.56888e-02), # HC\n(3.39967e-01, 3.59824e-01), # C\n(2.95992e-01, 8.78640e-01), # O\n(3.25000e-01, 7.11280e-01), # N\n(2.47135e-01, 6.56888e-02), # H1\n(3.56359e-01, 1.04600e+00), # SH\n(1.06908e-01, 6.56888e-02), # HS\n(3.06647e-01, 8.80314e-01), # OH\n(0.00000e+00, 0.00000e+00), # HO\n(3.39967e-01, 3.59824e-01), # CA\n(2.59964e-01, 6.27600e-02), # HA\n(2.95992e-01, 8.78640e-01), # O2\n(3.25000e-01, 7.11280e-01), # NB\n(3.39967e-01, 3.59824e-01), # CR\n(2.42146e-01, 6.27600e-02), # H5\n(2.51055e-01, 6.27600e-02), # H4\n(0.00000e+00, 0.00000e+00), # DUM_HC\n(0.00000e+00, 0.00000e+00), # DUM_CT\n(0.00000e+00, 0.00000e+00), # DUM\n(3.16557e-01, 6.50629e-01) # OW_spc\n]\n\n\n# set combined sigma to this if it's smaller\nsc_sigma = 0.0\nsc_sigma6 = sc_sigma**6\nsc_alpha = 0.1\n\nn_atmtype = len(atmtypes)\n# Generate VdW lookup table\nc6_lut = np.zeros(n_atmtype**2)\nc12_lut = np.zeros(n_atmtype**2)\nsig_lut = np.zeros(n_atmtype**2)\nsig6_lut = np.zeros(n_atmtype**2)\nfor i, payload_i in enumerate(atmtypes):\n sig_i, eps_i = payload_i\n for j, payload_j in enumerate(atmtypes):\n sig_j, eps_j = payload_j\n eps = np.sqrt(eps_i*eps_j)\n\n idx = i*n_atmtype + j\n\n sig = (sig_i + sig_j) / 2.0\n\n c6 = 4*eps*sig**6\n c12 = 4*eps*sig**12\n c6_lut[idx] = c6\n c12_lut[idx] = c12\n\n\n sig_lut[idx] = sig\n sig_6 = c12/c6\n if c6 == 0 or c12 == 0:\n sig_6 = sc_sigma**6\n sig6_lut[idx] = sig_6\n\n\nlmbda = 0.0\nlmbda_for = 0.9\n\n\nn_frames = univ.trajectory.n_frames\nn_frames = 1\nmy_diffs = np.zeros((n_frames, 2))\n\nfor i_frame in range(n_frames):\n univ.trajectory[i_frame]\n my_diffs[i_frame, 0] = 
univ.trajectory.time\n    univ.atoms.positions = univ.atoms.positions / 10.0\n    # Calculate VdW energy differences between lambdas\n    u_lmbda = 0.0\n    u_for = 0.0\n    for i in alc_indices:\n        # all atoms separated by more than nrexcl bonds (i.e. not excluded)\n        # Note: If i and j are both in alc_indices, skip if j !> i\n        incl_indices = np.setdiff1d(atm_indices, excls[i])\n        # all 1-4 pair interactions (that will be fudged)\n        pair_indices = pairs[i]\n\n        atm_i = univ.atoms[i]\n        # from tpr file, should be A state topology \n        type_i = type_lookup[atm_i.type]\n        name_i_a, name_i_b = alc_types[i]\n        type_i_a = type_lookup[name_i_a]\n        type_i_b = type_lookup[name_i_b]\n\n        assert type_i == type_i_a\n\n        for j in incl_indices:\n\n            atm_j = univ.atoms[j]\n            if j in alc_indices:\n                if j < i:\n                    continue\n                name_j_a, name_j_b = alc_types[j]\n                type_j_a = type_lookup[name_j_a]\n                type_j_b = type_lookup[name_j_b]\n            else:\n                type_j_a = type_j_b = type_lookup[atm_j.type] \n\n            lut_idx_a = type_i_a * n_atmtype + type_j_a\n            lut_idx_b = type_i_b * n_atmtype + type_j_b\n\n            r_ij_sq = np.sum((atm_i.position - atm_j.position)**2)\n            if r_ij_sq >= 1:\n                continue\n\n            # state A params for i\n            c6_a = c6_lut[lut_idx_a]\n            c12_a = c12_lut[lut_idx_a]\n            sig_a = sig_lut[lut_idx_a]\n            sig6_a = sig6_lut[lut_idx_a]\n            #if sig_a < sc_sigma:\n            #    sig_a = sc_sigma\n            #    sig6_a = sc_sigma6\n\n            c6_b = c6_lut[lut_idx_b]\n            c12_b = c12_lut[lut_idx_b]\n            sig_b = sig_lut[lut_idx_b]\n            sig6_b = sig6_lut[lut_idx_b] \n\n            #if sig_b < sc_sigma:\n            #    sig_b = sc_sigma\n            #    sig6_b = sc_sigma6 \n\n            denom_lmbda_a = (sc_alpha*sig6_a*lmbda + r_ij_sq**3)\n            denom_for_a = (sc_alpha*sig6_a*lmbda_for + r_ij_sq**3)\n\n            denom_lmbda_b = (sc_alpha*sig6_b*(1-lmbda) + r_ij_sq**3)\n            denom_for_b = (sc_alpha*sig6_b*(1-lmbda_for) + r_ij_sq**3)\n\n            u_lmbda += (1-lmbda) * ((c12_a/denom_lmbda_a**2) - (c6_a/denom_lmbda_a)) + (lmbda) * ( (c12_b/denom_lmbda_b**2) - (c6_b/denom_lmbda_b))\n            u_for += (1-lmbda_for) * ((c12_a/denom_for_a**2) - (c6_a/denom_for_a)) + (lmbda_for) * ( (c12_b/denom_for_b**2) - (c6_b/denom_for_b))\n\n\n        # Now do the same thing for all 1-4 pairs, but this time fudge the final LJ\n        for j in pair_indices:\n\n            atm_j = univ.atoms[j]\n            if j in alc_indices:\n                if j < i:\n                    continue\n                name_j_a, name_j_b = alc_types[j]\n                type_j_a = type_lookup[name_j_a]\n                type_j_b = type_lookup[name_j_b]\n            else:\n                type_j_a = type_j_b = type_lookup[atm_j.type]\n\n            lut_idx_a = type_i_a * n_atmtype + type_j_a\n            lut_idx_b = type_i_b * n_atmtype + type_j_b\n\n            r_ij_sq = np.sum((atm_i.position - atm_j.position)**2)\n\n            # state A params for i\n            c6_a = c6_lut[lut_idx_a]\n            c12_a = c12_lut[lut_idx_a]\n            sig_a = sig_lut[lut_idx_a]\n            sig6_a = sig6_lut[lut_idx_a]\n            #if sig_a < sc_sigma:\n            #    sig_a = sc_sigma\n            #    sig6_a = sc_sigma6\n\n            c6_b = c6_lut[lut_idx_b]\n            c12_b = c12_lut[lut_idx_b]\n            sig_b = sig_lut[lut_idx_b]\n            sig6_b = sig6_lut[lut_idx_b]\n            #if sig_b < sc_sigma:\n            #    sig_b = sc_sigma\n            #    sig6_b = sc_sigma6 \n\n            denom_lmbda_a = (sc_alpha*sig6_a*lmbda + r_ij_sq**3)\n            denom_lmbda_b = (sc_alpha*sig6_b*(1-lmbda) + r_ij_sq**3)\n\n            denom_for_a = (sc_alpha*sig6_a*lmbda_for + r_ij_sq**3)\n            denom_for_b = (sc_alpha*sig6_b*(1-lmbda_for) + r_ij_sq**3)\n\n            u_lmbda += 0.5 * ((1-lmbda) * ((c12_a/denom_lmbda_a**2) - (c6_a/denom_lmbda_a)) + (lmbda) * ( (c12_b/denom_lmbda_b**2) - (c6_b/denom_lmbda_b)))\n            \n            u_for += 0.5 * ((1-lmbda_for) * ((c12_a/denom_for_a**2) - (c6_a/denom_for_a)) + (lmbda_for) * ( (c12_b/denom_for_b**2) - (c6_b/denom_for_b)))\n\n    my_diffs[i_frame, 1] = u_for\n    print(\"frame {}\".format(i_frame))\n    print(\"u_for 
{}\".format(u_for))","sub_path":"calc_vdw.py","file_name":"calc_vdw.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"214110493","text":"__author__ = \"Tim Pose\"\n__credits__ = [\"Peter Detzner\"]\n__maintainer__ = \"Tim Pose\"\n__status__ = \"Development\"\n\nfrom typing import Any, Union\n\nfrom utils.dao.DictDAO import DictDAO\n\n\nclass BidirectionalDictDAO(DictDAO):\n \"\"\"Represents a ``BidirectionalDictDAO`` that inherits from ``DictDAO`` and saves added data persisting. For more\n detailed information on how persistence is achieved, see `app.utils.dao.DictDAO`. This class adds Methods to access\n the contained key value pairs bidirectional. So with a key, the corresponding value and with a value, the\n corresponding key can be accessed in O(1). As result, both, key and value must be unique.\n\n Note:\n - This implementation achieves a better access time compared to searching for a given value out of a normal\n dict (O(n)), but with the trade of, of more needed memory to hold two dictionaries and more\n dictionary-accesses for all writing methods. So using this class makes sense if many reading, rather than\n many write accesses are expected\n - To allow the values to be keys in the inverted dictionary, they must also be strings\n \"\"\"\n def __init__(self, filename: str):\n \"\"\"``BidirectionalDictDAO`` constructor.\n\n Args:\n filename: Name of the local file, eg. subscriptions.json\n \"\"\"\n super().__init__(filename)\n self.inverseDict = {v: k for k, v in self.items()}\n\n def clear(self):\n \"\"\"Remove all items from the dictionary.\n \"\"\"\n with self.lock:\n self.inverseDict.clear()\n super().clear()\n\n def __setitem__(self, key: str, value: Any):\n \"\"\"Implements evaluation of self[key]. Setter for the ``key`` with associated ``value``.\n\n Args:\n key: Key to be set\n value: Value to be set\n Raises\n KeyError if value is not unique, that means it is already contained\n \"\"\"\n with self.lock:\n # Check if value is already contained in the inverseDict\n sampleKey = self.inverseDict.get(value, None)\n # If so and the argument key is not the same as the inverseDict key, means this value is already associated\n # to another key. So setting two times dict[A]=B is ok, but not dict[A]=B and dict[C]=B\n if sampleKey is not None and sampleKey != key:\n raise KeyError(\"Inserting {'%s': '%s'} failed, because the value '%s' is already \"\n \"associated to the key '%s'.\" % (key, value, value, sampleKey))\n # Check if the underlying dict already contains key\n sampleValue = self.get(key, None)\n # If so and with the above logic, that determines that the inverseDict doesnt has a value with a random\n # key association, remove it from inverseDict. So setting dict[A]=B and dict[A]=C doesnt leave inverseDict\n # with two entries\n if sampleValue is not None:\n self.inverseDict.pop(sampleValue)\n self.inverseDict[value] = key\n super().__setitem__(key, value)\n\n def pop(self, key: str, default: Any = None) -> Union[str, Any]:\n \"\"\"If ``key`` is in the dictionary, remove it and return its value, else return default. 
If ``default`` is not\n given, it defaults to None, so that this method never raises a KeyError.\n\n Args:\n key: Key to be removed\n default: To be returned in case key does not exist\n Returns:\n If available value for the given key, else default\n \"\"\"\n with self.lock:\n poppedValue = super().pop(key, None)\n if poppedValue is not None:\n self.inverseDict.pop(poppedValue, None)\n return poppedValue\n else:\n return default\n\n def inversePop(self, value: Any, default: Any = None) -> str:\n \"\"\"If ``value`` is in the dictionary, remove it and return its key, else return default. If ``default`` is not\n given, it defaults to None, so that this method never raises a KeyError.\n\n Args:\n value: Value to be removed\n default: To be returned in case value does not exist\n Returns:\n If available key for the given value, else default\n \"\"\"\n with self.lock:\n poppedKey = self.inverseDict.pop(value, None)\n if poppedKey is not None:\n super().pop(poppedKey, None)\n return poppedKey\n else:\n return default\n\n def inverseGet(self, value: Any, default: Any = None) -> str:\n \"\"\"Return the key for ``value`` if ``value`` is in the dictionary, else ``default``. If ``default`` is not\n given, it defaults to None, so that this method never raises a KeyError.\n\n Args:\n value: Value which associated key should be returned\n default: To be returned in case value does not exist\n Returns:\n If available key for the given value, else default\n \"\"\"\n return self.inverseDict.get(value, default)\n","sub_path":"src/utils/dao/BidirectionalDictDAO.py","file_name":"BidirectionalDictDAO.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183065241","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.context_processors import csrf\nfrom django.http import Http404, HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n \n\nfrom pages.models import Page\n\nimport config\nfrom livesettings import config_value\nfrom django.conf import settings\nfrom catalog.models import Category, Item, Producer\nfrom sessionworking import SessionCartWorking\nfrom shop.forms import OrderForm\nfrom gallery.models import Category as Album\nfrom gallery.models import Photo\n\n\ndef get_common_context(request):\n c = {}\n c['request_url'] = request.path\n c['is_debug'] = settings.DEBUG\n c['categories'] = Category.get_top()\n c['cart_working'] = SessionCartWorking(request)\n c['cart_count'], c['cart_sum'] = c['cart_working'].get_goods_count_and_sum()\n c.update(csrf(request))\n return c\n\ndef page_page(request, page_name):\n c = get_common_context(request)\n p = Page.get_by_slug(page_name)\n if p:\n c.update({'p': p})\n return render_to_response('page.html', c, context_instance=RequestContext(request))\n else:\n raise Http404()\n\ndef home(request):\n c = get_common_context(request)\n c['request_url'] = 'home' \n return render_to_response('home.html', c, context_instance=RequestContext(request))\n\ndef category(request, slug):\n c = get_common_context(request)\n c['category'] = Category.get_by_slug(slug)\n if not c['category']:\n return page_page(request, slug)\n page = c['category']\n breadcrumbs = []\n while page:\n breadcrumbs.append(page)\n page = page.parent\n breadcrumbs.reverse()\n c['titles'] = breadcrumbs[:-1]\n c['items'] = Item.objects.filter(category=c['category'])\n if len(c['items']) == 0:\n c['items'] = 
Item.objects.filter(category__in=c['category'].get_descendants(include_self=True))\n return render_to_response('category.html', c, context_instance=RequestContext(request)) \n\ndef vendor(request, slug):\n c = get_common_context(request)\n c['category'] = Producer.get_by_slug(slug)\n c['titles'] = []\n c['items'] = Item.objects.filter(producer=c['category'])\n return render_to_response('category.html', c, context_instance=RequestContext(request))\n\ndef item(request, slug):\n c = get_common_context(request)\n item = Item.get_by_slug(slug)\n c['category'] = item.category\n page = c['category']\n breadcrumbs = []\n while page:\n breadcrumbs.append(page)\n page = page.parent\n breadcrumbs.reverse()\n c['titles'] = breadcrumbs\n c['item'] = item\n return render_to_response('item.html', c, context_instance=RequestContext(request))\n\ndef cart(request):\n c = get_common_context(request)\n c['items'] = c['cart_working'].get_content()\n return render_to_response('cart.html', c, context_instance=RequestContext(request))\n\ndef order(request):\n c = get_common_context(request)\n c['items'] = c['cart_working'].get_content()\n if request.method == 'GET':\n c['form'] = OrderForm()\n else:\n form = OrderForm(request.POST)\n if form.is_valid():\n order = form.save(request=request)\n c['order'] = order\n return render_to_response('order_ok.html', c, context_instance=RequestContext(request))\n else:\n c['form'] = form\n return render_to_response('order.html', c, context_instance=RequestContext(request))\n\ndef add_to_cart(request):\n SessionCartWorking(request).add_to_cart(request.POST['id'], int(request.POST['count']))\n return HttpResponse('')\n\ndef recount_cart(request):\n SessionCartWorking(request).recount_cart(request.POST['id'], int(request.POST['count']))\n return HttpResponse('')\n\ndef delete_from_cart(request):\n SessionCartWorking(request).del_from_cart(request.POST['id'])\n return HttpResponse('') \n\ndef gallery(request):\n c = get_common_context(request)\n c['gallery'] = Album.objects.all()\n return render_to_response('gallery.html', c, context_instance=RequestContext(request))\n\ndef gallery_in(request, slug):\n c = get_common_context(request)\n c['album'] = Photo.objects.filter(category=Album.get_by_slug(slug))\n return render_to_response('gallery_in.html', c, context_instance=RequestContext(request))\n\ndef category_old(request, folder_id):\n cat = Category.objects.filter(folder_id=folder_id)\n if len(cat) > 0:\n return HttpResponseRedirect('/category/%s' % cat[0].slug)\n else:\n return HttpResponseRedirect('/')\n\ndef item_old(request, product_id):\n item = Item.objects.filter(product_id=product_id)\n if len(item) > 0:\n return HttpResponseRedirect('/item/%s' % item[0].slug)\n else:\n return HttpResponseRedirect('/')\n \ndef page_old(request, page_name):\n return HttpResponseRedirect('/%s/' % page_name)\n\ndef page_old_old(request):\n if request.GET.get('mode', '') == 'folder':\n return HttpResponseRedirect('/internet_magazin/folder/%s/' % request.GET.get('folder_id', '')) \n ","sub_path":"svatibor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474268763","text":"from tkinter import Tk, ttk, StringVar\nfrom json import load, dump\n#Tk untuk window, ttk object turunan tkinter\n\nfileData = \"data.json\"\ndata = {}\n\ndef saveData():\n\tglobal data\n\n\twith open (fileData,\"w\") as f:\n\t\tdump(data,f)\n\ndef loadData():\n\tglobal data\n\n\twith 
open(fileData) as f:\n\t\tdata = load(f)\n\nloadData()\n\nmyApps = Tk()\nmyApps.title(\"My Apps\")\n#myApps.resizable(False, False) #vertikal atau horizontal\n\ncounterButton1 = 0\n\nlabel1 = ttk.Label(myApps, text = \"Nama Barang\")\nlabel1.grid(column = 0, row = 0)\n\nlabel2 = ttk.Label(myApps, text = \"Harga\")\nlabel2.grid(column = 1, row = 0)\n\nlabel3 = ttk.Label(myApps, text = \"Jumlah\")\nlabel3.grid(column = 2, row = 0)\n\nlabel4 = ttk.Label(myApps, text = \"ID\")\nlabel4.grid(column = 3, row = 0)\n\nlabel5 = ttk.Label(myApps, text = \"Exp.Date\")\nlabel5.grid(column = 4, row = 0)\n\ndef action_button1():\n\tglobal counterButton1, students_data\n\tbutton1.configure(text = \"Already Clicked\")\n\tif counterButton1 % 2 == 0 :\n\t\tlabel1.configure(foreground = \"blue\")\n\t\tlabel2.configure(foreground = \"blue\")\n\t\tlabel3.configure(foreground = \"blue\")\n\t\tlabel4.configure(foreground = \"blue\")\n\t\tlabel5.configure(foreground = \"blue\")\n\telse:\n\t\tlabel1.configure(foreground = \"red\")\n\t\tlabel2.configure(foreground = \"red\")\n\t\tlabel3.configure(foreground = \"red\")\n\t\tlabel4.configure(foreground = \"red\")\n\t\tlabel5.configure(foreground = \"red\")\n\n\tlabel1.configure(text = data_name.get())\n\tlabel2.configure(text = data_harga.get())\n\tlabel3.configure(text = data_jumlah.get())\n\tlabel4.configure(text = data_ID.get())\n\tlabel5.configure(text = data_expdate.get())\n\tcounterButton1 += 1\n\tdata[data_name.get()] = data_harga.get() , data_jumlah.get() , data_ID.get() , data_expdate.get()\n\tsaveData()\n\tprint(data)\n\n#button\nbutton1 = ttk.Button(myApps, text = \"Click here\", command = action_button1)\nbutton1.grid(column = 5, row = 1)\n\n#entry\ndata_name = StringVar()\ndata_name_entry1= ttk.Entry(myApps, width = 12, textvariable = data_name)\ndata_name_entry1.grid(column = 0, row = 1)\n\ndata_harga = StringVar()\ndata_name_entry2= ttk.Entry(myApps, width = 12, textvariable = data_harga)\ndata_name_entry2.grid(column = 1, row = 1)\n\ndata_jumlah = StringVar()\ndata_name_entry3= ttk.Entry(myApps, width = 12, textvariable = data_jumlah)\ndata_name_entry3.grid(column = 2, row = 1)\n\ndata_ID = StringVar()\ndata_name_entry4= ttk.Entry(myApps, width = 12, textvariable = data_ID)\ndata_name_entry4.grid(column = 3, row = 1)\n\ndata_expdate = StringVar()\ndata_name_entry5= ttk.Entry(myApps, width = 12, textvariable = data_expdate)\ndata_name_entry5.grid(column = 4, row = 1)\n\n\n\n\n#data_name = input(\"Name : \")\n#focus\ndata_name_entry1.focus()\ndata_name_entry2.focus()\ndata_name_entry3.focus()\ndata_name_entry4.focus()\ndata_name_entry5.focus()\n\n#combobox dropdown list\n#data_age_combobox = ttk.Combobox(myApps, width = 12, textvariable = data_age)\n'''\ndata_age_combobox = ttk.Combobox(myApps, width = 12, textvariable = data_age, state = \"readonly\") # state = \"readonly\"\ndata_age_combobox[\"values\"] = [\"YOUNGER\",12,13,14,15,\"OLDER\"]\ndata_age_combobox.grid(column = 1, row = 1)\ndata_age_combobox.current(0)\n'''\ndata_age_combobox = ttk.Combobox(myApps, width = 12, textvariable = data_jumlah, state = \"readonly\")\ndata_age_combobox[\"values\"] = [1,2,3,4,5,\" > 5\"]\ndata_age_combobox.grid(column = 2, row = 1)\ndata_age_combobox.current(0)\nif __name__ == \"__main__\":\n\tmyApps.mainloop()","sub_path":"tkinterProject/Project_input/inputBarang.py","file_name":"inputBarang.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256727881","text":"# 
uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/vinny.ly/workspace/cloud-metrics/blueflood-carbon-forwarder/bluefloodserver/util.py\n# Compiled at: 2016-06-17 14:57:20\nimport copy, os, pwd, sys\nfrom os.path import abspath, basename, dirname\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\ntry:\n import cPickle as pickle\n USING_CPICKLE = True\nexcept ImportError:\n import pickle\n USING_CPICKLE = False\n\nfrom time import sleep, time\nfrom twisted.python.util import initgroups\nfrom twisted.scripts.twistd import runApp\nif USING_CPICKLE:\n\n class SafeUnpickler(object):\n PICKLE_SAFE = {'copy_reg': set(['_reconstructor']), \n '__builtin__': set(['object'])}\n\n @classmethod\n def find_class(cls, module, name):\n if module not in cls.PICKLE_SAFE:\n raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module)\n __import__(module)\n mod = sys.modules[module]\n if name not in cls.PICKLE_SAFE[module]:\n raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name)\n return getattr(mod, name)\n\n @classmethod\n def loads(cls, pickle_string):\n pickle_obj = pickle.Unpickler(StringIO(pickle_string))\n pickle_obj.find_global = cls.find_class\n return pickle_obj.load()\n\n\nelse:\n\n class SafeUnpickler(pickle.Unpickler):\n PICKLE_SAFE = {'copy_reg': set(['_reconstructor']), \n '__builtin__': set(['object'])}\n\n def find_class(self, module, name):\n if module not in self.PICKLE_SAFE:\n raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module)\n __import__(module)\n mod = sys.modules[module]\n if name not in self.PICKLE_SAFE[module]:\n raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name)\n return getattr(mod, name)\n\n @classmethod\n def loads(cls, pickle_string):\n return cls(StringIO(pickle_string)).load()\n\n\ndef get_unpickler(insecure=False):\n if insecure:\n return pickle\n else:\n return SafeUnpickler","sub_path":"pycfiles/blueflood-carbon-forwarder-0.4.2.tar/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"304563671","text":"\"\"\"\nREST functions\n\"\"\"\nimport urlfetch\nimport urlparse\nfrom pykontakt import PYKONTAKT_SESSION_API_KEY, PYKONTAKT_SESSION_USER_AGENT\nKONTAKT_PUBLIC_API_KEY = 'YVMS1AZhgAEjC1Krg5X5tiI9AhhmpgbP'\n# Target the (current) latest API version\nKONTAKT_VERSION = 8\n\nKONTAKT_USER_AGENT_DEFAULT = PYKONTAKT_SESSION_USER_AGENT\nKONTAKT_API_KEY_DEFAULT = PYKONTAKT_SESSION_API_KEY\n\nKONTAKT_SCHEME = 'https'\nKONTAKT_NETLOC = 'api.kontakt.io'\n\nKONTAKT_HEADER_ACCEPT = 'application/vnd.com.kontakt+json;version={}'.format(KONTAKT_VERSION)\nKONTAKT_HEADER_CONTENT_TYPE = 'application/x-www-form-urlencoded'\n\nKONTAKT_PATH_DEVICE = '/device'\nKONTAKT_PATH_CONFIG_CREATE = '/config/create'\nKONTAKT_PATH_DEVICE_UPDATE = '/device/update'\nKONTAKT_PATH_ACTION_CREATE = '/action/create'\nKONTAKT_PATH_ACTION_DELETE = '/action/delete'\n\nKONTAKT_PROFILE_EBEACON = 'EDDYSTONE'\nKONTAKT_PROFILE_IBEACON = 'IBEACON'\n\nKONTAKT_DEVICE_TYPE_SMART_BEACON = 'BEACON'\nKONTAKT_DEVICE_TYPE_CLOUD_BEACON = 'CLOUD_BEACON'\n\nKONTAKT_DEVICE_SPECIFICATION_STANDARD = 'STANDARD'\nKONTAKT_DEVICE_SPECIFICATION_TOUGH = 'TOUGH'\nKONTAKT_DEVICE_SPECIFICATION_SENSOR = 'SENSOR'\n\nKONTAKT_ADVERTISING_INTERVAL_MIN_MS = 
20\nKONTAKT_ADVERTISING_INTERVAL_MAX_MS = 10240\nKONTAKT_ADVERTISING_INTERVAL = [KONTAKT_ADVERTISING_INTERVAL_MIN_MS,\n                                KONTAKT_ADVERTISING_INTERVAL_MAX_MS]\n\nKONTAKT_MINOR_RANGE = [0x0001, 0xffff]\nKONTAKT_MAJOR_RANGE = [0x0001, 0xffff]\n\nKONTAKT_TX_POWER_MIN = 0\nKONTAKT_TX_POWER_MAX = 7\nKONTAKT_TX_POWER_RANGE = [KONTAKT_TX_POWER_MIN, KONTAKT_TX_POWER_MAX]\n\nKONTAKT_ACCESS_OWNER = 'OWNER'\nKONTAKT_ACCESS_SUPERVISOR = 'SUPERVISOR'\nKONTAKT_ACCESS_VIEWER = 'VIEWER'\nKONTAKT_ACCESS_EDITOR = 'EDITOR'\nKONTAKT_ACCESS_EXCLUSIVE_EDITOR = 'EXCLUSIVE_EDITOR'\n\nKONTAKT_ACCESS_PERMISSIONS = [\n    KONTAKT_ACCESS_OWNER,\n    KONTAKT_ACCESS_SUPERVISOR,\n    KONTAKT_ACCESS_VIEWER,\n    KONTAKT_ACCESS_EDITOR,\n    KONTAKT_ACCESS_EXCLUSIVE_EDITOR\n]\n\nKONTAKT_SSID_CRYPT = ['NONE', 'TKIP', 'AES']\nKONTAKT_SSID_AUTH = ['NONE', 'WPA_PSK', 'WPA2_PSK']\n\nKONTAKT_LAT_RANGE = [-90, 90]\nKONTAKT_LNG_RANGE = [-180, 180]\nKONTAKT_WIFI_SCAN_INTERVAL = [1, 30]\nKONTAKT_DATA_SEND_INTERVAL = [2, 60]\nKONTAKT_BLE_SCAN_INTERVAL = [0, 86400]\n\n# Kontakt status codes\n#\n# Success\n# -------\n#\n# CODE DESCRIPTION\n# 200 Request has been valid and successful\n# 201 Entity has been created successfully\nKONTAKT_STATUS_CODE_SUCCESS = [200, 201]\n\n# Redirection\n# -----------\n#\n# CODE DESCRIPTION\n# 303 Request has been valid, but needed to be redirected elsewhere\nKONTAKT_STATUS_CODE_REDIRECT = [303]\n\n# Error\n# -----\n#\n# CODE DESCRIPTION\n# 400 Request contains invalid values or is in invalid format\n# 401 Unauthorized access. Most likely Api-Key hasn't been sent\n# 403 Forbidden. Tried to access a resource that isn't theirs\n# 404 Resource not found\n# 409 Conflict. Will return information as to the cause\n# 415 Version or Mediatype not found\n# 422 Parameters validation error\nKONTAKT_STATUS_CODE_ERROR = [400, 401, 403, 404, 409, 415, 422]\n\nKONTAKT_STRING_MAX_CHARS = 128\n\ndef kontakt_rest_api_request(path,\n                             query=None,\n                             payload=None,\n                             method='GET',\n                             content_type=KONTAKT_HEADER_CONTENT_TYPE,\n                             api_key=KONTAKT_API_KEY_DEFAULT,\n                             user_agent=KONTAKT_USER_AGENT_DEFAULT):\n    \"\"\"\n    Send a request to the Kontakt REST api.\n\n    :param path: the REST path\n    :param query: query string\n    :param payload: data sent with `'POST'` method\n    :param method: `'GET'` (the default) or `'POST'`\n    :param content_type: Header content type\n                   (default: `'application/x-www-form-urlencoded'`)\n    :param api_key: The kontakt API key (defaults to PYKONTAKT_SESSION_API_KEY)\n    :param user_agent: The user-agent string (defaults to PYKONTAKT_SESSION_USER_AGENT)\n\n    :return: A urlfetch.response object\n\n    :raises: urlfetch.UrlfetchException if the Kontakt api cannot be reached.\n    \"\"\"\n    headers = {'Accept': KONTAKT_HEADER_ACCEPT,\n               'Content-Type': content_type,\n               'Api-Key': api_key,\n               'User-Agent': user_agent}\n\n    url = urlparse.urlunparse((KONTAKT_SCHEME,\n                               KONTAKT_NETLOC,\n                               path,\n                               '',# No params\n                               query,\n                               ''))# No fragments\n\n    response = urlfetch.fetch(url,\n                              headers=headers,\n                              method=method,\n                              payload=payload,\n                              validate_certificate=True)\n    return response\n","sub_path":"pykontakt/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"281934910","text":"import os\nimport markdown\nfrom modules import constants\nimport flask\n\n\ndef load_base():\n    base_path = os.path.join(flask.current_app.root_path,\n                             flask.current_app.template_folder,\n                             constants.DOCS_BASE_TEMPLATE)\n    with open(base_path, encoding='utf-8', errors='replace') as base:\n        return base.read()\n\n\ndef markdown_to_html(base, file_to_convert, path):\n    with open(\n        os.path.join(constants.ROOT_DIR, file_to_convert), 'r',\n        encoding='utf-8', errors='replace'\n    ) as md_file:\n        all_file = md_file.read()\n    html = markdown.markdown(all_file)\n    new_file = os.path.join(constants.ROOT_DIR, path)\n    new_file_dir = os.path.dirname(new_file)\n    if not os.path.exists(new_file_dir):\n        os.makedirs(new_file_dir)\n    with open(\n        new_file, 'w', encoding='utf-8'\n    ) as html_file:\n        html_file.write(base.replace('%DOCUMENTATION%', html))\n\n\ndef convert_all_md(dir_to_convert, path):\n    base = load_base()\n    for file_to_convert in os.listdir(dir_to_convert):\n        filename, ext = os.path.splitext(file_to_convert)\n        if ext == '.md':\n            markdown_to_html(base,\n                             os.path.join(dir_to_convert, file_to_convert),\n                             os.path.join(path, filename + '.jinja2'))\n","sub_path":"modules/md_conversion.py","file_name":"md_conversion.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198413441","text":"#!/usr/bin/python\n# Check NetSIP API Key expiries\n# Jack.Su - PLAT-1457\n\nimport MySQLdb,sys\n\nconfig = {\n    'host':'111.111.111.111',\n    'port':3306,\n    'user':'root',\n    'passwd':'XXXXXX',\n    'db':'netsip',\n}\n\ntry:\n    cnn = MySQLdb.connect(**config)\nexcept MySQLdb.Error as e:\n    print(\"Mysql Error %d: %s\" % (e.args[0], e.args[1]))\n    sys.exit(3)\n\ncur = cnn.cursor()\nquery = \"SELECT ak.id, ak.netsip_account_id, na.account_name, na.company_name, ak.key_hash_new, ak.comment, ak.expires_UTC \\\n        FROM netsip.api_keys AS ak, netsip.netsip_accounts AS na \\\n        WHERE \\\n        ak.netsip_account_id = na.id \\\n        AND ak.enabled = '1' \\\n        AND ak.expires_UTC < DATE_SUB(NOW(), INTERVAL - 6 WEEK) \\\n        ORDER BY expires_utc\"\n\ntry:\n    cur.execute(query)\n    res = cur.fetchall()\nexcept MySQLdb.Error as e:\n    print(e)\n    sys.exit(3)\n\nif cur.rowcount != 0:\n    print(\"Warning: expired keys found or some keys will expire in 6 weeks\")\n    for i in res:\n        print(\"ID: %s, NetSIP Account ID: %s, Account Name: %s, Key File: %s, Expiry Date: %s\" % (i[0], i[1], i[2], i[5], i[6].strftime(\"%Y-%m-%d %H:%M:%S\")))\n    sys.exit(1)\nelse:\n    print(\"OK: No expired keys found\")\n    sys.exit(0)\n","sub_path":"check_netsip_key.py","file_name":"check_netsip_key.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345451806","text":"\"\"\"\n    Developed by : Sameera Abeykoon (June 11 2018)\n    This script will save the NYSPI incoming correct fMRI data into correct folders\n    The correct data may be in /mnt/jxvs01/incoming/NYSPI_data/physical_disk_K01/\"subject_number\"/nii\n    eg : /mnt/jxvs01/incoming/NYSPI_data/physical_disk_K01/3045/nii\n    or MUX folder inside the subject number s ../XNAT_K01/horgconte/2264/12589/scans/10/MUX\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport os, sys\n\n# provide the Subject numbers\ns_number=input(\"Enter the subject number ?\")\n\n# final Subject folder inside hcp01/tnfcs_PI\ns_folder = s_number \n\n# get the Nifti data folders\n#nii_path = \"/mnt/jxvs01/incoming/NYSPI_data/physical_disk_K01/\" + s_number+ \"/nii/\" # get the dicm2nii nii.gz files\n\n# scanlog file \nscan_file = s_number + \"_scanlog.txt\"\nscan_folder = \"/gpfs/projects/VanSnellenbergGroup/S_codes/scanlog/\" + scan_file\n\n# final unprocessed data folder\ndest_path=\"/gpfs/scratch/sabeykoon/HCP_data/nyspi/\" + s_folder + 
\"/unprocessed/3T/\"\n\n# unpack the scanlog txt file\ns_data = np.genfromtxt(scan_folder, delimiter=\" \", dtype=str, unpack=True)\n\n# get the scanlog file unpacked data\nfor i, j in enumerate(s_data[4]):\n if 'fMRI' in j or 'T1w' in j or 'T2w' in j:\n # get the folder B0 filenumber scanlog file (eg : 1)\n B0_no = s_data[6][i]\n \n B0_file = s_number + \"_3T_GradientEchoFieldMap_\" + str(B0_no) + \".nii.gz\"\n \n dst = dest_path + \"FieldMap/\" + B0_file\n print (\"dst\", dst)\n \n print (\"++++++++++++++++++++++++++++++++\")\n src = dest_path + j + \"/\" + s_number + \"_3T_GradientEchoFieldMap.nii.gz\"\n print (\"src\", src)\n \n fd_path = dest_path + j\n if os.path.isdir(fd_path):\n \tif os.path.lexists(src):\n \t\tos.remove(src)\n \tos.symlink(dst, src)\n\n \n\t\n","sub_path":"Seawulf_codes/HCP_pipeline_scripts/SB/new_hcp_link.py","file_name":"new_hcp_link.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149208657","text":"'''\nCreated on Aug 27, 2014\n\n\n'''\nimport os, subprocess\n\nfrom comMethods import getPureStructs\n\nclass Extractor:\n \"\"\" This class is responsible for creating \"pseudo-POSCAR\" files for each structure in the\n set of structures that we want to run through VASP calculations. It does this by \n \"extracting\" the information about the structure from struct_enum.out using the makestr.x\n routine from the enumlib library in UNCLE. The StructsToPoscar class will then take this \n set of pseudo-POSCARs and prepare them for VASP calculations. \"\"\"\n\n def __init__(self, user, atomSets, uncleOutput, startMethod,enumDir):\n \"\"\" CONSTRUCTOR \"\"\"\n self.atomSets = atomSets\n self.extractExec = '/fslhome/{}/graphener_links/uncle/makestr.x'.format(user['name'])\n self.uncleOut = uncleOutput\n self.exStructList = []\n self.startMethod = startMethod\n self.case = len(atomSets[0].split(','))\n self.enumDir = enumDir\n\n def checkPureInCurrent(self, iterNum, vstructsToStart, vstructsFinish, mainDir): #OK\n \"\"\" This checks that the pure structures are in the list to calculate or in the finished structures\n Only called on the first iteration for the firs \"\"\"\n #self.exStructList = []\n\n pureStructs = getPureStructs(mainDir + '/enum')\n for iatomSet in xrange(len(self.atomSets)):\n for struct in pureStructs:\n if not self.contains(str(struct), vstructsToStart[iatomSet]+vstructsFinish[iatomSet]):\n vstructsToStart[iatomSet].append(str(struct))\n return vstructsToStart\n\n def contains(self, struct, alist):\n \"\"\" Returns True if 'alist' contains the item 'struct', False otherwise. \"\"\"\n if len(alist) == 0:\n return False\n \n for i in xrange(len(alist)):\n if str(struct) == str(alist[i]):\n return True\n \n return False\n\n def extract(self,vstructsToStart):\n \"\"\" This method uses the makestr.x executable from the enumlib in UNCLE to \n create the pseudo-POSCAR files for each structure in self.exStructList. These files are \n generally called something like \"vasp.000241\" indicating the structure number in \n struct_enum.out. We only want to extract the union of all the lists in \n self.exStructList. \"\"\"\n subprocess.call(['echo','\\nExtracting structures from struct_enum.out\\n'])\n lastDir = os.getcwd()\n os.chdir(self.enumDir)\n uniqueSet = set() \n # Only extract the union of all the sets of structures. 
(No duplicates)\n for i in xrange(len(vstructsToStart)):\n uniqueSet = uniqueSet.union(vstructsToStart[i])\n for struct in uniqueSet:\n subprocess.call([self.extractExec, 'struct_enum.out', struct])\n os.chdir(lastDir)\n \n\n \n\n \n","sub_path":"graphener/Extractor.py","file_name":"Extractor.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"254285419","text":"\"\"\"\nCP1404/CP5632 Practical\nBy Kenzin Igor\n\"\"\"\n\n\ndef main():\n colours = {\"AliceBlue\": \"#f0f8ff\", \"AntiqueWhite\": \"#faebd7\",\n \"AntiqueWhite1\": \"#ffefdb\", \"AntiqueWhite2\": \"#eedfcc\",\n \"AntiqueWhite3\": \"#cdc0b0\", \"AntiqueWhite4\": \"#8b8378\",\n \"aquamarine1\": \"#7fffd4\", \"aquamarine2\": \"#76eec6\",\n \"aquamarine4\": \"#458b74\"}\n user_input = input(\"Enter Colour name:\")\n print(\"Colour code is\", colours[user_input])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"prac5/hex_colours.py","file_name":"hex_colours.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"143534608","text":"import re\nimport requests\nfrom file_worker import FileWorker\nfrom client import Client\n\n\nclass PhoneCheker:\n def __init__(self, input_file, output_file):\n self.input_file = input_file\n self.output_file = output_file\n\n def run(self):\n file = FileWorker()\n try:\n js_content = file.read_json(self.input_file)\n checked_clients = []\n for i in js_content[\"clients\"]:\n client = Client(**i)\n self._check(client)\n print(client.get_phone_numbers())\n checked_clients.append(client.get_dict())\n file.write_json(self.output_file, {\"clients\": checked_clients})\n except:\n # there may be an interception of errors\n pass\n\n def _check(self, client):\n loot_numbers = self._loot_pages(client.contact_pages)\n client_numbers = client.get_phone_numbers()\n checked_numbers = list(filter(lambda x: x in client_numbers, loot_numbers))\n checked_numbers = client.set_phone_numbers(checked_numbers)\n return client\n\n def _loot_pages(self, pages):\n loot = []\n for page in pages:\n page_content = self._get_page(page.link)\n page_numbers = self._find_numbers(page_content)\n loot.extend(page_numbers)\n return loot\n\n def _get_page(self, link):\n result = requests.get(link)\n if result.status_code == 200:\n return result.text\n\n def _find_numbers(self, text):\n reg = '([8|\\+7]?\\d{3}?[\\s\\-\\(\\)]*\\d{3}.?\\d{2}.?\\d{2})+|(\\d{3}.?\\d{2}.?\\d{2})'\n finder = re.compile(reg)\n raw_numbers = finder.findall(text)\n numbers = self._correct_numbers(raw_numbers)\n return numbers\n\n def _correct_numbers(self, raw_numbers):\n numbers = []\n for i in raw_numbers:\n s = ''.join(i)\n s = re.sub(r'\\D', '', s)\n if len(s) == 11 and s.startswith('7'):\n s = '8' + s[1:]\n numbers.append(s)\n elif len(s) == 7:\n s = '8495' + s\n numbers.append(s)\n # russian code's only\n elif len(s) == 10 and s.startswith(('3', '4', '8', '9')):\n s = '8' + s\n numbers.append(s)\n\n numbers = list(set(numbers)) # remove dubles\n return numbers\n\n\n","sub_path":"phone_checker.py","file_name":"phone_checker.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475953537","text":"\"\"\"\nThis routine scrapes prices for the brazilian government bonds.\nThis routine works for all bonds.\n\"\"\"\n\nimport pandas as pd\nimport requests\nfrom bs4 import 
BeautifulSoup\nfrom tqdm import tqdm\nfrom quantfin.data import grab_connection\n\nurl = r'https://sisweb.tesouro.gov.br/apex/f?p=2031:2:0::::'\n\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.text, 'html.parser')\n\ndownload_tags = soup.find_all('a', {\"style\": \"padding-right:5px;\"})\n\ndownload_links = []\nfor tag in download_tags:\n download_links.append(r'https://sisweb.tesouro.gov.br/apex/' + tag.attrs['href'])\n\n# loop on everything\ndf = pd.DataFrame(columns=['Taxa Compra Manhã', 'Taxa Venda Manhã', 'PU Compra Manhã', 'PU Venda Manhã',\n 'PU Base Manhã', 'bond_name', 'maturity'])\n\nfor link in tqdm(download_links, 'Looping every link'):\n xls = pd.ExcelFile(link)\n\n for name in xls.sheet_names:\n\n # grab the bond name\n bond_name = name.split()[0].replace('-', '')\n if name.split()[1] == 'Princ':\n bond_name = bond_name + 'P'\n\n # grab the bond maturity\n maturity = pd.to_datetime(name.split()[-1], dayfirst=True)\n\n # read the data\n df_aux = pd.read_excel(xls, name, skiprows=1)\n df_aux.columns = df_aux.columns.str.replace('9:00', 'Manhã')\n df_aux = df_aux.rename({'PU Extrato Manhã': 'PU Base Manhã'}, axis=1)\n df_aux = df_aux.dropna(how='all', axis=1)\n\n try:\n df_aux['Dia'] = pd.to_datetime(df_aux['Dia'], dayfirst=True)\n except ValueError:\n print(f'Deu ruim na {bond_name} que vence em {maturity}')\n continue\n\n df_aux['bond_name'] = bond_name\n df_aux['maturity'] = maturity\n\n # concatenate\n df = pd.concat([df, df_aux], axis=0, ignore_index=True)\n\n# rename variables\nrename_dict = {'bond_name': 'bond_type',\n 'maturity': 'maturity',\n 'Dia': 'reference_date',\n 'Taxa Compra Manhã': 'taxa_compra',\n 'Taxa Venda Manhã': 'taxa_venda',\n 'PU Compra Manhã': 'preco_compra',\n 'PU Venda Manhã': 'preco_venda',\n 'PU Base Manhã': 'preco_base'}\n\ndf = df.rename(rename_dict, axis=1)\n\n# Save to database\nconn = grab_connection()\ndf.to_sql('raw_tesouro_nacional', con=conn, if_exists='replace', index=False)\n","sub_path":"quantfin/scrape/tesouronacional.py","file_name":"tesouronacional.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348973173","text":"import gzip\nfrom collections import Counter\nimport dictionary_functions as df\nimport pvml\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sn\nimport time\n\nstart=time.time()\n####### Read Files ##########\nf=gzip.open(\"train.txt.gz\", \"rt\")\nklass=[]\npublisher=[]\ntitle=[]\nfor line in f:\n k, p, t = line.split(\"|\")\n klass.append(k)\n publisher.append(p)\n title.append(t)\n\n####### Encode Classes and Publishers #########\npub=sorted(list(Counter(publisher).keys())) #Get the list of unique publishers\nkla=sorted(list(Counter(klass).keys())) #Get the list of unique classes\n\npub_enc=[]\nklass_enc=[]\nfor p in publisher:\n pub_enc.append(pub.index(p)) #use the publisher index in pub to encode publishers\nfor k in klass:\n klass_enc.append(kla.index(k)) #use the class index in kla to encode classes\n\ny_train=np.array(klass_enc)\nt_pub_enc=pub_enc\nt_title=title\n\n######## Repeat for Validation and Test Sets #########\n####### Read Files ##########\nf=gzip.open(\"validation.txt.gz\", \"rt\")\nklass=[]\npublisher=[]\ntitle=[]\nfor line in f:\n k, p, t = line.split(\"|\")\n klass.append(k)\n publisher.append(p)\n title.append(t)\n\n####### Encode Classes and Publishers #########\npub=sorted(list(Counter(publisher).keys())) #Get the list of unique 
publishers\nkla=sorted(list(Counter(klass).keys())) #Get the list of unique classes\npub_enc=[]\nklass_enc=[]\nfor p in publisher:\n pub_enc.append(pub.index(p)) #use the publisher index in pub to encode publishers\nfor k in klass:\n klass_enc.append(kla.index(k)) #use the class index in kla to encode classes\n\ny_val=np.array(klass_enc)\nval_pub_enc=pub_enc\nval_title=title\n\n####### Read Files ##########\nf=gzip.open(\"test.txt.gz\", \"rt\")\nklass=[]\npublisher=[]\ntitle=[]\nfor line in f:\n k, p, t = line.split(\"|\")\n klass.append(k)\n publisher.append(p)\n title.append(t)\n\n####### Encode Classes and Publishers #########\npub=sorted(list(Counter(publisher).keys())) #Get the list of unique publishers\nkla=sorted(list(Counter(klass).keys())) #Get the list of unique classes\n\npub_enc=[]\nklass_enc=[]\nfor p in publisher:\n pub_enc.append(pub.index(p)) #use the publisher index in pub to encode publishers\nfor k in klass:\n klass_enc.append(kla.index(k)) #use the class index in kla to encode classes\n\ny_test=np.array(klass_enc)\ntest_pub_enc=pub_enc\ntest_title=title\n\n\n\n######## Create Dictionary #########\nsize=8000\nstem=True\nic=True\ndic=df.build_dict(t_title, size, True, stemming=stem, ignore_common=ic)\n\n######## Training Set ######\n######## Create BoW ########\nbow=df.build_bow(t_title, False, \"train_bow\", size, stemming=stem)\nt_pub_enc=np.array(t_pub_enc)\n\n\n######## Consolidate Features ########\nx_train=np.zeros((len(t_pub_enc), size+1))\nbow=pvml.maxabs_normalization(bow)\nfor i in range(len(t_pub_enc)):\n x_train[i][:-1]=bow[i]\n x_train[i][-1]=t_pub_enc[i]\n\n######## Validation Set ####\n######## Create BoW ########\nbow=df.build_bow(val_title, False, \"val_bow\", size, stemming=stem)\nval_pub_enc=np.array(val_pub_enc)\n\n\n######## Consolidate Features ########\nx_val=np.zeros((len(val_pub_enc), size+1))\nbow=pvml.maxabs_normalization(bow)\nfor i in range(len(val_pub_enc)):\n x_val[i][:-1]=bow[i]\n x_val[i][-1]=val_pub_enc[i]\n\n######## Test Set ##########\n######## Create BoW ########\nbow=df.build_bow(test_title, False, \"test_bow\", size, stemming=stem)\ntest_pub_enc=np.array(test_pub_enc)\n\n\n######## Consolidate Features ########\nx_test=np.zeros((len(test_pub_enc), size+1))\nbow=pvml.maxabs_normalization(bow)\n\nfor i in range(len(test_pub_enc)):\n x_test[i][:-1]=bow[i]\n x_test[i][-1]=test_pub_enc[i]\n\n######## Train a Multi Layer Perceptron #########\nprint(\"Multi-Layer Perceptron\")\ndnn=pvml.MLP([size+1, 256, 4]) #16\n\nepochs=375 #<- Starts overfitting slightly if we go over that number of epochs\nbatch_size=256\nsteps=len(x_train)//batch_size #Automatically adjust steps so that steps*batch_size is almost the number of samples\ntrain_accs=[]\nval_accs=[]\nep_vec=[]\nplt.ion()\nlr0=0.01\nfor i in range(epochs):\n lr=lr0/np.sqrt(i+1)\n dnn.train(x_train, y_train, lr=lr, lambda_=1e-5, momentum=0.99,\n steps=steps, batch=batch_size)\n train_labels=dnn.inference(x_train)[0]\n train_acc=(train_labels==y_train).mean()*100\n train_accs.append(train_acc)\n val_labels=dnn.inference(x_val)[0]\n val_acc=(val_labels==y_val).mean()*100\n val_accs.append(val_acc)\n ep_vec.append(i)\n plt.clf()\n plt.plot(ep_vec, train_accs, label=\"Training Accuracy\")\n plt.plot(ep_vec, val_accs, label=\"Validation Accuracy\")\n plt.legend()\n plt.grid(1)\n plt.pause(0.005)\nplt.ioff()\n\ndnn.save(\"dnn_\"+str(size))\n\nplt.clf()\nplt.plot(ep_vec, train_accs, label=\"Training\")\nplt.plot(ep_vec, val_accs, label=\"Validation\")\nplt.title(\"MLP 
Training\")\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Accuracy [%]\")\nplt.legend()\nplt.grid(1)\nplt.savefig(\"mlp_training.png\")\n\nprint(\"Final Training Accuracy: \", train_accs[-1])\nprint(\"Final Validation Accuracy: \", val_accs[-1])\ntest_labels=dnn.inference(x_test)[0]\ntest_acc=(test_labels==y_test).mean()*100\nprint(\"Test Accuracy: \", test_acc)\n\nprint(\"Elapsed Time: \", time.time()-start, \"s\")\n\nconf_mat=np.zeros((4,4))\nfor i in range(len(y_test)):\n conf_mat[int(y_test[i])][int(test_labels[i])]+=1\nfor i in range(4):\n conf_mat[i,:]/=conf_mat[i,:].sum()\n\nsn.heatmap(conf_mat, annot=True, cmap=\"coolwarm\")\nplt.title(\"MLP Confusion Matrix\")\nplt.show()\n","sub_path":"exam/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572237387","text":"from django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.latest_issue, name='index'),\n url(r'^archive/$', views.archive, name='archive'),\n url(r'^issue/(?P[0-9]+)/$', views.issue_details, name='issue'),\n url(r'^s/(?P[^/]+)/$', views.subscribe, name='subscribe'),\n url(r'^u/(?P[\\w\\-]+)/$', views.unsubscribe, name='unsubscribe'),\n url(r'^sponsor/$', views.sponsorship, name='sponsorship'),\n url(r'^submit/$', views.submit, name='submit',),\n url(r'^unsubscribe/$', views.unsubscribe, name='unsubscribe'),\n url(r'^contact/$', views.contact, name='contact'),\n #url(r'^logo/$', views.logo, name='logo')\n]\n","sub_path":"ios_blog/blog_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"290177603","text":"import re\nimport urllib3\nimport zipfile\nfrom io import BytesIO\nfrom datetime import datetime\nfrom flask import Flask, url_for, jsonify, send_file\n\napp = Flask(__name__)\nhttp = urllib3.PoolManager()\n\n# Create and send zip file with all marmitton images\ndef sendZipFile(marmittonUrls):\n file = open('./images.txt', 'w+')\n\n # create file\n for element in marmittonUrls:\n file.write(element + '\\n')\n # Create zip file\n memory_file = BytesIO()\n with zipfile.ZipFile(memory_file, 'w') as zf:\n zf.write('./images.txt')\n memory_file.seek(0)\n return send_file(memory_file, attachment_filename='data.zip', as_attachment=True)\n\n\n# Set delemiter, return the list of searches\ndef getRecipesList(keywords):\n delemiter = ''\n recipesList = []\n\n if ';' in keywords:\n delemiter = ';'\n elif '|' in keywords:\n delemiter = '|'\n elif ' ' in keywords:\n delemiter = ' '\n elif ':' in keywords:\n delemiter = ':'\n\n if delemiter != '':\n recipesList = keywords.split(delemiter)\n else:\n recipesList.append(keywords)\n return recipesList\n\n\n# Check if image is not already stored in list and append it if not\ndef addUrlsInList(marmittonUrls, tmp):\n for imageToFormat in tmp:\n formatedUrl = 'https://' + imageToFormat + '.jpg'\n if '.aspx' in imageToFormat:\n continue\n if formatedUrl in marmittonUrls:\n continue\n marmittonUrls.append(formatedUrl)\n return marmittonUrls\n\n\n# This route get recipes from marimitton and allow user to have pagination\n@app.route('/recipes/search//', methods=['GET'])\ndef get_recipes(keywords, limit, offset):\n marmittonUrls = []\n recipesList = getRecipesList(keywords)\n\n for recipe in recipesList:\n marmittonResponse = http.request(\n 'GET', 'https://www.marmiton.org/recettes/recherche.aspx?aqt=' + recipe)\n tmp = 
re.findall('https://(.*?).jpg', marmittonResponse.data.decode('utf-8'))\n        marmittonUrls = addUrlsInList(marmittonUrls, tmp)\n\n    return sendZipFile(marmittonUrls)\n    # return jsonify(marmittonUrls)\n\n@app.route('/')\ndef index():\n    return jsonify(status='ok', message='Hi there')\n\n\nif __name__ == '__main__':\n    app.run(debug=True, host='0.0.0.0')\n","sub_path":"web/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"542041111","text":"from PyQt5.QtWidgets import QApplication, QDialog\nfrom PyQt5.QtCore import Qt, pyqtSignal, QObject, QTimer\nfrom time import time\nfrom threading import Timer\nfrom datetime import datetime\nimport platform\nimport os\n\n\n# get the UI from here\nfrom diagnostics_ui import Ui_DiagnosticsDialog\n# get Dialog classes from here\nfrom rtc_cls import RtcDialog\nfrom power_control_cls import PowerControlDialog\nfrom display_mode_cls import DisplayModeDialog\nfrom lan_settings_cls import LanDialog\n\n\n#####################################################################\n# DiagnosticsDialog Class - Secondary window for diagnostics checks #\n#####################################################################\nclass DiagnosticsDialog(QDialog):\n\n    #SIGNALS\n    # signal to update in 1 second\n    one_second_signal = pyqtSignal()\n\n    def __init__(self, ser_instance, treatment, parent=None):\n        super(DiagnosticsDialog, self).__init__()\n\n        # Set up the user interface from Designer.\n        self.ui = Ui_DiagnosticsDialog()\n        self.ui.setupUi(self)\n\n        # get the close event and connect the buttons \n        self.ui.doneButton.clicked.connect(self.FinishThisDialog)\n        self.ui.rtcButton.clicked.connect(self.RtcScreen)\n        self.ui.localizationButton.clicked.connect(self.ChangeLocalization)\n        self.ui.max_powerButton.clicked.connect(self.PowerScreen)\n        self.ui.display_modeButton.clicked.connect(self.Display_ModeScreen)\n        self.ui.lanButton.clicked.connect(self.LanScreen)\n\n        # get the parent reference and data\n        self.parent = parent\n        self.ser = ser_instance\n        self.t = treatment\n        self.localization = self.t.GetLocalization()\n\n        # usa is the default\n        if self.localization == 'arg':\n            self.ui.localizationButton.setText('Localization\\nARG')\n        else:\n            self.ui.localizationButton.setText('Localization\\nUSA')\n\n        ### to carry on with date-time\n        date_now = datetime.today()\n        self.minutes_last = date_now.minute\n        self.UpdateDateTime(date_now)\n\n        # to start 1 second timer\n        self.t1seg = QTimer()\n        self.t1seg.timeout.connect(self.TimerOneSec)\n        self.t1seg.start(1000)\n\n        # progress timer, these ones are qt\n        self.init_timer = QTimer()\n\n        # CONNECT SIGNALS\n        # connect the timer signal to the Update\n        self.one_second_signal.connect(self.UpdateOneSec)\n        self.parent.rcv_signal.connect(self.SerialDataCallback)\n\n        self.ui.highVLabel.setText(\"--\")\n        self.ui.lowVLabel.setText(\"--\")\n\n        if self.ser.port_open == False:\n            self.ui.hardwareLabel.setText(\"No port   \")\n            self.ui.firmwareLabel.setText(\"No port   \")\n        else:\n            self.ui.hardwareLabel.setText(\"Waiting...   \")\n            self.ui.firmwareLabel.setText(\"Waiting... 
\")\n \n # recupero informacion del sistema\n (distname, version, nid) = platform.linux_distribution(full_distribution_name=1)\n os_text = \"--\" + distname + version + \"-- \"\n self.ui.osLabel.setText(os_text)\n self.distname = distname\n\n (system, node, release, version, machine, processor) = platform.uname()\n self.ui.kernelLabel.setText(release)\n self.ui.softLabel.setText(self.t.GetCurrentVersion())\n\n # recupero informacion de la placa power si el puerto esta OK\n self.comm_progress = 'clean' \n if self.ser.port_open == True:\n self.GetPowerInfoSM()\n\n\n def UpdateDateTime(self, new_date_time):\n date_str = \"\"\n # usa its default\n if self.localization == 'arg':\n date_str = new_date_time.strftime(\"%d/%m/%Y - %H:%M\")\n else:\n date_str = new_date_time.strftime(\"%m/%d/%Y - %H:%M\")\n\n self.ui.date_timeLabel.setText(date_str)\n\n\n \"\"\" QTimer callback emit a signal to not upset the timer interval \"\"\"\n def TimerOneSec(self):\n self.one_second_signal.emit()\n\n\n def UpdateOneSec (self):\n # do a UI update if its necessary\n date_now = datetime.today()\n if date_now.minute != self.minutes_last:\n self.minutes_last = date_now.minute\n self.UpdateDateTime(date_now)\n\n\n def GetPowerInfoSM (self):\n if self.comm_progress == 'clean':\n # limpio el puerto y luego la configuracion\n self.ser.Write(\"keepalive,\\r\\n\")\n \n self.comm_progress = 'voltage'\n self.init_timer.singleShot(100, self.GetPowerInfoSM)\n\n elif self.comm_progress == 'voltage':\n self.ser.Write(\"voltage\\r\\n\")\n\n self.comm_progress = 'hardware_and_software'\n self.init_timer.singleShot(100, self.GetPowerInfoSM)\n\n elif self.comm_progress == 'hardware_and_software':\n self.ser.Write(\"hard_soft\\r\\n\")\n\n self.comm_progress = 'device_id'\n self.init_timer.singleShot(100, self.GetPowerInfoSM)\n\n elif self.comm_progress == 'device_id':\n self.ser.Write(\"serial num\\r\\n\")\n \n\n def SerialDataCallback (self, rcv):\n if rcv.startswith(\"High Supply:\"):\n h_voltage = rcv[12:]\n self.ui.highVLabel.setText(h_voltage)\n\n if rcv.startswith(\"Low Supply:\"):\n l_voltage = rcv[11:]\n self.ui.lowVLabel.setText(l_voltage)\n\n if rcv.startswith(\"Hardware Version:\"):\n hs = rcv[17:]\n self.ui.hardwareLabel.setText(hs)\n\n if rcv.startswith(\"Software Version:\"):\n hs = rcv[17:]\n self.ui.firmwareLabel.setText(hs)\n\n if rcv.startswith(\"Device Id:\"):\n hs = rcv[10:]\n self.ui.deviceLabel.setText(hs)\n \n\n def ChangeLocalization (self):\n local_str = self.ui.localizationButton.text()\n if 'USA' in local_str:\n self.ui.localizationButton.setText('Localization\\nARG')\n self.localization = 'arg'\n self.t.SetLocalization('arg')\n else:\n self.ui.localizationButton.setText('Localization\\nUSA')\n self.localization = 'usa'\n self.t.SetLocalization('usa')\n\n date_now = datetime.today()\n self.UpdateDateTime(date_now)\n\n \n def FinishThisDialog (self):\n # to save localization\n self.t.SaveConfigFile()\n self.accept()\n\n\n ###############\n # Screens #\n ###############\n ## RtcScreen\n def RtcScreen (self):\n date_now = datetime.today()\n a = RtcDialog(self.localization, date_now)\n a.setModal(True)\n a.exec_()\n\n # default its usa\n if self.localization == 'arg':\n new_day = a.ui.dayButton.text()\n new_month = a.ui.monthButton.text()\n else: \n new_month = a.ui.dayButton.text()\n new_day = a.ui.monthButton.text()\n \n new_year = a.ui.yearButton.text()\n new_hour = a.ui.hourButton.text()\n new_minute = a.ui.minuteButton.text()\n myCmd1 = \"sudo date -s {1}/{0}/20{2}\".format(new_day, new_month, 
new_year)\n myCmd2 = \"sudo date -s {0}:{1}:00\".format(new_hour, new_minute)\n myCmd3 = \"sudo hwclock -w\" #guardo info del date en hwclock\n \n if self.distname == 'Slackware ':\n print(myCmd1)\n print(myCmd2)\n print(myCmd3) \n\n elif self.distname == 'debian':\n os.system(myCmd1)\n os.system(myCmd2) \n os.system(myCmd3)\n\n # do a UI update\n date_now = datetime.today()\n self.minutes_last = date_now.minute\n self.UpdateDateTime(date_now)\n\n\n ## PowerScreen\n def PowerScreen (self):\n a = PowerControlDialog(self.t)\n a.setModal(True)\n a.exec_()\n\n \n ## Display_ModeScreen\n def Display_ModeScreen (self):\n a = DisplayModeDialog (self.t)\n a.setModal(True)\n a.exec_()\n\n \n ## LanScreen\n def LanScreen (self):\n a = LanDialog ()\n a.setModal(True)\n a.exec_()\n \n \n### end of file ###\n\n","sub_path":"rpi_base/diagnostics/diagnostics_cls.py","file_name":"diagnostics_cls.py","file_ext":"py","file_size_in_byte":8095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173440857","text":"def last25(array, i):\n return array[i-25:i]\n\ndef find(num, array):\n for i in range(len(array)):\n for j in range(len(array)):\n if array[i] + array[j] == num:\n return False\n return True\n\nwith open(\"input.txt\", \"r\") as fh:\n lines = fh.readlines()\n array = []\n for line in lines:\n array.append(int(line))\n\n error = 0\n for trial in range(25,len(array)):\n if find(array[trial], last25(array, trial)):\n error = array[trial]\n print(f\"Puzzle 1: {error}\")\n\n begin = 0\n end = 0\n while True:\n theSlice = array[begin:end+1]\n theSum = sum(theSlice)\n if theSum == error:\n print(f\"Puzzle 2: {min(theSlice) + max(theSlice)}\")\n break\n if theSum > error:\n begin += 1\n if theSum < error:\n end += 1\n","sub_path":"9-encoding-error/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80118264","text":"from models.clients import Map,Scene,Object,Entity\n\nclass SceneUtils:\n def __init__(self,m:Map):\n self.map = m\n\n def append_static_obj_to_scene(self, s:Scene,oref:str,sx:int,sy:int,solid:int):\n s.objects.append(Object(oref,sx,sy,self.map.cell_size,self.map.cell_size,solid))\n\n def append_entity_to_scene(self, s:Scene,oref:str,animref:str,sx:int,sy:int,animrate:str,solid:int):\n s.entities.append(Entity(oref,sx,sy,self.map.cell_size,self.map.cell_size,animref,animrate,solid))\n\n def get_ref_for_obj_code(self, code: str) -> str:\n try:\n toret = self.map.obj_defs[self.map.obj_codes[code]]\n # print(toret)\n return toret\n except:\n return ''\n\n def get_ref_for_entity_code(self, code: str) -> str:\n try:\n toret = self.map.obj_defs[self.map.obj_codes[code][0]]\n # print(toret)\n return toret\n except:\n return ''\n\n def get_anim_ref_for_entity_code(self, code: str) -> str:\n try:\n # print(code)\n toret = self.map.obj_defs[self.map.obj_codes[code][1][0]]\n # print(toret)\n return toret\n except:\n return ''\n\n def get_anim_rate_for_entity_code(self, code: str) -> int:\n try:\n toret = self.map.obj_codes[code][1][1]\n # print(toret)\n return toret\n except:\n return ''\n\n def things_setup_from_grid(self, scene: Scene, grid: str):\n scene_size = scene.background.size_x\n grid_rows = grid.count('\\n')-1\n\n\n grid = grid.rstrip()\n grid = grid.replace(',','')\n grid = grid.replace('\\n','')\n\n\n self.map.cell_size = int(scene_size/grid_rows)\n for i in range(0,grid_rows):\n for j in range(0,grid_rows):\n 
cell_x = int(self.map.cell_size*j)\n cell_y = int(self.map.cell_size*i)\n\n obj_code = grid[i*grid_rows+j]\n obj_ref = self.get_ref_for_obj_code(obj_code)\n ent_ref = self.get_ref_for_entity_code(obj_code)\n\n if obj_ref != '':\n sol = 1\n if obj_code != '0':\n sol = 0\n if obj_code == '1' and 'isometric' in self.map.source:\n self.append_static_obj_to_scene(scene,self.get_ref_for_obj_code('*'),cell_x,cell_y,0)\n self.append_static_obj_to_scene(scene,obj_ref,cell_x,cell_y,sol)\n\n if ent_ref != '':\n anim_rate = self.get_anim_rate_for_entity_code(obj_code)\n anim_ref = self.get_anim_ref_for_entity_code(obj_code)\n sol = 1\n if anim_rate > 0:\n sol = 0\n if 'custom_1' not in self.map.source:\n self.append_static_obj_to_scene(scene,self.get_ref_for_obj_code('*'),cell_x,cell_y,0)\n self.append_entity_to_scene(scene,ent_ref,anim_ref,cell_x,cell_y,anim_rate,sol)\n","sub_path":"utils/scene_utils.py","file_name":"scene_utils.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"213180216","text":"import binaryninja as bn\nfrom clang.cindex import *\nfrom typing import *\nimport xxhash\n\n# Keys are the original spelling of the type\\object name in the header file, the Value is the name entered into\n# the binaryView.\n# BinaryNinja doesn't accept certain user defined names so we must alter them (e.g ptrdiff_t).\nprocessed_types = dict()\n\n# This is a list of libclangs' base types (mainly used in the check_if_base_type() function\nbase_types = [TypeKind.BOOL, TypeKind.CHAR16, TypeKind.CHAR32, TypeKind.CHAR_S,\n TypeKind.CHAR_U, TypeKind.DOUBLE, TypeKind.FLOAT, TypeKind.FLOAT128,\n TypeKind.HALF, TypeKind.INT, TypeKind.UINT, TypeKind.INT128, TypeKind.LONG,\n TypeKind.LONGLONG, TypeKind.LONGDOUBLE, TypeKind.SCHAR, TypeKind.SHORT,\n TypeKind.ULONG, TypeKind.UCHAR, TypeKind.ULONGLONG, TypeKind.USHORT,\n TypeKind.VOID, TypeKind.WCHAR]\n\n# This is a list of compiler directives to remove from the type string, since binaryNinja can't handle them.\ncompiler_directives = ['__unaligned', '__attribute__((stdcall))']\n\n# Incomplete arrays have no size, so we declare an arbitrary size in order to be able to parse them.\nINCOMPLETE_ARRAY_ARBITRARY_SIZE = 0x1000\n\n# Binary Ninja cannot parse these types, so we need to change them to simply 'void'\nvoid_types = ('const void', 'const volatile void', 'volatile void', '__unaligned void', 'const __unaligned void')\n\n\ndef define_type(node: Cursor, bv: bn.BinaryView):\n bn.log.log_debug(f'define_type: Dispatch for \"{node.type.spelling} {node.spelling}\", CursorKind: {node.kind}, type '\n f'{node.type.spelling}, TypeKind: {node.type.kind}')\n # Dispatch the correct handler for the declaration recursively.\n # It is important to check for type kind before we check for cursor kind in order\n # to detect arrays and such.\n if node.spelling:\n # For some reason libclang parses some typedefs (usually ENUM_DECL) as having no spelling, but doesn't\n # recognize them as anonymous.\n # BinaryNinja returns a type for the empty string ('') - which causes problems when trying to determine if\n # the type is already defined.\n current_type = bv.get_type_by_name(node.type.spelling)\n else:\n current_type = None\n if isinstance(current_type, bn.types.Type):\n # Check if type already defined.\n bn.log.log_debug(f'define_type: type {node.spelling} already defined, skipping re-definition.')\n var_type = current_type\n var_name = node.spelling\n return var_name, var_type\n elif 
check_if_base_type(node.type):\n var_type, var_name = bv.parse_type_string(f'{node.type.spelling} {node.spelling}')\n return str(var_name), var_type\n elif node.is_anonymous():\n return define_anonymous_type(node, bv)\n elif node.type.kind == TypeKind.ELABORATED:\n return define_type(node.type.get_declaration(), bv)\n elif node.type.kind == TypeKind.CONSTANTARRAY:\n return constantarray_type(node, bv)\n elif node.type.kind == TypeKind.INCOMPLETEARRAY:\n return incompletearray_type(node, bv)\n elif node.type.kind == TypeKind.FUNCTIONPROTO:\n return functionproto_type(node, bv)\n elif node.type.kind == TypeKind.POINTER:\n return pointer_type(node, bv)\n elif node.kind == CursorKind.TYPEDEF_DECL:\n if node.type.kind == TypeKind.TYPEDEF:\n if node.underlying_typedef_type.kind == TypeKind.FUNCTIONPROTO:\n return function_decl(node, bv)\n elif node.underlying_typedef_type.kind == TypeKind.POINTER:\n return pointer_type(node, bv)\n return typedef_decl(node, bv)\n elif node.kind == CursorKind.PARM_DECL:\n if node.type.kind == TypeKind.TYPEDEF:\n return typedef_decl(node, bv)\n else:\n bn.log.log_debug(f'define_type: Unhandled case - node.kind {node.kind}, node.type.kind {node.type.kind}')\n elif node.kind == CursorKind.VAR_DECL:\n return var_decl(node, bv)\n elif node.kind == CursorKind.FUNCTION_DECL:\n return function_decl(node, bv)\n elif node.kind == CursorKind.ENUM_DECL:\n return enum_decl(node, bv)\n elif node.kind == CursorKind.STRUCT_DECL:\n return struct_decl(node, bv)\n elif node.kind == CursorKind.FIELD_DECL:\n return field_decl(node, bv)\n elif node.kind == CursorKind.UNION_DECL:\n return struct_decl(node, bv)\n else:\n bn.log.log_info(f'no handler for cursorKind {node.kind}')\n\n\ndef pointer_type(node: Cursor, bv: bn.BinaryView):\n bn.log.log_debug(f'pointer_type: {node.type.spelling} {node.spelling}, \\n'\n f'node.type.kind: {node.type.kind} \\n')\n if node.type.kind == TypeKind.TYPEDEF:\n pointee_type = node.underlying_typedef_type.get_pointee()\n elif node.type.kind == TypeKind.POINTER:\n pointee_type = node.type.get_pointee()\n else:\n bn.log.log_debug(f'pointer_type: Unhandled node type: {node.type.kind}')\n return\n\n if check_if_base_type(pointee_type):\n pointee_type_spelling = pointee_type.spelling\n if pointee_type_spelling in void_types:\n # BinaryNinja can't parse the expression 'const void'.\n pointee_type_spelling = 'void'\n # If its a base type then no need to define pointee type.\n bn.log.log_debug(f'pointer_type: Parsing type string: {pointee_type_spelling}')\n bn_pointee_type, name = bv.parse_type_string(pointee_type_spelling)\n pointer = bn.Type.pointer(bv.arch, bn_pointee_type)\n else:\n pointee_node = pointee_type.get_declaration()\n if pointee_node.kind == CursorKind.NO_DECL_FOUND:\n # Some types of TypeKind.TYPEDEF have no declaration node because they the type is just a pointer.\n # example: typedef EXCEPTION_ROUTINE *PEXCEPTION_ROUTINE;\n bn.log.log_debug(f'pointer_type: No declaration found for: {pointee_type.spelling} \\n'\n f' pointee_type.kind: {pointee_type.kind}')\n if pointee_type.kind == TypeKind.FUNCTIONPROTO:\n # A special case happens when a type is a typedef for a function pointer - the function might be\n # an anonymous function that was not previously defined, so we must define it first (can't just parse\n # the string with parse_type_string().\n # Example: typedef void\n # (__stdcall *PIMAGE_TLS_CALLBACK) (\n # PVOID DllHandle,\n # DWORD Reason,\n # PVOID Reserved\n # );\n bn_pointee_name, bn_pointee_type = function_decl(node, bv)\n pointer = 
bn.Type.pointer(bv.arch, bn_pointee_type)\n elif pointee_type.kind == TypeKind.FUNCTIONNOPROTO:\n # FUNCTIONNOPROTO means there are no arguments, only a possible return type\n pointee_result_type = pointee_type.get_result()\n if check_if_base_type(pointee_result_type):\n # Result is a base type, thus no declaration node.\n # Example: long ()\n pointee_result_string = pointee_result_type.spelling\n if pointee_result_string in void_types:\n pointee_result_string = 'void'\n bn_result_type, bn_result_name = bv.parse_type_string(pointee_result_string)\n else:\n result_type = pointee_type.get_result().get_declaration()\n bn_result_name, bn_result_type = define_type(result_type, bv)\n pointer = bn.Type.pointer(bv.arch, bn.Type.function(bn_result_type, []))\n elif pointee_type.kind == TypeKind.POINTER:\n # we are dealing with a pointer to a pointer\n if check_if_base_type(pointee_type.get_pointee()):\n type_string = pointee_type.get_pointee().spelling\n if type_string in void_types:\n type_string = 'void'\n bn_pointee_type, bn_pointee_name = bv.parse_type_string(type_string)\n elif pointee_type.get_pointee().kind == TypeKind.POINTER:\n # We have multiple nested pointers.\n # Example: int ****a;\n # The problem here is that if the pointee type is also a pointer, then it has no declaration node,\n # so we can't call pointer_type() on it directly.\n nested_pointer_count = 1\n current_pointer_type = pointee_type.get_pointee()\n while current_pointer_type.kind == TypeKind.POINTER:\n nested_pointer_count += 1\n current_pointer_type = current_pointer_type.get_pointee()\n if check_if_base_type(current_pointer_type):\n bn_pointee_type, bn_pointee_name = bv.parse_type_string(current_pointer_type.spelling)\n else:\n bn_pointee_name, bn_pointee_type = define_type(current_pointer_type, bv)\n temp_bn_pointer_type = bn.Type.pointer(bv.arch, bn_pointee_type)\n for nesting_level in range(nested_pointer_count):\n temp_bn_pointer_type = bn.Type.pointer(bv.arch, temp_bn_pointer_type)\n bn_pointee_type = bn.Type.pointer(bv.arch, temp_bn_pointer_type)\n elif pointee_type.get_pointee().get_declaration().kind == CursorKind.NO_DECL_FOUND:\n # For some reason there is no declaration of the pointee.\n # Manually parse the type and hope it was previously defined.\n # TODO: Find a way to handle a case where the type was not already defined.\n print(f'pointee_type.get_pointee().get_named_type().kind: {pointee_type.get_pointee().get_named_type().kind}')\n # The reason I am parsing the pointee_type and not pointee_type.get_pointee() is that in some\n # cases the pointer is pointing to a function prototype that has no declaration, and it is much\n # easier to just parse the pointer to a known type then parse the underlying type.\n bn_pointee_type, bn_pointee_name = bv.parse_type_string(pointee_type.spelling)\n else:\n bn_pointee_name, bn_pointee_type = define_type(pointee_type.get_pointee().get_declaration(), bv)\n pointer = bn.Type.pointer(bv.arch, bn_pointee_type)\n else:\n bn_pointee_type, bn_pointee_name = bv.parse_type_string(node.underlying_typedef_type.spelling)\n pointer = bn.Type.pointer(bv.arch, bn_pointee_type)\n else:\n bn_pointee_type = bv.get_type_by_name(pointee_node.spelling)\n if bn_pointee_type is None:\n # need to define the pointee type before declaring the pointer\n bn_pointee_name, bn_pointee_type = define_type(pointee_node, bv)\n pointer = bn.Type.pointer(bv.arch, bn_pointee_type)\n else:\n # type already defined in the binaryView.\n pointer = bn.Type.pointer(bv.arch, bn_pointee_type)\n\n 
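# register the finished pointer type under the node's name so later lookups can reuse it\n    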
bv.define_user_type(node.spelling, pointer)\n bn.log.log_debug(f'pointer_type: Successfully defined : {node.spelling}')\n return node.spelling, pointer\n\n\ndef functionproto_type(node: Cursor, bv: bn.BinaryView):\n # A libclang node with a TypeKind FUNCTIONPROTO is exactly the same as a libclang node with a CursorKind FUNCTION\n if node.kind == CursorKind.TYPEDEF_DECL or node.kind == CursorKind.PARM_DECL or not node.is_definition():\n bn.log.log_debug(f'functionproto_type: Processing {node.spelling}')\n return function_decl(node, bv)\n else:\n # If the CursorKind is not TYPEDEF_DECL or PARM_DECL but it is a definition - it means the header file contains\n # the actual implementation of the function - we do not want to parse such functions.\n bn.log.log_debug(f'functionproto_type: {node.spelling} contains full function implementation. skipping.')\n pass\n\n\ndef constantarray_type(node: Cursor, bv: bn.BinaryView):\n bn.log.log_debug(f'constantarray_type: {node.type.spelling} {node.spelling} \\n'\n f' node.kind: {node.kind}, node.type.kind: {node.type.kind}')\n element_type = node.type.get_array_element_type()\n bn.log.log_debug(f'constantarray_type: element_type: {element_type.spelling} \\n'\n f' element_type.kind: {element_type.kind}')\n\n array = None\n element_type_node = None\n\n bn_element_type = bv.get_type_by_name(element_type.spelling)\n if bn_element_type:\n # element type is already defined in the binaryView\n array = bn.Type.array(bn_element_type, node.type.get_array_size())\n bn.log.log_debug(f'constantarray_type: {element_type.spelling} already defined in the binaryView.')\n elif node.type.get_array_element_type().get_declaration().is_anonymous():\n # Anonymous struct\\union\\enum as the array member type\n element_type_node = node.type.get_array_element_type().get_declaration()\n anonymous_name, bn_anonymous_type = define_anonymous_type(element_type_node, bv)\n array = bn.Type.array(bn_anonymous_type, node.type.get_array_size())\n bn.log.log_debug(f'constantarray_type: Successfully proccessed anonymous type: {bn_anonymous_type} .')\n else:\n if check_if_base_type(element_type):\n # If its a base type then it wont apear in bv.get_type_by_name() but it is still defined.\n var_type, name = bv.parse_type_string(element_type.spelling)\n array = bn.Type.array(var_type, node.type.get_array_size())\n else:\n # Not a libclang base type, need to define it normally in the binaryView.\n if node.type.get_array_element_type().kind == TypeKind.POINTER:\n # The element is a pointer, so it won't have a declaration.\n # Get the declaration of the pointed type and create a binaryNinja pointer object as the type.\n if check_if_base_type(node.type.get_array_element_type().get_pointee()):\n # The pointed type is a base type, parse it directly.\n bn_element_type, bn_element_name = bv.parse_type_string(\n node.type.get_array_element_type().get_pointee().spelling\n )\n pointer = bn.Type.pointer(bv.arch, bn_element_type)\n array = bn.Type.array(pointer, node.type.get_array_size())\n else:\n element_type_node = node.type.get_array_element_type().get_pointee().get_declaration()\n elif node.type.get_array_element_type().kind == TypeKind.CONSTANTARRAY:\n # The element type is another constant array, meaning we are dealing with a matrix.\n # Example: int a[3][4][5]\n if check_if_base_type(node.type.get_array_element_type().get_array_element_type()):\n # The underlying matrix type is a base type, parse it directly.\n bn_element_type, bn_element_name = bv.parse_type_string(\n 
node.type.get_array_element_type().get_array_element_type().spelling\n )\n temp_array = bn.Type.array(bn_element_type, node.type.get_array_element_type().get_array_size())\n array = bn.Type.array(temp_array, node.type.get_array_size())\n else:\n element_type_node = node.type.get_array_element_type().get_array_element_type().get_declaration()\n else:\n element_type_node = node.type.get_array_element_type().get_declaration()\n\n if not array:\n # If array is defined at this point it means we have an array of pointers or a matrix, in which case\n # it was already handled and defined above.\n bn_element_name, bn_element_type = define_type(element_type_node, bv)\n array = bn.Type.array(bn_element_type, node.type.get_array_size())\n bv.define_user_type(node.spelling, array)\n bn.log.log_debug(f'constantarray_type: Successfully defined: {node.type.spelling} {node.spelling}')\n return node.spelling, array\n\n\ndef incompletearray_type(node: Cursor, bv: bn.BinaryView):\n # TODO: There is no good way to parse an incomplete array into binaryNinja since we do not know its size.\n # For now, convert an incomplete array to a complete array with a very big size since it will probably be defined\n # on the heap anyway.\n bn.log.log_debug(f'incompletearray_type: Processing {node.type.spelling} {node.spelling}, \\n'\n f'node.kind: {node.kind}, node.type.kind: {node.type.kind}')\n bn_array_element_type = node.type.get_array_element_type()\n if check_if_base_type(bn_array_element_type):\n var_type, var_name = bv.parse_type_string(bn_array_element_type.spelling)\n elif bn_array_element_type.kind == TypeKind.POINTER:\n # The array element type is a pointer - it does not have a declaration node so we cannot directly call\n # define_type().\n # Example: int *a[]\n if check_if_base_type(bn_array_element_type.get_pointee()):\n pointee_type_string = bn_array_element_type.get_pointee().spelling\n if pointee_type_string in void_types:\n pointee_type_string = 'void'\n pointee_var_type, pointee_var_name = bv.parse_type_string(pointee_type_string)\n else:\n pointee_var_name, pointee_var_type = define_type(bn_array_element_type.get_pointee().get_declaration(), bv)\n var_type = bn.Type.pointer(bv.arch, pointee_var_type)\n else:\n var_name, var_type = define_type(bn_array_element_type.get_declaration(), bv)\n array = bn.Type.array(var_type, INCOMPLETE_ARRAY_ARBITRARY_SIZE)\n\n return node.spelling, array\n\n\ndef check_if_base_type(type: Type):\n # In libclang, a base type is a type that has no declaration since it is a baes\n # type of the c language.\n # Examples of base types in libclang: Typekind.UCHAR, Typekind.INT etc\n if type.kind in base_types:\n bn.log.log_debug(f'check_if_base_type: {type.spelling} is a base type.')\n return True\n else:\n return False\n\n\ndef typedef_decl(node: Cursor, bv: bn.BinaryView):\n bn.log.log_debug(f'typedef_decl: {node.underlying_typedef_type.spelling} {node.spelling}, \\n'\n f'underlying_typedef_type: {node.underlying_typedef_type.kind}')\n if node.spelling and bv.get_type_by_name(node.spelling):\n bn.log.log_debug(f'typedef_decl: Type already defined')\n return node.spelling, bv.get_type_by_name(node.spelling)\n elif not node.underlying_typedef_type.spelling:\n try:\n var_type, name = bv.parse_type_string(f'{node.type.spelling} {node.spelling}')\n except Exception as e:\n bn.log.log_debug(f'typedef_decl: Failed to parse {node.type.spelling} {node.spelling}, with exception {e}')\n else:\n\n # Sanitize the type - remove any compiler directives such as __aligned and such.\n 
underlying_typedef_type_string = remove_compiler_directives(node.underlying_typedef_type.spelling)\n try:\n var_type, name = bv.parse_type_string(f'{underlying_typedef_type_string}')\n # The reason we are not using the name inside the parsed string is that sometimes you get a typedef\n # like 'int [1] td', and if you parse it like that it's a binaryNinja exception.\n # instead we parse 'int [1]' and attach the name of the typedef to it afterwards.\n name = node.spelling\n bn.log.log_debug(f'typedef_decl: Successfully parsed {underlying_typedef_type_string} {node.spelling}')\n except SyntaxError as se:\n if 'syntax error' in str(se):\n if node.spelling.endswith('_t'):\n # Some variables names are internal to binaryNinja and cannot be used. These var names usually\n # end with _t, for example size_t \\ ptrdiff_t etc.\n # In order to not clash with the internal vars, change the _t to _T.\n altered_spelling = node.spelling[:-1] + 'T'\n var_type, name = bv.parse_type_string(f'{underlying_typedef_type_string} {altered_spelling}')\n elif 'is not defined' in str(se):\n var_type, name = bv.define_user_type(underlying_typedef_type_string)\n else:\n bn.log.log_debug(f'typedef_decl: Failed to parse {node.underlying_typedef_type.spelling} '\n f'{node.spelling}')\n\n try:\n bv.define_user_type(name, var_type)\n bn.log.log_debug(f'typedef_decl: Successfully processed {node.underlying_typedef_type.spelling} '\n f'{node.spelling}')\n return str(name), var_type\n except Exception as e:\n bn.log.log_debug(f'typedef_decl: Failed Processing {node.underlying_typedef_type.spelling} '\n f'{node.spelling} with exception {e}')\n\n\ndef remove_compiler_directives(type_str: str):\n sanitized_str = ''\n for str_token in type_str.split():\n if str_token in compiler_directives:\n continue\n sanitized_str += str_token + ' '\n return sanitized_str\n\n\ndef var_decl(node: Cursor, bv: bn.BinaryView):\n bn.log.log_debug(f'var_decl: Processing var {node.underlying_typedef_type.spelling} {node.spelling}')\n var_type, name = bv.parse_type_string(f'{node.type.spelling} {node.spelling}')\n\n try:\n bv.define_user_type(name, var_type)\n bn.log.log_debug(f'var_decl: Successfully processed var {node.underlying_typedef_type.spelling} '\n f'{node.spelling}')\n return str(name), var_type\n except Exception as e:\n bn.log.log_debug(f'var_decl: Failed Processing var {node.underlying_typedef_type.spelling} {node.spelling} '\n f'with exception {e}')\n\n\ndef function_decl(node: Cursor, bv: bn.BinaryView):\n func_params: List = list()\n variable_arguments = False\n function_calling_convention: bn.CallingConvention = bv.platform.default_calling_convention\n\n bn.log.log_debug(f'function_decl: Processing function {node.spelling} \\n'\n f' node.kind: {node.kind}, node.type.kind: {node.type.kind}')\n\n if node.kind == CursorKind.TYPEDEF_DECL:\n if node.type.kind == TypeKind.TYPEDEF:\n if node.underlying_typedef_type.kind == TypeKind.POINTER:\n # A special case in which we have a typedef for a function pointer to an anonymous function, so the\n # underlying type is a POINTER and not the actual function declaration. 
because it is an anonymous\n # function defined within a typedef there is no declaration node for it, only a type node.\n # Example: typedef void\n # (__stdcall *PIMAGE_TLS_CALLBACK) (\n # PVOID DllHandle,\n # DWORD Reason,\n # PVOID Reserved\n # );\n arg_types = node.underlying_typedef_type.get_pointee().argument_types()\n node_result_type = node.underlying_typedef_type.get_pointee().get_result()\n variable_arguments = node.underlying_typedef_type.get_pointee().is_function_variadic()\n else:\n arg_types = node.underlying_typedef_type.argument_types()\n node_result_type = node.underlying_typedef_type.get_result()\n variable_arguments = node.underlying_typedef_type.is_function_variadic()\n else:\n arg_types = node.type.argument_types()\n node_result_type = node.type.get_result()\n variable_arguments = node.type.is_function_variadic()\n # This is a libclang function prototype - need to use node.argument_types() to get all types.\n for param_type in arg_types:\n bn.log.log_debug(f'function_decl: Processing parameter type - {param_type.spelling} \\n'\n f' param_type.kind: {param_type.kind}')\n if param_type.kind == TypeKind.INCOMPLETEARRAY:\n # An incomplete array cannot be parsed by binary ninja, need to manually create it.\n # This type usually has no declaration node, so cannot call define_type() on it.\n # Example: int a[]\n if check_if_base_type(param_type.get_array_element_type()):\n arr_var_type, var_name = bv.parse_type_string(param_type.get_array_element_type().spelling)\n elif param_type.get_array_element_type().kind == TypeKind.POINTER:\n # Example: int *a[]\n # The pointer type has no declaration node so can't call define_type() directly.\n pointee_name, pointee_type = define_type(\n param_type.get_array_element_type().get_pointee().get_declaration(), bv\n )\n arr_var_type = bn.Type.pointer(bv.arch, pointee_type)\n # TODO: Need to figure out a way to get the name of a parameter of this type.\n var_name = ''\n else:\n var_name, arr_var_type = define_type(param_type.get_array_element_type().get_declaration(), bv)\n var_type = bn.Type.array(arr_var_type, INCOMPLETE_ARRAY_ARBITRARY_SIZE)\n else:\n var_type, var_name = bv.parse_type_string(f'{remove_compiler_directives(param_type.spelling)}')\n p = bn.FunctionParameter(var_type, str(var_name))\n func_params.append(p)\n elif node.type.kind == TypeKind.POINTER:\n # If we got here, it means the pointee type is a FUNCTIONPROTO but has no declaration (if it had a declaration\n # then node arguemnt would be the declaration node itself and not a pointer.\n # Example: typedef struct _NCB {\n # UCHAR ncb_command;\n # void (__stdcall *ncb_post)( struct _NCB * );\n # } NCB, *PNCB;\n # ncb_post is a pointer to a FUNCTIONPROTO that has no Cursor node, only a type node.\n arg_types = node.type.get_pointee().argument_types()\n node_result_type = node.type.get_pointee().get_result()\n variable_arguments = node.type.get_pointee().is_function_variadic()\n for param_type in arg_types:\n bn.log.log_debug(f'function_decl: Processing pointee parameter type - {param_type.spelling} \\n'\n f' param_type.kind: {param_type.kind}')\n param_type_string = remove_compiler_directives(param_type.spelling)\n if param_type.kind == TypeKind.INCOMPLETEARRAY:\n # Special case where we have an incomplete array without a declaration node, so we can't use\n # define_type().\n # Example: const PROPSPEC []\n # Since we know the base type of the array is already defined, all we need to do is modify the string\n # slightly so that binaryNinja can parse it (binja parser doesn't 
accept an incomplete array, it must\n # have an array size.\n # TODO: Find a more elegant way to insert an array size to the string.\n param_type_string = param_type_string.replace('[]', f'[{INCOMPLETE_ARRAY_ARBITRARY_SIZE}]')\n var_type, var_name = bv.parse_type_string(param_type_string)\n p = bn.FunctionParameter(var_type, str(var_name))\n func_params.append(p)\n else:\n node_result_type = node.type.get_result()\n if node.type.kind == TypeKind.FUNCTIONNOPROTO:\n # FUNCTIONNOPROTO means there are no arguments, only a possible return type\n pass\n else:\n variable_arguments = node.type.is_function_variadic()\n for param in node.get_arguments():\n bn.log.log_debug(f'function_decl: Processing parameter - {param.type.spelling} {param.spelling} \\n'\n f' param.kind: {param.kind}, param.type.kind: {param.type.kind}')\n var_name, var_type = define_type(param, bv)\n p = bn.FunctionParameter(var_type, str(var_name))\n func_params.append(p)\n bn.log.log_debug(f'function_decl: Successfully Processed parameter - {param.type.spelling} '\n f'{param.spelling}')\n\n func_return_val_type, ret_name = bv.parse_type_string(remove_compiler_directives(node_result_type.spelling))\n\n # No direct way to get the calling convention specified in the source code, need to iterate tokens and find it\n for token in node.get_tokens():\n if token.kind == TokenKind.KEYWORD:\n if token.spelling == '__cdecl':\n function_calling_convention: bn.CallingConvention = bv.platform.cdecl_calling_convention\n elif token.spelling == '__fastcall':\n function_calling_convention: bn.CallingConvention = bv.platform.fastcall_calling_convention\n elif token.spelling == '__stdcall':\n function_calling_convention: bn.CallingConvention = bv.platform.stdcall_calling_convention\n\n function_type = bn.Type.function(func_return_val_type,\n func_params,\n calling_convention=function_calling_convention,\n variable_arguments=variable_arguments\n )\n\n try:\n bv.define_user_type(node.spelling, function_type)\n bn.log.log_debug(f'function_decl: Successfully processed function {node.spelling}')\n return node.spelling, function_type\n except Exception as e:\n bn.log.log_debug(f'function_decl: Failed Processing function {node.spelling} with exception {e}')\n\n\ndef enum_decl(node: Cursor, bv: bn.BinaryView):\n bn.log.log_debug(f'enum_decl: Processing enum {node.type.spelling} {node.spelling}')\n\n enum = bn.Enumeration()\n for enum_member in node.get_children():\n enum.append(enum_member.spelling, enum_member.enum_value)\n\n try:\n if node.spelling:\n enum_name = node.spelling\n else:\n enum_name = node.type.spelling\n bv.define_user_type(enum_name, bn.Type.enumeration_type(bv.arch, enum))\n bn.log.log_debug(f'enum_decl: Successfully processed enum {node.spelling}')\n return node.spelling, bn.Type.enumeration_type(bv.arch, enum)\n except Exception as e:\n bn.log.log_debug(f'enum_decl: Failed Processing enum {node.spelling} with exception {e}')\n\n\ndef struct_decl(node: Cursor, bv: bn.BinaryView):\n struct = bn.Structure()\n struct.width = node.type.get_size()\n struct.alignment = node.type.get_align()\n if node.spelling:\n struct_name = node.spelling\n else:\n # A struct can be defined anonymously and assigned via a typedef, which means the struct_decl node itself\n # will have no spelling.\n # example: typedef struct {\n # DWORD Version;\n # GUID Guid;\n # SYSTEM_POWER_CONDITION PowerCondition;\n # DWORD DataLength;\n # BYTE Data[1];\n # } SET_POWER_SETTING_VALUE, *PSET_POWER_SETTING_VALUE;\n struct_name = node.type.spelling\n\n 
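# the resolved name (explicit, or taken from the typedef spelling for anonymous structs) is used for every definition below\n    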
bn.log.log_debug(f'struct_decl: Processing struct {node.spelling}')\n\n # In order to avoid recursion problems with structs, always define the struct name as a binaryNinja forward decl\n bv.define_user_type(struct_name, bn.Type.structure_type(bn.Structure()))\n\n # check if struct is a forward declaration within the source code - if it is not a definition, then it is a forward\n # decl, and no fields should be defined at this point.\n if node.is_definition():\n for field in node.type.get_fields():\n bn.log.log_debug(f'struct_decl: Processing struct field {field.spelling}')\n\n if is_recursive_field(field, bv):\n forward_decl_struct = bn.Structure()\n forward_decl_struct_name = field.type.get_pointee().get_declaration().spelling\n bv.define_user_type(forward_decl_struct_name, bn.Type.structure_type(forward_decl_struct))\n t = bv.get_type_by_name(forward_decl_struct_name)\n struct.append(t, forward_decl_struct_name)\n else:\n var_type = bv.get_type_by_name(field.spelling)\n if not var_type:\n # Need to define the field type\n var_name, var_type = define_type(field.get_definition(), bv)\n struct.append(var_type, field.spelling)\n bn.log.log_debug(f'struct_decl: Successfully processed struct field {field.spelling}')\n\n try:\n if node.kind == CursorKind.UNION_DECL:\n # set type to union\n struct.type = bn.StructureType.UnionStructureType\n\n bv.define_user_type(struct_name, bn.Type.structure_type(struct))\n bn.log.log_debug(f'struct_decl: Successfully processed struct {struct_name}')\n return struct_name, bn.Type.structure_type(struct)\n except Exception as e:\n bn.log.log_debug(f'struct_decl: Failed Processing struct {struct_name} with exception {e}')\n\n\ndef define_anonymous_type(node: Cursor, bv: bn.BinaryView) -> bn.Type:\n # An anonymous type must be either a Struct\\UNION\\ENUM.\n # In order to simplify working with binaryNinja, an anonymized type is de-anonymized:\n # The name of the anonymous type is a hash of its location in the source file prepended by 'anon_'\n bn.log.log_debug(f'define_anonymous_type: Processing {node.type.spelling}')\n\n struct = bn.Structure()\n struct.width = node.type.get_size()\n struct.alignment = node.type.get_align()\n struct_name = 'anon_' + xxhash.xxh64_hexdigest(node.type.spelling)\n\n for field in node.type.get_fields():\n bn_field_type = bv.get_type_by_name(field.spelling)\n field_name = field.spelling\n if not bn_field_type:\n # Need to define the field type\n # if field.is_anonymous():\n # field_name, bn_field_type = define_anonymous_type(field, bv)\n # else:\n field_name, bn_field_type = define_type(field.get_definition(), bv)\n bn.log.log_debug(f'define_anonymous_type: Appending field - {bn_field_type} {field_name}')\n struct.append(bn_field_type, field_name)\n\n # Check if the underlying struct is a union\n if node.type.kind == TypeKind.ELABORATED:\n if node.type.get_named_type().get_declaration().kind == CursorKind.UNION_DECL:\n # set type to union\n struct.type = bn.StructureType.UnionStructureType\n\n return struct_name, bn.Type.structure_type(struct)\n\n\ndef is_recursive_field(field: Cursor, bv: bn.BinaryView):\n # Check if a struct field is recursive.\n # If the field is a pointer to a type whos' spelling is the same as the fields' semantic parents' type spelling,\n # then this is a recursive field.\n bn.log.log_debug(f'is_recursive_field: Processing field {field.type.spelling} {field.spelling} \\n'\n f'field.type.kind: {field.type.kind}, field.kind: {field.kind}')\n\n field_type_declaration_node = None\n if field.type.kind == 
TypeKind.POINTER:\n        pointee_type = field.type.get_pointee()\n        if pointee_type.spelling == field.semantic_parent.type.spelling:\n            return True\n    return False\n\n\ndef field_decl(node: Cursor, bv: bn.BinaryView):\n    bn.log.log_debug(f'field_decl: Processing {node.type.spelling} {node.spelling}'\n                     f' node.type.kind: {node.type.kind}, node.kind: {node.kind}')\n    try:\n        if not is_recursive_field(node, bv):\n            if check_if_base_type(node.type):\n                field_type, field_name = bv.parse_type_string(f'{node.type.spelling} {node.spelling}')\n            else:\n                field_name, field_type = define_type(node, bv)\n            return str(field_name), field_type\n        else:\n            bn.log.log_debug(f'field_decl: Unhandled recursive field {node.type.spelling} {node.spelling}')\n    except Exception as e:\n        bn.log.log_debug(f'field_decl: Failed Processing field {node.type.spelling} {node.spelling} with exception {e}')\n","sub_path":"ast_handlers.py","file_name":"ast_handlers.py","file_ext":"py","file_size_in_byte":37137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"570127579","text":"from algorithms import *\nfrom color import *\nimport time\nimport sys\n\n\ndef TimeTest(strFirst, strSecond, countOperations):\n    print(\"Length of the first string = \", len(strFirst))\n    print(\"Length of the second string = \", len(strSecond))\n\n    t1 = time.process_time()\n    for _ in range(countOperations):\n        Levenshtein(strFirst, strSecond)\n    t2 = time.process_time()\n    print(\"Levenshtein = \", t2 - t1)\n\n    t1 = time.process_time()\n    for _ in range(countOperations):\n        LevenshteinRecursion(strFirst, strSecond)\n    t2 = time.process_time()\n    print(\"Levenshtein (recursion) = \", t2 - t1)\n\n    t1 = time.process_time()\n    for _ in range(countOperations):\n        DamerauLevenshtein(strFirst, strSecond)\n    t2 = time.process_time()\n    print(\"Damerau-Levenshtein = \", t2 - t1)\n\n    t1 = time.process_time()\n    for _ in range(countOperations):\n        DamerauLevenshteinRecursion(strFirst, strSecond)\n    t2 = time.process_time()\n    print(\"Damerau-Levenshtein (recursion) = \", t2 - t1)\n\n\n# sys.setrecursionlimit(1500)\n","sub_path":"lab_01/time_test.py","file_name":"time_test.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515124842","text":"class Solution(object):\r\n    def flipAndInvertImage(self, A):\r\n        \"\"\"\r\n        :type A: List[List[int]]\r\n        :rtype: List[List[int]]\r\n        \"\"\"\r\n        res = list()\r\n        for a in A:\r\n            a.reverse()\r\n            res.append([1 - i for i in a])\r\n        return res","sub_path":"LeetCode-Python/0832.翻转图像/0832-翻转图像.py","file_name":"0832-翻转图像.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65272698","text":"import csv\nimport itertools\nimport os\nimport re\n\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\n\nimport scipy\n\nimport munch\n\nimport spartan\n\n\n\ndef recode_villages(df):\n    map_func = lambda x: village_id_map[x.upper()]\n    \n    new_codes = df.Village.apply(map_func)\n    df.Village = new_codes\n\n##########################################\n\ndef recode_dead(df):\n    def recode_func(x):\n        # this is treated as an unknown case\n        if pd.isnull(x):\n            return x\n\n        x = unicode(x)\n\n        # True means DEAD\n        # False means LIVE or NOT-DEAD\n        # None means unknown\n\n        try:\n            # deal with Live type cases\n            if x.upper().startswith('L'):\n                return False\n\n\n            if x.startswith('0'):\n                return False\n\n\n            # deal with Dead type cases\n            if x.upper().startswith('D'):\n                return 
True\n\n\n            if x.startswith('1'):\n                return True\n\n\n            # deal with unknown type cases\n            if x.upper().startswith('UN'):\n                return None\n        except AttributeError:\n            return x\n\n        msg = \"The value {x} was not expected and this function must be corrected to continue.\".format(x=x)\n        raise ValueError(msg)\n\n    new_dead = df.Dead.apply(recode_func)\n    df.Dead = new_dead\n\n##########################################\n\ndef recode_teneral(df):\n    def recode_func(x):\n\n        # this is treated as an unknown case\n        if pd.isnull(x):\n            return x\n\n        x = unicode(x)\n\n        # True means teneral\n        # False means NOT-teneral\n        # None means unknown\n\n        try:\n            # deal with NOT-teneral type cases\n            if x.upper().startswith('N'):\n                return False\n\n            if x.startswith('0'):\n                return False\n\n            # deal with Teneral type cases\n            if x.upper().startswith('T'):\n                return True\n            \n            if x.startswith('1'):\n                return True\n\n\n            # Deal with unknown type cases (None means unknown, as documented above)\n            if x.upper().startswith('UN'):\n                return None\n        except AttributeError:\n            return x\n\n        msg = \"The value {x} was not expected and this function must be corrected to continue.\".format(x=x)\n        raise ValueError(msg)\n    \n    \n    new_teneral = df.Teneral.apply(recode_func)\n    df.Teneral = new_teneral\n\n##########################################\n\ndef recode_positives(df):\n    def recode_func(x):\n        # this is treated as an unknown case\n        if pd.isnull(x):\n            return x\n\n        y = unicode(x)\n\n        # deal with Unknown type cases\n        if y.upper().startswith('UN'):\n            return None\n\n        if y.upper().startswith('DEAD'):\n            return None\n\n\n        # deal with Positive type cases\n        if y.startswith('1'):\n            return True\n\n\n        if y.upper().startswith('TRUE'):\n            return True\n\n        if y.upper().startswith('P'):\n            return True\n\n        if y.upper().startswith('Y'):\n            return True\n\n\n        # deal with Negative type cases\n        if y.upper().startswith('NO'):\n            return False\n\n        if y.upper().startswith('FALSE'):\n            return False\n\n\n        if y.startswith('0'):\n            return False\n\n\n        msg = \"The value {x} was not expected and this function must be corrected to continue.\".format(x=x)\n        raise ValueError(msg)\n\n\n    new_prob = df.prob.apply(recode_func)\n    df.prob = new_prob\n    \n    new_midgut = df.midgut.apply(recode_func)\n    df.midgut = new_midgut\n    \n    new_sal_gland = df.sal_gland.apply(recode_func)\n    df.sal_gland = new_sal_gland\n\n##########################################\n\ndef recode_species(df):\n\n    recode_func = lambda x: ''.join(x.split('.')).capitalize()\n\n    new_Species = df.Species.apply(recode_func)\n    df.Species = new_Species\n\n##########################################\n\ndef recode_sex(df):\n\n    recode_func = lambda x: x.upper()\n\n    new_Sex = df.Sex.apply(recode_func)\n    df.Sex = new_Sex\n    \n##########################################\n\ndate_delim = re.compile('[\\./-]')\n\ndef cast_unicode_as_date(x):\n    if not isinstance(x, unicode):\n        return x\n    \n    parts = date_delim.split(x)\n    \n    if len(parts) != 3:\n        return x\n    \n    if len(parts[0]) != 4:\n        return x\n    \n    return dt.datetime(int(parts[0]), int(parts[1]), int(parts[2]))\n\ndef recode_date(df):\n    new_date = df.Date.apply(cast_unicode_as_date)\n    df.Date = new_date\n\n##########################################\n\nfly_no_delim = re.compile('[\\W\\s]', re.UNICODE)\n\ndef split_number(x):\n#    ipdb.set_trace()\n    \n    # to prevent unicode creating a string with a '.' 
AFTER\n # the numbert we are intersted in!\n try:\n if isinstance(x,float):\n return int(x)\n except ValueError as exc:\n if 'NAN' in exc.message.upper():\n return x\n \n x = unicode(x)\n parts = fly_no_delim.split(x)\n \n try:\n number = int(parts[-1])\n return number\n except ValueError:\n return x\n\n\ndef recode_fly_number(df):\n \n new_fly_number = df.Fly_Number.apply(split_number)\n df.Fly_Number = new_fly_number","sub_path":"src/gs_ddRAD2015/utils/recode.py","file_name":"recode.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"452074052","text":"\nimport utils.factors as f\n\n\ndef euler50(x):\n primes = [2]\n\n sequences = {}\n sequences[0] = {0:2}\n\n i = 1\n for n in range(3, x+1):\n if f.is_prime(n):\n primes.append(n)\n sequences[i] = {}\n\n for j in sequences.keys():\n for k in sequences[j].keys():\n sequences[i][k] = sequences[j][k] + n\n\n i += 1\n\n return primes, sequences\n\n\nif __name__ == '__main__':\n import sys\n\n if len(sys.argv) < 2:\n euler50(1000000)\n else:\n x = int(sys.argv[1])\n euler50(x)\n","sub_path":"python/euler50.py","file_name":"euler50.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"550662851","text":"\n# QT imports\nfrom PyQt4 import Qt, QtCore, QtGui\nfrom ..config import Config\nfrom ..rest import Rest\nfrom ._combobox import combobox\nfrom ..interface import ui\nfrom ..decorators import check_start_button\n\n\nclass WidthCombobox(combobox):\n\t\"\"\"\n\tWidth setting for a DL Project\n\t\"\"\"\n\t\n\tdef __init__(self):\n\t\tself.valid = False\n\t\tself.widget = ui.width_combobox\n\t\tself.case_sense = QtCore.Qt.CaseSensitive\n\t\tself.width = None\n\t\t# validity\n\t\tself.valid = False\n\t\t# error message\n\t\tself.error_msg = None\n\t\t# build the widget\n\t\tself.build_widget(1,editable=True)\n\t\t# add validator\n\t\tself.widget.lineEdit().setValidator(\n\t\t\tQtGui.QIntValidator(24,8192,self.widget))\n\t\t# override the keyPressEvent\n\t\tui.width_combobox.keyPressEvent = self.keyPressEvent\n\t\tself.populate()\n\t\tself.connect_signals()\n\t\n\tdef connect_signals(self):\n\t\tself.widget.activated.connect(self.on_width_combobox_changed)\n\n\t@QtCore.pyqtSlot()\n\tdef on_width_combobox_changed(self,*args):\n\t\t#print \"\\x1b[38;5;91mon_width_combobox_changed\\x1b[m:\",self,args\n\t\tself.validate()\n\n\t@check_start_button\n\tdef validate(self):\n\t\t# get the current value of the widget\n\t\tcurrent_text = self.widget.currentText()\n\t\tif str(current_text) == '':\n\t\t\tvalue = 0\n\t\telif not str(current_text).isdigit():\n\t\t\treturn\n\t\telse:\n\t\t\tvalue = int(current_text)\n\t\t\tself.width = value\n\t\tif value < 24:\n\t\t\tself.valid = False\n\t\t\tself.stylize_entry_widget(highlight=True)\n\t\t\tself.error_msg = 'Minimum width is 24'\n\t\t\treturn False\n\t\telif value > 8192:\n\t\t\tself.valid = False\n\t\t\tself.stylize_entry_widget(highlight=True)\n\t\t\tself.error_msg = 'Maximum width is 8192'\n\t\t\treturn False\n\t\telse:\n\t\t\tself.valid = True\n\t\t\tself.stylize_entry_widget(highlight=False)\n\t\t\tself.error_msg = None\n\t\t\treturn True\n\t\n\tdef keyPressEvent(self,event):\n\t\t# return / enter / escape should clear the focus\n\t\tif event.key() == QtCore.Qt.Key_Return or \\\n\t\t event.key() == QtCore.Qt.Key_Enter or \\\n\t\t event.key() == 
QtCore.Qt.Key_Escape:\n\t\t\tui.width_combobox.clearFocus()\n\t\t\treturn\n\t\t# simulate a tab order between width and height\n\t\tif event.key() == QtCore.Qt.Key_Tab:\n\t\t\t# focus the height widget\n\t\t\tui.height_combobox.setFocus()\n\t\t# pass the keypress to the original widget\n\t\tQtGui.QComboBox.keyPressEvent(ui.width_combobox,event)\n\t\t# emit the activated signal which will trigger\n\t\t# the validation in the main settings object\n\t\tui.width_combobox.activated.emit(0)\n\n\n\tdef build_table_data(self):\n\t\t#self.table_data = Rest.get_resolutions()\n\t\tresolutions = Rest.get_resolutions()\n\t\tself.table_data = set([obj.width for obj in resolutions])\n\n\tdef populate(self):\n\t\t# get the current widths\n\t\tself.build_table_data()\n\t\tfor width in self.table_data:\n\t\t\tself.source_model.insertRow(0)\n\t\t\tself.source_model.setData(self.source_model.index(0,0), width)\n\t\t\t\n\t\t# default sorting\n\t\tself.sort()\n\n\tdef set_width(self,width=None,default=True):\n\t\t\"\"\"\n\t\tSet the width combobox to the given value.\n\t\t\"\"\"\n\t\tif not width and default:\n\t\t\twidth = str(Config.DEFAULT_WIDTH)\n\t\tif not width:\n\t\t\treturn\n\t\tself.widget.lineEdit().setText(str(width))\n\t\t# validate the new value\n\t\tself.validate()\n\t\t# emit the changed signals\n\t\tself.widget.activated.emit(0)\n\n\tdef sort(self):\n\t\tself._sort(0)\n\n\tdef get_current_selection(self):\n\t\t\"\"\"\n\t\tGet the current width selection from the combobox\n\t\t\"\"\"\n\t\treturn self._get_current_selection(0)\n\n\n","sub_path":"widgets/width_combobox.py","file_name":"width_combobox.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"132352555","text":"# Scrape top-level comments from hot AskReddit threads and plot the median comment score by minutes-since-post band\n\n\nimport config\nimport praw\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nreddit = praw.Reddit(client_id=config.client_id\n                     , client_secret=config.client_secret\n                     , user_agent='testscript by /u/fakebot3'\n                     , username=config.reddit_username\n                     , password=config.reddit_password)\n\ncomments = []\nfor submission in reddit.subreddit('askreddit').hot(limit=10):\n    print(submission.title)\n    sub = reddit.submission(id=submission)\n    submission.comments.replace_more(limit=None)\n    submission.comment_sort = 'top'\n    post_time = submission.created_utc\n\n    for top_level_comment in submission.comments:\n        sec_since_post = top_level_comment.created_utc - post_time\n        min_since_post = round(sec_since_post / 60)\n        if min_since_post <= 5:\n            band = '01 - 5 mins'\n        elif min_since_post <= 15:\n            band = '02 - 15 mins'\n        elif min_since_post <= 30:\n            band = '03 - 30 mins'\n        elif min_since_post <= 45:\n            band = '04 - 45 mins'\n        elif min_since_post <= 60:\n            band = '05 - 60 mins'\n        elif min_since_post <= 90:\n            band = '06 - 90 mins'\n        elif min_since_post <= 120:\n            band = '07 - 120 mins'\n        elif min_since_post <= 180:\n            band = '08 - 180 mins'\n        else:\n            continue\n        details = (top_level_comment.id, band, top_level_comment.score)\n        comments.append(details)\n\ndf = pd.DataFrame.from_records(comments, columns=['id', 'band', 'score'])\n\ndf2 = df.groupby('band').median()\ndf2.sort_index(inplace=True)\n\noutput = df2.plot(figsize=(10, 4), 
legend=False)\nplt.savefig('test.png')\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"askreddit_scraper.py","file_name":"askreddit_scraper.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"249246729","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Notebook 1 - Prime Numbers\n# \n# A Jupyter Notebook to find the 9991st to 10000th prime numbers.\n\n# In[2]:\n\n\ndef is_prime(number):\n if number == 0 or number == 1: \n return False\n \n for x in range(2, (number // 2)):\n if number % x == 0:\n return False\n \n return True\n\n\n# In[3]:\n\n\nnumber = 0\nprime_counter = 0\nprimes_to_print = []\nwhile prime_counter < 10000:\n number += 1\n if is_prime(number):\n prime_counter += 1\n if prime_counter <= 9990:\n continue\n '''\n The 'continue' statement is used here because we do not wish\n to print the current prime number because it is prior to the\n 9991st prime number. \n '''\n else:\n primes_to_print.append(number)\n \nprint(primes_to_print)\n\nf = open(\"prime.txt\", \"w\")\nf.write(str(primes_to_print))\nf.close()\n\n","sub_path":"Module3_Notebook1.py","file_name":"Module3_Notebook1.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437236262","text":"from .pages.main_page import MainPage\nimport pytest\n\nurl = \"http://ati.su/\"\n\n@pytest.mark.ui\n@pytest.mark.parametrize(\n 'title_page', [\"АТИ – биржа грузоперевозок. Грузы, транспорт, тендеры.\"])\ndef test_open(browser, title_page):\n page = MainPage(browser, url)\n page.open_browser()\n assert page.title() == title_page, \"Title not equal\"\n\n\n@pytest.mark.ui\n@pytest.mark.parametrize('correct_distance', [\"724\"])\ndef test_distance_result(browser, correct_distance):\n page = MainPage(browser, url)\n page.inpt_from(\"Санкт-Петербург\")\n page.inpt_to(\"Москва\")\n page.click_calc_button()\n distance = page.distance()\n assert distance == correct_distance, \\\n f\"Wrong distance: {distance}, must be {correct_distance}\"","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444736633","text":"import sys\n\nimport os\nimport web\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\ncurrnt_path = os.path.dirname(os.path.abspath(__file__))\nrender = web.template.render('templates/')\n\nurls = (\n '/', 'index',\n '/crash_stack', 'crash_stack'\n)\n\nimport time\n\nDATE_FORMAT = \"%Y/%m/%d %H:%M:%S\"\n\nclass index:\n def GET(self):\n return render.index()\n\n def POST(self):\n\n client_request_log = \"client_request.log\"\n with open(client_request_log, \"a\") as f:\n f.write(time.strftime(DATE_FORMAT) + \" recv create crash log request from: \" + web.ctx.ip + \"\\n\")\n\n input_data = web.input()\n\n so_build_date = input_data.build_date\n crash_log = input_data.crash_log\n\n crash_log_file = \"crash.log\"\n with open(crash_log_file, \"w\") as f:\n f.write(crash_log)\n\n symbol_path = (\"/var/www/xtrunk/symbol/\" + so_build_date + \"/\")\n session_so_path = (symbol_path + \"libsessionvc.so\")\n so_version = os.popen(\"strings %s | grep mmpc_version \" % (session_so_path)).read()\n\n crash_stack = os.popen(\n \"cat '%s' | ndk-stack -sym '%s' | head -n 30\" % (crash_log_file, symbol_path)).read()\n if crash_stack == \"\":\n return \"\\nServer %s \\n\\nNo crash found!\" % (so_version)\n\n if 
\"com.yunos.tv.videochat\" not in crash_stack and \"com.yunos.videochat.phone\" not in crash_stack:\n return \"\\nServer %s \\n\\nThe crash log is not from videochat!\" % (so_version)\n\n # find bug hander\n\n bug_hanlder = self.getBugHandler(crash_stack)\n if bug_hanlder == \"\":\n return \"\\nServer %s \\n\\n%s \\n\\nbug hanlder not found, please contact xiaocheng\" % (\n so_version, crash_stack)\n\n return \"\\nServer %s \\n\\n%s \\n\\nplease contact the bug handler: %s\" % (\n so_version, crash_stack, bug_hanlder)\n\n # \treturn render.crash_stack(\"\\nServer %s \\n\\n %s\" % (so_version, crash_stack))\n\n def getBugHandler(self, crash_stack):\n\n crash_stack_content = \"\"\n for line in crash_stack:\n if \"videochat\" not in line:\n crash_stack_content += line\n\n video_keyword = [\"openh264\", \"video\"]\n ado_keyword = [\"ado\"]\n audio_keyword = [\"opensles\", \"audio\", \"decoder\"]\n session_keyword = [\"session\", \"jxcore\"]\n transport_keyword = [\"transport\"]\n\n bug_hanlder = \"\"\n if any(keyword in crash_stack_content for keyword in video_keyword):\n bug_hanlder = \"tiantu\"\n if any(keyword in crash_stack_content for keyword in audio_keyword):\n bug_hanlder = \"mafeng\"\n if any(keyword in crash_stack_content for keyword in session_keyword):\n bug_hanlder = \"xiaocheng\"\n if any(keyword in crash_stack_content for keyword in ado_keyword):\n bug_hanlder = \"xizhu\"\n if any(keyword in crash_stack_content for keyword in transport_keyword):\n bug_hanlder = \"mishao\"\n\n return bug_hanlder\n\nif __name__ == \"__main__\":\n app = web.application(urls, globals())\n app.run()\n","sub_path":"webpy/create_mmpc_stack/mmpccs_main.py","file_name":"mmpccs_main.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309883128","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom .api import TaskViewSet, BidViewSet, BidderAPI,OnGoingAPI,UpdateTask,ChatAPI # RegisterTaskAPI\n\nrouter = routers.DefaultRouter()\nrouter.register('api/tasks', TaskViewSet, 'tasks')\nrouter.register('api/registar/bid', BidViewSet, 'bids')\nrouter.register('api/bidder/bids', BidderAPI, 'bids')\nrouter.register('api/ongoing',OnGoingAPI,'ongoing')\n\n\nurlpatterns = [\n path('', include(router.urls)),\n path('api/atualizar/task', UpdateTask.as_view()),\n path('api/auth/chat', ChatAPI.as_view()),\n\n # path('api/tasks/register', RegisterTaskAPI.as_view(),name='reg_task')\n]\n\n# urlpatterns = [\n#\tpath('api/tasks', TaskViewSet.as_view()),\n#\tpath('api/tasks/register', TaskViewSet.as_view()),\n\n\n# ]\n","sub_path":"Desenvolvimento/plataforma/Plataforma/tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192462250","text":"import sys\nsys.path.append(\"../gen_data/\")\nsys.path.append(\"../evaluation/\")\nsys.path.append(\"../\")\nfrom data_rum_cmn import Data_rum_cmn\nfrom evaluation import leave_one_out\nfrom mylog import Logger\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport random\nfrom sklearn.neighbors import KDTree\nimport matplotlib.pyplot as plt\nimport os\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\n\nclass ELMN1():\n def __init__(self):\n\n self.logdir = 'result/rum_cmn_query_hop'\n self.model_note = 'rum_cmn'\n\n self.global_dimension = 50 #\n self.top_k_memory_number = 50 #\n self.epoches = 100\n 
self.neg_number = 1\n self.test_neg_number = 100\n self.learning_rate = 0.001\n self.neg_number = 1\n\n self.filename = '../data/citeulike-a.npz'\n self.pretrain_path = '../pretrain/citeulike-a_e50.npz'\n self.data = Data_rum_cmn()\n self.data.get_data(self.filename)\n self.batch_size = 32\n\n def record(self):\n logger = Logger('root', self.logdir)\n self.logger = logger.getlog()\n self.logger.info('Model_Note: ' + self.model_note)\n parameter = {\n \"filename\": self.filename,\n \"pretrain\": self.pretrain_path,\n \"item_count\": self.data.item_number,\n \"user_count\": self.data.user_number,\n \"train_number\": len(self.data.train_item),\n \"batch_size\": self.batch_size,\n \"embed_size\": self.global_dimension,\n \"learning_rate\": self.learning_rate,\n \"neg_count\": self.neg_number,\n \"top_k_memory_number\": self.top_k_memory_number\n }\n write_par = ''\n for k, v in parameter.items():\n write_par += '\\n' + k + ' : ' + str(v)\n self.logger.info('\\n' + write_par + '\\n')\n\n\n def build_model(self):\n\n #initialize embedding matrix\n self.initializer =tf.truncated_normal_initializer(stddev=0.01)#tf.random_uniform_initializer(minval=0, maxval=0.1) # tf.truncated_normal_initializer(stddev=0.1)#\n self.user_embedding_matrix = tf.get_variable('user_embedding_matrix', initializer=self.initializer,\n shape=[self.data.user_number, self.global_dimension])\n self.item_embedding_matrix = tf.get_variable('item_embedding_matrix', initializer=self.initializer,\n shape=[self.data.item_number, self.global_dimension])\n\n self.neighbor_user_embedding_matrix = tf.get_variable('neighbor_user_embedding_matrix', initializer=self.initializer,\n shape=[self.data.user_number, self.global_dimension])\n\n self.neighbor_item_embedding_matrix = tf.get_variable('neighbor_item_embedding_matrix',\n initializer=self.initializer,\n shape=[self.data.item_number, self.global_dimension])\n\n #input\n self.user_id = tf.placeholder(tf.int32, shape=[None], name='user_id')\n self.item_id = tf.placeholder(tf.int32, shape=[None], name='item_id')\n self.label = tf.placeholder(tf.float32, shape=[None], name='label')\n\n self.item_user_id = tf.placeholder(tf.int32, shape=[None,None], name='item_user_id')\n self.user_item_id = tf.placeholder(tf.int32, shape=[None,None], name='user_item_id')\n self.item_user_len=tf.placeholder(tf.float32, shape=[None,None], name='item_user_len')\n self.user_item_len = tf.placeholder(tf.float32, shape=[None,None], name='user_item_len')\n\n\n #get current embedding\n self.user_embedding = tf.nn.embedding_lookup(self.user_embedding_matrix, self.user_id)\n self.item_embedding = tf.nn.embedding_lookup(self.item_embedding_matrix, self.item_id)\n self.item_user_embedding = tf.nn.embedding_lookup(self.user_embedding_matrix, self.item_user_id )\n self.user_item_embedding = tf.nn.embedding_lookup(self.item_embedding_matrix, self.user_item_id )\n self.neighbor_user_embedding = tf.nn.embedding_lookup(self.neighbor_user_embedding_matrix, self.item_user_id)\n self.neighbor_item_embedding = tf.nn.embedding_lookup(self.neighbor_item_embedding_matrix, self.user_item_id)\n\n\n\n self.query = tf.add(self.user_embedding, self.item_embedding) #self.item_embedding\n\n self.user_item_w = tf.reduce_sum(tf.multiply(tf.expand_dims(self.query, 1), self.user_item_embedding),2)\n self.user_item_weight = tf.nn.softmax(tf.multiply(self.user_item_w, self.user_item_len))\n # self.user_item_weight = tf.multiply(tf.nn.softmax(self.user_item_w), self.user_item_len)\n self.output_user_item_embedding = 
tf.reduce_sum(tf.multiply(self.neighbor_item_embedding, tf.expand_dims(self.user_item_weight, 2)), 1)\n\n # self.query = tf.add(self.user_embedding, self.item_embedding) #self.user_embedding\n self.item_user_w = tf.reduce_sum(tf.multiply(tf.expand_dims(self.query, 1), self.item_user_embedding), 2)\n # self.item_user_weight = tf.multiply(tf.nn.softmax(self.item_user_w), self.item_user_len)\n self.item_user_weight = tf.nn.softmax(tf.multiply(self.item_user_w, self.item_user_len))\n # self.neighbor_user_embedding = tf.nn.embedding_lookup(self.user_embedding_matrix, self.item_user_id)\n self.output_item_user_embedding = tf.reduce_sum(tf.multiply(self.neighbor_user_embedding, tf.expand_dims(self.item_user_weight, 2)), 1)\n\n initializer2 = tf.contrib.layers.variance_scaling_initializer(factor=2.0,\n mode='FAN_IN',\n uniform=False)\n self.U_user_hop = tf.get_variable('U_user_hop', initializer=initializer2, shape=[self.global_dimension, self.global_dimension])\n self.W_user_hop = tf.get_variable('W_user_hop', initializer=initializer2,shape=[self.global_dimension, self.global_dimension])\n self.b_user_hop = tf.get_variable('b_user_hop', initializer=tf.constant_initializer(1.0), shape=[1, self.global_dimension])\n self.query_hop = tf.nn.relu(tf.add(tf.add(tf.add(tf.matmul(self.query, self.U_user_hop), self.output_item_user_embedding),self.output_user_item_embedding), self.b_user_hop))\n\n\n self.item_user_w_hop = tf.reduce_sum(tf.multiply(tf.expand_dims(self.query_hop, 1), self.item_user_embedding), 2)\n # self.item_user_weight_hop = tf.multiply(tf.nn.softmax(self.item_user_w_hop), self.item_user_len)\n self.item_user_weight_hop = tf.nn.softmax(tf.multiply(self.item_user_w_hop, self.item_user_len))\n self.output_item_user_embedding_hop = tf.reduce_sum(tf.multiply(self.neighbor_user_embedding, tf.expand_dims(self.item_user_weight_hop, 2)), 1)\n\n\n self.user_item_w_hop = tf.reduce_sum(tf.multiply(tf.expand_dims(self.query_hop, 1), self.user_item_embedding),2)\n self.user_item_weight_hop = tf.nn.softmax(tf.multiply(self.user_item_w_hop, self.user_item_len))\n self.output_user_item_embedding_hop = tf.reduce_sum(\n tf.multiply(self.neighbor_item_embedding, tf.expand_dims(self.user_item_weight_hop, 2)), 1)\n\n # MF\n # self.final_user_embedding = self.user_embedding + 0.2 * self.output_user_item_embedding\n # self.final_item_embedding = self.item_embedding + 0.2 * self.output_item_user_embedding\n # self.y = tf.reduce_sum(tf.multiply(self.final_user_embedding, self.final_item_embedding), 1)\n\n\n # rum+cmn\n initializer = tf.contrib.layers.xavier_initializer()#tf.random_uniform_initializer(minval=-2, maxval=2) #\n self.U = tf.get_variable('U', initializer=initializer, shape=[self.global_dimension, self.global_dimension])\n self.W_user = tf.get_variable('W_user', initializer=initializer,shape=[self.global_dimension, self.global_dimension])\n self.W_item = tf.get_variable('W_item', initializer=initializer ,shape=[self.global_dimension, self.global_dimension])\n self.V = tf.get_variable('V', initializer=initializer, shape=[self.global_dimension, 1])\n self.b = tf.get_variable('b', initializer=tf.constant_initializer(1.0), shape=[1, self.global_dimension])\n\n self.MF = tf.matmul(tf.multiply(self.user_embedding, self.item_embedding), self.U)\n self.cf_user = tf.matmul(self.output_item_user_embedding_hop, self.W_user)\n self.cf_item = tf.matmul(self.output_user_item_embedding_hop, self.W_item)\n self.result = tf.add(tf.add(self.MF, tf.add(self.cf_user, self.cf_item)), self.b)\n self.y = 
tf.reduce_sum(tf.matmul(tf.nn.relu(self.result), self.V),1)\n\n # self.final_user_embedding = self.user_embedding + 0.2 * self.output_user_item_embedding_hop\n # self.final_item_embedding = self.item_embedding + 0.2 * self.output_item_user_embedding_hop\n # self.y = tf.reduce_sum(tf.multiply(self.final_user_embedding, self.final_item_embedding), 1)\n\n # self.y=0.5*(self.cmn_y+self.rum_y)\n\n\n #calculate loss\n self.loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.label, logits=self.y))\n self.regular_loss = tf.add(0.0 * tf.nn.l2_loss(self.user_embedding), 0.0 * tf.nn.l2_loss(self.item_embedding))\n self.all_loss = tf.add(self.regular_loss, self.loss)\n # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.all_loss)\n self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate,momentum=0.9, decay=0.9,).minimize(self.all_loss)\n\n\n test_top_value, self.test_top_index = tf.nn.top_k(self.y, k=(self.test_neg_number+1), sorted=True)\n\n def run_model(self):\n self.record()\n print('train number:', len(self.data.train_item), ' user_number:', self.data.user_number, ' item_number:', self.data.item_number)\n with tf.Session() as self.sess:\n self.build_model()\n self.sess.run(tf.global_variables_initializer())\n #\n if(self.pretrain_path):\n pretrain = np.load(self.pretrain_path)\n self.sess.run([\n self.user_embedding_matrix.assign(pretrain['user'] * 0.5),\n self.item_embedding_matrix.assign(pretrain['item'] * 0.5)])\n\n for epoch in range(self.epoches):\n\n batch_loss = []\n\n progress = tqdm(enumerate(self.data.gen_batch_train_data(self.neg_number, self.batch_size)), dynamic_ncols=True,\n total=(len(self.data.true_user) * (self.neg_number+1)) // self.batch_size)\n for k,e in progress:\n batch, item_user, item_user_l, user_item, user_item_l=e\n self.batch_user, self.batch_item, self.batch_label=batch[:,0],batch[:,1],batch[:,2]\n feed={self.user_id:self.batch_user,\n self.item_id:self.batch_item,\n self.label:self.batch_label,\n self.user_item_id:user_item,\n self.user_item_len:user_item_l,\n self.item_user_id:item_user,\n self.item_user_len:item_user_l}\n self.sess.run(self.optimizer,feed_dict=feed)\n\n # print('----------------')\n # print(self.sess.run(self.user_itme_embedding,feed_dict=feed))\n # print(self.sess.run(self.output_user_item_embedding, feed_dict=feed))\n # print(self.sess.run(self.item_embedding, feed_dict=feed))\n batch_loss.append(self.sess.run(self.all_loss,feed_dict=feed))\n progress.set_description(u\"[{}] Loss: {:,.6f} ----- \".format(epoch, batch_loss[-1]))\n\n self.logger.info('epoch:{} , loss:{:.6f}'.format(epoch, np.mean(batch_loss)))\n\n all_HR5 = []\n all_NDCG5 = []\n all_HR10 = []\n all_NDCG10 = []\n\n progress_test = tqdm(enumerate(self.data.gen_batch_test_data(self.test_neg_number, self.top_k_memory_number)),\n dynamic_ncols=True, total=self.data.user_number)\n for k, e in progress_test:\n purchased_item,batch, item_user, item_user_l, user_item, user_item_l=e\n self.batch_user, self.batch_item = batch[:, 0], batch[:, 1]\n feed={self.user_id:self.batch_user,\n self.item_id:self.batch_item,\n self.label:self.batch_label,\n self.user_item_id:user_item,\n self.user_item_len:user_item_l,\n self.item_user_id:item_user,\n self.item_user_len:item_user_l}\n recommend_list=self.sess.run(self.test_top_index,feed_dict=feed)\n recommend_list=list(np.array(self.batch_item)[recommend_list])\n\n HR5, NDCG5 = leave_one_out(purchased_item, recommend_list, 5)\n HR10, NDCG10 = 
leave_one_out(purchased_item, recommend_list, 10)\n\n all_HR5.append(HR5)\n all_NDCG5.append(NDCG5)\n all_HR10.append(HR10)\n all_NDCG10.append(NDCG10)\n progress_test.set_description(u\"Evaluation: [{}] HR: {} ----- \".format(epoch, all_HR5[-1]))\n\n self.logger.info(\"\\nHR@5= {:.6f} NDCG@5= {:.6f}\\n\"\n \"HR@10= {:.6f} NDCG@10= {:.6f}\\n\".format(np.mean(all_HR5), np.mean(all_NDCG5),\n np.mean(all_HR10), np.mean(all_NDCG10)))\n\n\nif __name__ == '__main__':\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2,3\"\n elmn=ELMN1()\n elmn.run_model()\n\n","sub_path":"rum/rum_cmn_query_hop.py","file_name":"rum_cmn_query_hop.py","file_ext":"py","file_size_in_byte":13958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501741853","text":"import pickle\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nfrom core.utils.parser import get_feat_parser\n\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC,LinearSVC,LinearSVR\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom xgboost import XGBClassifier\n# from EvoDAG.model import EvoDAG,EvoDAGE\n# from EvoMSA.base import EvoMSA\nparser = get_feat_parser()\noptions = parser.parse_args()\n\nif options.features is None:\n raise IOError(\"Enter features!\")\n\ndict = pickle.load(open(options.features, \"rb\"))\n\n\nfeats=[]\nhumor = []\nfor key in dict.keys():\n value = dict[key]\n feats.append(value[0].tolist())\n humor.append(value[1].tolist())\nfeats = np.array(feats)\nhumor = np.array(humor)\n\n\nif options.clf == 'GaussianProc':\n clf = GaussianProcessClassifier()\nelif options.clf == \"SVC\":\n clf = SVC()\nelif options.clf == \"LinearSVC\":\n clf = LinearSVC(max_iter=10000,dual=False)\nelif options.clf == \"DecisionTree\":\n clf = DecisionTreeClassifier()\nelif options.clf == \"RandomForest\":\n clf = RandomForestClassifier()\nelif options.clf == \"AdaBoost\":\n clf = AdaBoostClassifier(n_estimators=100)\nelif options.clf == \"XGBoost\":\n clf = XGBClassifier()\nelif options.clf == \"KNN\":\n clf = KNeighborsClassifier(n_neighbors=5)\nelif options.clf == \"GaussianNB\":\n clf = GaussianNB()\nelif options.clf == \"RBF\":\n kernel = 1.0 * RBF(1.0)\n clf = GaussianProcessClassifier(kernel=kernel, random_state=0)\nelif options.clf == \"EvoDAGE\":\n clf = EvoDAGE(n_estimators=30, n_jobs=4)\nelif options.clf == \"EvoDAG\":\n clf = EvoDAG()\n# elif options.clf == \"EvoMSA\":\n# clf = EvoMSa(Emo=True, lang='es')\nelse:\n raise IOError(\"Please select a valid clf!\")\n\n# perform kfold cross-validation with k=5\nkf = KFold(n_splits=5)\n\nf1 = []\nacc = []\nfor train_index, test_index in kf.split(humor):\n X_train,X_test = feats[train_index], feats[test_index]\n y_train,y_test = humor[train_index],humor[test_index]\n clf.fit(X_train, y_train)\n pred = clf.predict(X_test)\n f1.append(f1_score(y_test, pred))\n acc.append(accuracy_score(y_test, pred))\n\nprint(\"F1-score: \",np.mean(f1))\nprint(\"Accuracy score: \",np.mean(acc))\n\nif options.clf == 'GaussianProc':\n clf = GaussianProcessClassifier()\nelif options.clf == \"SVC\":\n clf = SVC()\nelif 
options.clf == \"LinearSVC\":\n    clf = LinearSVC(max_iter=10000,dual=False)\nelif options.clf == \"DecisionTree\":\n    clf = DecisionTreeClassifier()\nelif options.clf == \"RandomForest\":\n    clf = RandomForestClassifier()\nelif options.clf == \"AdaBoost\":\n    clf = AdaBoostClassifier()\nelif options.clf == \"XGBoost\":\n    clf = XGBClassifier()\nelif options.clf == \"KNN\":\n    clf = KNeighborsClassifier(n_neighbors=5)\nelif options.clf == \"GaussianNB\":\n    clf = GaussianNB()\nelif options.clf == \"RBF\":\n    kernel = 1.0 * RBF(1.0)\n    clf = GaussianProcessClassifier(kernel=kernel, random_state=0)\nelif options.clf == \"EvoDAGE\":\n    clf = EvoDAGE(n_estimators=30, n_jobs=4)\nelif options.clf == \"EvoDAG\":\n    clf = EvoDAG()\n# elif options.clf == \"EvoMSA\":\n#     clf = EvoMSa(Emo=True, lang='es')\nelse:\n    raise IOError(\"Please select a valid clf!\")\n\nclf.fit(feats,humor)\nif not os.path.exists(options.ckpt):\n    os.makedirs(options.ckpt)\npickle.dump(clf, open(os.path.join(options.ckpt,\"{}.pth\".format(options.clf)),\n                      \"wb\"))\n","sub_path":"experiments/task71/features_extraction/train_clf.py","file_name":"train_clf.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"571482899","text":"#from classes import *\nfrom main_page import *\nfrom company_class import *\n\n\ndef add_company(root):\n    AdditionCompanyScreen(root).pack(side=\"top\", fill=\"both\", expand=True)\n    root.mainloop()\n\n\nclass AdditionCompanyScreen(Frame):\n    def __init__(self, root):\n\n        Frame.__init__(self, root)\n        self.canvas = Canvas(root, borderwidth=0, background=\"#FFCCBC\")\n        self.frame = Frame(self.canvas, background=\"#FFCCBC\")\n        self.vsb = Scrollbar(root, orient=\"vertical\", command=self.canvas.yview)\n        self.canvas.configure(yscrollcommand=self.vsb.set)\n        self.names = ['name', 'founder' ,'country','industry', 'company_size', 'website']\n\n        self.vsb.pack(side=\"right\", fill=\"y\")\n        self.canvas.pack(side=\"left\", fill=\"both\", expand=True)\n        self.canvas.create_window((4, 4), window=self.frame, anchor=\"nw\",\n                                  tags=\"self.frame\")\n\n        self.frame.bind(\"<Configure>\", self.onFrameConfigure)\n\n        self.populate()\n        self.entries(root)\n\n    def populate(self):\n        '''Create one label row per field name'''\n        for row in range(len(self.names)):\n            Label(self.frame, text=self.names[row], width=12, borderwidth=\"1\",\n                  relief=\"solid\").grid(row=row * 2, column=0)\n            Label(self.frame, text='', background='#FFCCBC').grid(row=row * 2 + 1, column=0)\n\n\n    def onFrameConfigure(self, event):\n        '''Reset the scroll region to encompass the inner frame'''\n        self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))\n\n\n    def destroy_example(self, root):\n\n        self.canvas.destroy()\n        self.frame.destroy()\n\n        self.vsb.destroy()\n        self.destroy()\n        main_page(root)\n\n    def entries(self, root):\n\n        def six_entries(self, strVar, i):\n            counter = 0\n            for x in range(2):\n                for y in range(1, 4, 1):\n                    entr(self, strVar[counter], x+i, y)\n                    counter = counter + 1\n\n        def entr(self, strVar, i, j = 1):\n            entry = Entry(self.frame, textvariable=strVar, width=22, bd=2)\n            entry.grid(row=i, column=j)\n            print(i)\n\n        but2 = Button(self.frame, text=\"Back\", command=lambda: self.destroy_example(root), width=18)  # self.quit)#\n        but2.grid(row=0, column=3)\n        idif = 2\n        #'name', 'founder' ,'country','industry', 'company_size', 'company_size'\n        name = StringVar()\n        founder = StringVar()\n        country = StringVar()\n        industry = StringVar()\n        company_size = StringVar()\n        website = StringVar()\n        strVars = [name, founder, country, industry, company_size,website]\n        for i in range(len(strVars)):\n            entr(self, strVars[i], i * 2)\n\n        def Save():\n            #name, founder, country, industry, company_size, website = None,\n            p = Company(name.get(), founder.get(), country.get(), industry.get(), company_size.get(), website.get())\n            print(p)\n            companies.update({str(name.get()): p})\n            #dict.update({str(name.get()): p})\n            print(companies)\n\n\n        i = len(strVars)*2\n        Label(self.frame, text='', background='#FFCCBC').grid(row=i, column=0)\n        but1 = Button(self.frame, text=\"Save\", command=Save, width=18)\n        but1.grid(row=i+1, column=2)\n\n\nif __name__ == '__main__':\n    root = Tk()\n    root.title(\"GUI in Python\")\n    root.geometry(\"600x520\")\n    add_company(root)\n","sub_path":"new_company.py","file_name":"new_company.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"334778842","text":"import logging\nlogging.basicConfig(level=logging.INFO)\n\nimport asyncio, os, json, time\nfrom datetime import datetime\n\nfrom aiohttp import web\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom config import configs\n\nimport orm\nfrom coroweb import add_routes\n\n# This middleware logs every incoming http request via logging.info, including the request method and path\n@asyncio.coroutine\ndef logger_factory(app, handler):\n    @asyncio.coroutine\n    def logger(request):\n        logging.info('Request: %s %s' % (request.method, request.path))\n        # yield from asyncio.sleep(0.3)\n        # handler is the URL handler function, request is its argument\n        return (yield from handler(request))\n\n    return logger\n\n\n@asyncio.coroutine\ndef data_factory(app, handler):\n    @asyncio.coroutine\n    def parse_data(request):\n        if request.method == 'POST':\n            if request.content_type.startswith('application/json'):\n                request.__data__ = yield from request.json()\n                logging.info('request json: %s' % str(request.__data__))\n            elif request.content_type.startswith(\n                    'application/x-www-form-urlencoded'):\n                request.__data__ = yield from request.post()\n                logging.info('request form: %s' % str(request.__data__))\n        return (yield from handler(request))\n\n    return parse_data\n\n# A request object passes through this pipeline, in order:\n# logger_factory->response_factory->RequestHandler().__call__->get or post->handler\n# The matching response object is processed in this order:\n# handler builds the concrete object to be returned\n# then the '__method__' and '__route__' attributes are attached to that object, so the following code can identify it and handle it easily\n# RequestHandler extracts the required parameters from the request content, calls the URL handler function, and hands the result to response_factory\n# response_factory takes the processed object and, after a series of type checks, builds a proper web.Response object so it is returned in the right form\n@asyncio.coroutine\ndef response_factory(app, handler):\n    @asyncio.coroutine\n    def response(request):\n        logging.info('Response handler...')\n        r = yield from handler(request)\n        if isinstance(r, web.StreamResponse):\n            return r\n        if isinstance(r, bytes):\n            resp = web.Response(body=r)\n            resp.content_type = 'application/octet-stream'\n            return resp\n        if isinstance(r, str):\n            if r.startswith('redirect:'):\n                return web.HTTPFound(r[9:])\n            resp = web.Response(body=r.encode('utf-8'))\n            resp.content_type = 'text/html;charset=utf-8'\n            return resp\n        if isinstance(r, dict):\n            template = r.get('__template__')\n            if template is None:\n                resp = web.Response(\n                    body=json.dumps(\n                        r, ensure_ascii=False, default=lambda o: o.__dict__)\n                    .encode('utf-8'))\n                resp.content_type = 'application/json;charset=utf-8'\n                return resp\n            else:\n                r['__user__'] = request.__user__\n                resp = web.Response(body=app['__templating__'].get_template(\n                    template).render(**r).encode('utf-8'))\n                resp.content_type = 'text/html;charset=utf-8'\n                return resp\n        if isinstance(r, int) and r >= 100 and r < 600:\n            return web.Response(status=r)\n        if isinstance(r, tuple) and len(r) == 2:\n            t, m = r\n            if isinstance(t, int) and t >= 100 and t < 600:\n                return web.Response(status=t, text=str(m))\n        # default:\n        resp = web.Response(body=str(r).encode('utf-8'))\n        resp.content_type = 'text/plain;charset=utf-8'\n        return resp\n\n    return response\n\n\n@asyncio.coroutine\ndef init(loop):\n    yield from orm.create_pool(loop=loop, **configs.db)\n    # This is the decorator pattern at work: logger_factory and response_factory both wrap the URL handler functions (such as handler.index)\n    app = web.Application(\n        loop=loop, middlewares=[logger_factory, response_factory])\n    add_routes(app, 'handlers')\n    srv = yield from loop.create_server(app.make_handler(), '0.0.0.0', 9000)\n    logging.info('server started at http://0.0.0.0:9000...')\n    return srv\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(init(loop))\nloop.run_forever()","sub_path":"www/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"425001890","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: nico\n\nScript: train_MobileNetV2.py\nDescription: Trains a CNN starting from the pretrained MobileNetV2 model to\n             tell images that show a pedestrian apart from images that only\n             contain background.\nUsage: train_MobileNetV2.py -d [-s]\n\nThe JSON document with the experiments must have the following format:\n- epochs -> int: Number of training epochs\n- batch_size -> int or list of int: Batch sizes to consider\n- lr -> float or list of floats with the learning rates to consider\n- arquitectura: Set of layers appended to the end of the pretrained model.\nIt must be specified as a keras model in JSON format\n\nNote: For parameters given as a list of values, one experiment is run for\n\teach value of the parameter, combined with every value of the remaining\n\tparameters.\n\nIn addition, if the -s flag is given, every model generated is stored.\n\n\"\"\"\n\n# Included libraries\nimport sys\nimport time\nimport json\nimport numpy as np\nimport argparse\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import (accuracy_score, precision_score, recall_score,\n                             f1_score, confusion_matrix)\nimport tensorflow as tf\nfrom keras.callbacks.callbacks import History\nfrom utils import (load_image_class_dataset, compute_HOG, train_svm, test_svm,\n                   check_experiment_document, extract_experiments_cnn_parameters,\n                   no_pederestian_img_prepro, model_transfer_MobileNetV2, plot_results)\n\nPATH_POSITIVE = '../data/pedestrians128x64/'\nPATH_NEGATIVE = '../data/pedestrians_neg/'\n\n#### Script body ####\n\n## Read the arguments passed to the script\nparser = argparse.ArgumentParser(description='Trains a CNN starting from the'\\\n                    ' MobileNetV2 network to tell images with'\\\n                    ' pedestrians from background images')\nparser.add_argument('-d', '--document', help='JSON file with the training'\\\n                    ' parameters to consider', type=str)\nparser.add_argument('-s', '--save_model', help='Set this flag to store the'\\\n                    ' models generated during training',\n                    action='store_true', default=False)\n\nargs = parser.parse_args()\n\nexp_filename = args.document\nstore_models = args.save_model\n\nnp.random.seed(27)  # Initial seed\n\n# Read the experiments file\nwith open(exp_filename) as f:\n    try:\n        exp_data = json.load(f)\n    except Exception as e:\n        print('There was an error with the experiments file'\\\n              ' given:\\n',str(e), file=sys.stderr)\n        exit(-1)\n\n# Check the format of the experiments document\ntry:\n    check_experiment_document(exp_data)\nexcept Exception as e:\n    print('The experiments document is not correct:', file=sys.stderr)\n    print(str(e), file=sys.stderr)\n    exit(-1)\n\n# Work out the name of the results file and of the model files\ndot_pos = exp_filename.rfind('.')\nif dot_pos != -1:\n    results_filename = exp_filename[:dot_pos] + '_experimentos.json'\n    model_base_filename = exp_filename[:dot_pos]\nelse:\n    results_filename = exp_filename + '_experimentos.json'\n    model_base_filename = exp_filename[:]\n\n## Disable the GPU (for now)\n#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n## Limit the maximum GPU and CPU usage\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n## Image loading\n\n# Load the images\nprint('Loading images: ')\nX, y = load_image_class_dataset(data={1: PATH_POSITIVE, 0: PATH_NEGATIVE},\n                                prepro_operation={0: no_pederestian_img_prepro},\n                                n_samples={1: 400},\n                                rep_samples={0: 400//50})\nprint('Successfully loaded ',X.shape[0],' images')\n\n# Split into train and test sets (80% train, 20% test)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n## Extract every combination of parameters\nparams = extract_experiments_cnn_parameters(exp_data)\nresults = []\n\nfor p in params:\n    print('Training with parameters: {}'.format(p))\n\n    # Load the architecture and compile\n    model = model_transfer_MobileNetV2(p['arquitectura'], p['load_weights'])[0]\n\n    # Prepare the log of the evolution of the metrics\n    history = History()\n\n    model.compile(loss='binary_crossentropy', metrics=['accuracy'],\n                  optimizer='adam')\n\n\n    # Train the model\n    try:\n        model.fit(X_train, y_train, verbose=True,\n                  epochs=p['epochs'],\n                  callbacks=[history])\n    except Exception as e:\n        p['resultados'] = 'Error while running the experiment: '+str(e)\n        results.append(p)\n\n        # Store the data in a JSON file\n        with open(results_filename, 'w') as f:\n            json.dump(results, f, indent=4)\n\n        continue\n\n    ## Evaluate the model\n    # Threshold the sigmoid outputs at 0.5; a bare cast would truncate every\n    # probability below 1.0 down to class 0\n    train_pred = (model.predict(X_train) > 0.5).astype('int64')\n    test_pred = (model.predict(X_test) > 0.5).astype('int64')\n    score_train = model.evaluate(X_train, y_train)\n    score_test = model.evaluate(X_test, y_test)\n\n    # Store the accuracy and loss plots\n    plot_results({'accuracy - train': history.history['accuracy']},\n                 'Accuracy',\n                 model_base_filename + '-experimento-'+str(len(results)+1) +\n                 '_accuracy.pdf')\n\n    plot_results({'loss - train': history.history['loss']},\n                 'Loss',\n                 model_base_filename + '-experimento-'+str(len(results)+1) +\n                 '_loss.pdf')\n\n    # Store the model\n    if store_models:\n        model.save(model_base_filename + '-experimento-'+str(len(results)+1) +\n                   '_modelo.h5')\n\n    conf_matrix_train = confusion_matrix(y_train, train_pred)\n    conf_matrix_test = confusion_matrix(y_test, test_pred)\n\n    # Store all the parameter and result information\n    p['script'] = __file__\n    #p['arquitectura'] = model.get_config()\n    p['resultados'] = {'loss_train': float(score_train[0]),\n                       'accuracy_train': float(score_train[1]),\n                       'loss_test': float(score_test[0]),\n                       'accuracy_test': float(score_test[1]),\n                       'ratio_loss_train_val': float(score_train[0])/\n                                               float(score_test[0]),\n                       'precision_train': precision_score(y_train, train_pred),\n                       'precision_test': precision_score(y_test, test_pred),\n                       'recall_train': recall_score(y_train, train_pred),\n                       'recall_test': recall_score(y_test, test_pred),\n                       'f1_train': f1_score(y_train, train_pred),\n                       'f1_test': f1_score(y_test, test_pred),\n                       'confusion_matrix_train': {\n                           'TP': int(conf_matrix_train[1,1]),\n                           'TN': int(conf_matrix_train[0,0]),\n                           'FP': int(conf_matrix_train[0,1]),\n                           'FN': int(conf_matrix_train[1,0])\n                       },\n                       'confusion_matrix_test': {\n                           'TP': int(conf_matrix_test[1,1]),\n                           'TN': int(conf_matrix_test[0,0]),\n                           'FP': int(conf_matrix_test[0,1]),\n                           'FN': int(conf_matrix_test[1,0])\n                       }\n                       }\n\n    results.append(p)\n\n    # Store the data in a JSON file\n    with open(results_filename, 'w') as f:\n        json.dump(results, f, indent=4)\n","sub_path":"Vision_por_computador/Trabajo6/codigos_fuente/train_MobileNetV2.py","file_name":"train_MobileNetV2.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"403035617","text":"#Present value required at start of retirement [at age 65 this should be the present value of the account]\nn = 30*12\n\ni = 0.04\n\n\nAtoP = ((1+i)**n-1)/(i*(1+i)**n)\n\nP_retire = AtoP*2000\n\nprint(2000*n)\nprint(P_retire)\n\n\n","sub_path":"ExtraCredit/2.7 Check.py","file_name":"2.7 Check.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"53382867","text":"# Given a list, rotate the list to the right by k places, where k is\n# non-negative.\n#\n# For example:\n# Given 1->2->3->4->5->NULL and k = 2,\n# return 4->5->1->2->3->NULL.\n\n\n# Definition for singly-linked list.\nclass ListNode:\n    def 
__init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n # @param {ListNode} head\n # @param {integer} k\n # @return {ListNode}\n def rotateRight(self, head, k):\n if not head or not head.next or k == 0:\n return head\n p, size = head, 1\n while p.next:\n size += 1\n p = p.next\n p.next, k = head, k % size\n size -= 1\n while size >= k:\n size -= 1\n p = p.next\n first, p.next = p.next, None\n\n return first\n","sub_path":"061_Roatate_List.py","file_name":"061_Roatate_List.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288002415","text":"import itertools\n\ndef pancakes(order):\n order = list(order)\n order = [i[0] for i in itertools.groupby(order)]\n if order[-1] == '+':\n order.pop()\n return len(order)\n\nt = int(input())\n\nfor i in range(t):\n line = input().strip()\n print(\"Case #{}: {}\".format(i + 1, pancakes(line)))","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_Benderv_pancakes.py","file_name":"16_0_2_Benderv_pancakes.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560778826","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_style(\"whitegrid\")\ncmap = sns.color_palette(\"Paired\", 4)\n\n\ndef findnth(string, substring, n):\n\n parts = string.split(substring, n+1)\n\n if len(parts) <= n+1:\n return -1\n\n return len(string)-len(parts[-1])-len(substring)\n\n\nruns = [i for i in os.listdir('../code_outputs') if i.startswith('2017_07')]\n\nfor run in runs:\n\n filename = os.path.join('../code_outputs', run, 'out.txt')\n\n elbos = []\n kls = []\n x_test = []\n test_set_elbos = []\n\n with open(filename, 'rb') as f:\n\n last_line_test = False\n\n for l in f.readlines():\n\n try:\n l = l.decode('ascii')\n except UnicodeDecodeError:\n continue\n\n if l.startswith('Iteration'):\n\n elbos.append(float(l[findnth(l, ' ', 3) + 1: findnth(l, ' ', 4)]))\n\n try:\n kls.append(-float(l[findnth(l, 'KL', 0) + 5: findnth(l, ')', 0)]))\n except:\n pass\n\n if last_line_test:\n x_test.append(len(elbos))\n\n last_line_test = False\n\n elif l.startswith('Test set ELBO') and l.strip().endswith('per data point'):\n\n test_set_elbos.append(float(l[findnth(l, ' ', 3) + 1: findnth(l, ' ', 4)]))\n\n last_line_test = True\n\n test_set_elbos = [i for i in test_set_elbos if i != 0]\n\n moving_average = 10\n\n x_test_avg = []\n test_set_avg = []\n\n for t in range(moving_average - 1, len(test_set_elbos)):\n x_test_avg.append(x_test[t])\n test_set_avg.append(np.mean(test_set_elbos[t - (moving_average - 1): t + 1]))\n\n fig, ax1 = plt.subplots()\n\n ax1.plot(elbos, label='train', c=cmap[0], zorder=2)\n ax1.plot(x_test_avg, test_set_avg, c=cmap[1], zorder=3)\n\n ax1.set_ylim(bottom=max(elbos) * 3)\n\n ax1.tick_params('y', colors=cmap[1])\n ax1.set_xlabel('Training Iteration')\n ax1.set_ylabel('L(X)', color=cmap[1])\n\n ax2 = ax1.twinx()\n ax2.plot(kls, label='kl divergence', c=cmap[2], zorder=1)\n\n try:\n ax2.set_ylim(bottom=0, top=max([kls[-1] * 3, 5]))\n except IndexError:\n pass\n\n ax2.tick_params('y', colors=cmap[3])\n ax2.set_ylabel('KL divergence', color=cmap[3])\n\n ax1.set_yticks(ax1.get_yticks())\n ax2.set_yticks(np.linspace(ax2.get_yticks()[0], ax2.get_yticks()[-1], len(ax1.get_yticks())))\n ax2.grid(None)\n\n fig.tight_layout()\n\n plt.savefig('../pics/elbos/' + run + '.png')\n\n 
plt.clf()\n","sub_path":"Reconstruction/WaveNetText/evaluation/plot_elbos.py","file_name":"plot_elbos.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"614059697","text":"# open the file\nbuka = open(\"d:\hasilPenyandian\", 'r')\n# input the value of n\nn = int(input('value of n : '))\n# read the file\nbaca = buka.read()\nbaca_list = list(baca)\nisi = []\n\nfor m in baca_list :\n    if (m == ' ') :\n        x = ord(m)\n    else :\n        i = ord(m)\n        x = i - n\n        if (x < 65) :\n            x = x + 26\n        elif (90 < x and x <97) :\n            x = x + 26\n    kars = chr(x)\n    isi.append(kars)\njoined = ','.join(isi)\n\n# write the result to a file\nbuka_open = open('latihan7.txt', 'w')\nbuka_open.write(joined)\nbuka_open.close()\n \n","sub_path":"latihan 7.py","file_name":"latihan 7.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"334039892","text":"\"\"\"Overwatch Stats Class\"\"\"\n# pylint: disable=I0011, R0903\n\nclass OverwatchStats(object):\n    \"\"\"Object representing overwatch stats from the ow stats api\"\"\"\n    poll_id = \"\"\n    team_id = \"\"\n    channel_id = \"\"\n    name = \"\"\n    date_open = 0\n    date_close = 0\n    owner_user_id = \"\"\n    poll_choices = []\n\n    def __init__(self, poll_id, team_id, team_domain, channel_id, channel_name, \\\n        name, date_open, date_close, owner_user_id, poll_choices):\n        self.poll_id = poll_id\n        self.team_id = team_id\n        self.team_domain = team_domain\n        self.channel_id = channel_id\n        self.channel_name = channel_name\n        self.name = name\n        self.date_open = date_open\n        self.date_close = date_close\n        self.owner_user_id = owner_user_id\n        self.poll_choices = poll_choices\n\n\n","sub_path":"app/domain/ow_stats.py","file_name":"ow_stats.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"404638908","text":"# -*- utf-8 -*-\nimport sys\n# -*- coding:utf-8 -*-\nimport socket\nimport asyncio\nfrom enum import Enum\nfrom concurrent.futures import ThreadPoolExecutor\n\nclass ConnectionStatus(Enum):\n    DISCONNECTED = 0\n    CONNECTED = 1\n\nclass TcpClient():\n    def __init__(self):\n        self.host = '127.0.0.1'\n        self.port = '50000'\n        self._client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create the socket object\n        self._connected = None\n        self._discooneted = None\n        self.status = ConnectionStatus.DISCONNECTED\n\n    def connect(self, host, port):\n        \"\"\"Connect to the server\"\"\"\n        self.host = host\n        self.port = port\n        ret = True\n        try:\n            self._client.connect((self.host, self.port)) # connect to the server here\n            self.status = ConnectionStatus.CONNECTED\n            if self._connected is not None:\n                self._connected((self.host, self.port))\n        except ConnectionRefusedError:\n            ret = False\n\n        return ret\n\n    def close(self):\n        if self.status is ConnectionStatus.CONNECTED:\n            self._client.close()\n\n    def sendto(self, data):\n        \"\"\"Send a packet\"\"\"\n        if self.status is ConnectionStatus.CONNECTED:\n            self._client.send(data)\n\n    def connected_event(self, func):\n        \"\"\"Connection event\"\"\"\n        self._connected = func\n\n    def disconnected_event(self, func):\n        \"\"\"Disconnection event\"\"\"\n        self._discooneted = func\n\n    def recv(self):\n        with ThreadPoolExecutor(max_workers = 128) as tpool:\n            try:\n                recv_data = self._client.recv(4096)\n            except ConnectionResetError:\n                self._client.close()\n                self.status = ConnectionStatus.DISCONNECTED\n                if self._discooneted is not None:\n                    self._discooneted((self.host, self.port))\n            else:\n                if not recv_data:\n                    self.status = ConnectionStatus.DISCONNECTED\n                    if self._discooneted is not None:\n                        self._discooneted((self.host, self.port))\n                else:\n                    tpool.submit(self.do_proc_packet_worker, recv_data)\n\n    def do_proc_packet_worker(self, recvdata):\n        \"\"\"Template method for processing received packets\"\"\"\n        pass\n    \nif __name__ == '__main__':\n    import time\n    import struct\n    client = TcpClient()\n    if client.connect('127.0.0.1', 50001) is not False: \n        data = struct.pack('16s', 'Hello, World!'.encode('ascii'))\n        client.sendto(data)\n        client.close()\n","sub_path":"Python/mylibpy/socket/tcp_client.py","file_name":"tcp_client.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"371343978","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport time\nfrom flask import current_app\nfrom hashlib import sha256\ntry:\n    from urlparse import urlparse # Python 2\nexcept ImportError:\n    from urllib.parse import urlparse # Python 3\n\n\n__author__ = 'lundberg'\n\n\ndef verify_auth_token(eppn, token, nonce, timestamp, generator=sha256):\n    \"\"\"\n    Authenticate a user who just signed up, for user convenience.\n\n    Authentication is done using a shared secret in the configuration of the\n    authn and signup applications. The signup application can effectively\n    log a new user in.\n\n    :param eppn: the identifier of the user as string\n    :param token: authentication token as string\n    :param nonce: a public nonce for this authentication request as string\n    :param timestamp: unixtime of signup application as hex string\n    :param generator: hash function to use (default: SHA-256)\n    :return: bool, True on valid authentication\n    \"\"\"\n    current_app.logger.debug('Trying to authenticate user {} with auth token {}'.format(eppn, token))\n    shared_key = current_app.config.get('TOKEN_LOGIN_SHARED_KEY')\n\n    # check timestamp to make sure it is within -300..900 seconds from now\n    now = int(time.time())\n    ts = int(timestamp, 16)\n    if (ts < now - 300) or (ts > now + 900):\n        current_app.logger.debug('Auth token timestamp {} out of bounds ({} seconds from {})'.format(\n            timestamp, ts - now, now))\n        return False\n\n    # verify there is a long enough nonce\n    if len(nonce) < 16:\n        current_app.logger.warning('Auth token nonce {} too short'.format(nonce))\n        return False\n\n    # verify token format\n    expected = generator('{0}|{1}|{2}|{3}'.format(\n        shared_key, eppn, nonce, timestamp)).hexdigest()\n    if len(expected) != len(token):\n        current_app.logger.warning('Auth token bad length')\n        return False\n\n    # constant time comparison of the hash, courtesy of\n    # http://rdist.root.org/2009/05/28/timing-attack-in-google-keyczar-library/\n    result = 0\n    for x, y in zip(expected, token):\n        result |= ord(x) ^ ord(y)\n    current_app.logger.debug('Auth token match result: {}'.format(result == 0))\n    return result == 0\n\n\ndef verify_relay_state(relay_state, safe_default='/'):\n    \"\"\"\n    :param relay_state: Next url\n    :param safe_default: The default if relay state is found unsafe\n\n    :type safe_default: six.string_types\n    :type relay_state: six.string_types\n\n    :return: Safe relay state\n    :rtype: six.string_types\n    \"\"\"\n    if relay_state is not None:\n        current_app.logger.debug('Checking if relay state {} is safe'.format(relay_state))\n        url_scheme = current_app.config['PREFERRED_URL_SCHEME']\n        safe_domain = current_app.config['SAFE_RELAY_DOMAIN']\n        parsed_relay_state = urlparse(relay_state)\n\n        # If relay state is only a path\n        if (not parsed_relay_state.scheme and not parsed_relay_state.netloc) and 
parsed_relay_state.path:\n            return relay_state\n\n        # If scheme matches PREFERRED_URL_SCHEME and fqdn ends with dot SAFE_RELAY_DOMAIN or equals SAFE_RELAY_DOMAIN\n        if parsed_relay_state.scheme == url_scheme:\n            if parsed_relay_state.netloc.endswith('.' + safe_domain) or parsed_relay_state.netloc == safe_domain:\n                return relay_state\n\n    # Unsafe relay state found\n    current_app.logger.warning('Caught unsafe relay state: {}. '\n                               'Using safe relay state: {}.'.format(relay_state, safe_default))\n    return safe_default\n","sub_path":"src/eduid_webapp/authn/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"506411562","text":"import aiml\nimport os\nimport pyttsx3\nimport speech_recognition as sr\n\n# Create the kernel and learn the AIML files\nkernel = aiml.Kernel()\n\nif os.path.isfile(\"bot_brain.brn\"):\n    kernel.bootstrap(brainFile = \"bot_brain.brn\")\nelse:\n    kernel.bootstrap(learnFiles = \"std-startup.xml\", commands = \"load aiml b\")\n    kernel.saveBrain(\"bot_brain.brn\")\n\n# get audio from the microphone\nr = sr.Recognizer()\n\n# start the TTS engine\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\n\n# the kernel is ready to use\nrespuesta = kernel.respond(\"inicia\")\nengine.setProperty('voice', voices[0].id)\nengine.setProperty('rate', 130)\nengine.say(respuesta)\nengine.runAndWait()\nwhile True:\n    entrada = input(\"Ingresa el mensaje: \")\n    respuesta = kernel.respond(entrada)\n    engine.setProperty('voice', voices[0].id)\n    engine.setProperty('rate', 130)\n\n    # Eliza's reply\n    engine.say(respuesta)\n    engine.runAndWait()\n    if entrada.lower() == \"buenas noches\" or entrada.lower() == \"adios\" or entrada.lower() == \"hasta luego\" or entrada.lower() == \"bye\":\n        exit()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"358124167","text":"import random\n\nfrom django.core.management.base import BaseCommand\nfrom django_seed import Seed\nfrom authors.models import Author\n\n\nclass Command(BaseCommand):\n    def add_arguments(self, parser):\n        parser.add_argument(\"--number\", default=1, type=int,\n                            help=\"How many authors do you want to create?\")\n\n    def handle(self, *args, **options):\n        number = int(options.get('number', 1))\n        seeder = Seed.seeder()\n\n        seeder.add_entity(Author, number, {\n            \"name\": lambda x: seeder.faker.name(),\n            \"born_address\": lambda x: f\"{seeder.faker.city()} + {seeder.faker.country()}\",\n            \"bio\": lambda x: seeder.faker.paragraph(nb_sentences=3, variable_nb_sentences=True, ext_word_list=None),\n            \"born_date\": seeder.faker.date_of_birth(tzinfo=None, minimum_age=0, maximum_age=115),\n            \"died_date\": seeder.faker.date_of_birth(tzinfo=None, minimum_age=0, maximum_age=115),\n            \"user\": None,\n            \"photo\": lambda x: f\"author_img/test/tux{random.randint(1, 5)}.jpg\"\n        })\n\n        seeder.execute()\n        self.stdout.write(self.style.SUCCESS(f\"{number} authors created!!\"))\n","sub_path":"authors/management/commands/seed_authors.py","file_name":"seed_authors.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"493684419","text":" \n# imports \nimport numpy as np\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore\n\n# init qApp\napp = pg.QtGui.QApplication([])\n\n# setup the main window\nview = 
pg.GraphicsView()\nview.resize(900,500)\nview.setWindowTitle('Notebook')\nview.show()\n\n# main layout\nlayout = pg.GraphicsLayout(border='r') # with a red bordercolor \n\n# set the layout as a central item\nview.setCentralItem(layout)\n\n# create a text block\nlabel = pg.LabelItem('PyQtGraph Grid Layout Example', size='25px', color='y')\n\n# create a plot with two random curves\np1 = pg.PlotItem()\ncurve11 = pg.PlotCurveItem(pen=pg.mkPen(color='g', width=1))\ncurve12 = pg.PlotCurveItem(pen=pg.mkPen(color='b', width=1, style=QtCore.Qt.DashLine))\np1.addItem(curve11); p1.addItem(curve12)\np1.setXRange(0,100); p1.setYRange(-1.1,2.1)\n\n# create another plot with two random curves\np2 = pg.PlotItem()\ncurve21 = pg.PlotCurveItem(pen=pg.mkPen(color='w', width=1, style=QtCore.Qt.DotLine))\ncurve22 = pg.PlotCurveItem(pen=pg.mkPen(color='c', width=1, style=QtCore.Qt.DashLine))\np2.addItem(curve21); p2.addItem(curve22)\ncurve21.setData(np.random.rand(100) + 0.5)\ncurve22.setData(np.random.rand(100) - 0.5)\np2.setXRange(0,100); p2.setYRange(-1.1,2.1)\n\n# finally organize the layout\nlayout.addItem(label, row=0, col=0, colspan=2)\nlayout.addItem(p1, row=1, col=0)\nlayout.addItem(p2, row=1, col=1)\n\n# data to animate\nx1 = np.random.rand(500,100) + 0.5\nx2 = np.random.rand(500,100) - 0.5\n\n# repeating data generator\ncnt=0\ndef animLoop():\n global cnt\n curve11.setData(x1[cnt%x1.shape[0]])\n curve12.setData(x2[cnt%x2.shape[0]])\n cnt+=1\n \ntimer = QtCore.QTimer()\ntimer.timeout.connect(animLoop)\ntimer.start(0)\n\napp.exec_()","sub_path":"solutions/pyQtGraph/Exercise_3_2.py","file_name":"Exercise_3_2.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580332088","text":"from django.shortcuts import render\nfrom basketapp.models import Basket\nfrom mainapp.views import get_products\n\n\ndef index(request):\n title = 'geekshop'\n products = get_products()[:4] # Wow! 
0 queries\n # products = Product.objects.filter(is_active=True, category__is_active=True)[:4]\n # products = Product.objects.filter(is_active=True, category__is_active=True).select_related('category')[:4]\n\n basket = []\n\n if request.user.is_authenticated:\n basket = Basket.objects.filter(user=request.user)\n\n context = {\n 'title': title,\n 'products': products,\n 'basket': basket,\n }\n return render(request, 'geekshop/index.html', context=context)\n\n\ndef contacts(request):\n title = 'контакты'\n context = {\n 'title': title,\n }\n return render(request, 'geekshop/contact.html', context=context)\n","sub_path":"geekshop/geekshop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591915412","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport multiprocessing\n\nfrom downloader import Downloader\nfrom extract_urls import ParserLinks\nfrom url_queue import UrlQ\nfrom storage import Storage\n\nclass Crawler(object):\n\n def __init__(self):\n self.__frontier = UrlQ()\n self.__urlseentest = Storage()\n self.__curl = Storage()\n \n def __call__(self, seed):\n self.__frontier.add(seed, { 'visited': False })\n self.run()\n\n def run(self):\n if not isinstance(self.__frontier, UrlQ):\n raise TypeError('Tipo do objeto deveria ser < UrlQ >.')\n if not isinstance(self.__urlseentest, Storage):\n raise TypeError('Tipo do objeto deveria ser < Storage >.')\n\n try:\n while self.__frontier:\n url_curr = self.__frontier.next()\n page = Downloader.fetcher(url_curr)\n\n # URL visited and now saved.\n self.__urlseentest.save(url_curr, { 'visited': True })\n # URL to be showed on view.\n self.__curl.save('curl', url_curr)\n\n if page:\n html = str(page.read())\n links = ParserLinks.extract_urls(url_curr, html)\n\n for link in links:\n if not self.__frontier.has_item(link) and not self.__urlseentest.get(link):\n self.__frontier.add(link, { 'visited': False })\n\n except KeyboardInterrupt:\n logging.info('KEYBOARD_INTERRUPT')\n\n except Exception as error:\n if hasattr(error, 'message'):\n logging.error('crawler.py --> {}'.format(error.message))\n\n @property\n def curl(self):\n process_queue = multiprocessing.Queue()\n process_queue.put(self.__curl.get('curl'))\n return process_queue.get()\n","sub_path":"www_traveler/www_traveler.py","file_name":"www_traveler.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"555050800","text":"import time\n\ndef random_number(minimum,maximum):\n now = str(time.clock())\n rnd = float(now[::-1][:5:])/100000\n return minimum + rnd*(maximum-minimum)\n \nfor i in range (1000):\n print(int(random_number(0, 100)))\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"261755829","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='n3th0us3',\n version='1.1.2',\n packages=find_packages(exclude=['unittests*', ]),\n package_dir={'n3th0us3': 'n3th0us3'},\n author='Anzhela',\n author_email='dev.anzhela@gmail.com',\n description=\"Unofficial python package for domains.nethouse.ru service.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Programming 
Language :: Python :: 3\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.4',\n)\n","sub_path":"pypi_install_script/n3th0us3-1.1.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"70445239","text":"import os\nfrom socket import *\n\ndef getNameFile(fromWhere):\n    if fromWhere == \"server\":\n        directName = \"./fileOnServer\"\n    elif fromWhere ==\"client\":\n        directName = \".\"\n    result=\"\"\n    for _, _, files in os.walk(directName):\n        for file in files:\n            result += file + ','\n    return result\n\ndef sendSizeFunc(conn, fileToSend):\n    bytesSent=0\n    while bytesSent !=40:\n        bytesSent += conn.send(fileToSend[bytesSent:].encode())\n\ndef sendStringFunc(conn, string):\n    bytesSent=0\n    while bytesSent !=len(string):\n        bytesSent += conn.send(string[bytesSent:].encode())\n\ndef recieveStringFunc(conn, size=1040):\n    tempBuf =\"\"\n    data=\"\"\n    while len(data) != size:\n        tempBuf = conn.recv(size)\n        if not tempBuf:\n            break\n        else:\n            data+= tempBuf.decode()\n    return data\n\ndef menu():\n    print(\"What would you like to do?\")\n    print(\" get - download file\")\n    print(\" put - upload file\")\n    print(\" ls - list files\")\n    print(\" help - print options\")\n    print(\" quit - terminate program\")\n\ndef getMenuOption():\n    option = input(\"fpt> \")\n    return option\n\ndef padStringLen(string, size):\n    tempString = str(len(string))\n    while (len(tempString) < size):\n        tempString+=\" \"\n    return tempString\n\ndef padString(string, size):\n    tempString = string\n    while (len(tempString) < size):\n        tempString+=\" \"\n    return tempString\n\n\ndef processNPrintNameFile(namesFile):\n    files = namesFile.split(\",\")\n    print(\"Files On Server Are:\")\n    for file in files:\n        print(\" \"+file)\n\ndef checkFileExist(fileName, fromWhere):\n    if fromWhere == \"server\":\n        directName = \"./fileOnServer\"\n    elif fromWhere ==\"client\":\n        directName = \".\"\n    for _, _, files in os.walk(directName):\n        for file in files:\n            if file == fileName:\n                return True\n    return False\n\ndef sendACK(conn, typeNum):\n    if typeNum ==0:\n        type = \"Errr\"\n    elif typeNum ==1:\n        type = \"Okay\"\n    elif typeNum ==2:\n        type = \"Exst\"\n    elif typeNum ==3:\n        type = \"NEst\"\n    elif typeNum ==4:\n        type = \"Cont\"\n    elif typeNum ==5:\n        type = \"NCnt\"\n    sendStringFunc(conn, type)\n\ndef recieveACK(conn):\n    ackSize = 4\n    ack = recieveStringFunc(conn, ackSize)\n    return ack\n\ndef continueOption():\n    stopOption = False\n    while (not stopOption):\n        subOption = input(\"Try again?(Y/N) \")\n        if subOption == 'Y' or subOption == 'y':\n            return True\n        elif subOption == 'N' or subOption == 'n':\n            return False\n        else:\n            print(\"Invalid Option. Please Choose again\")\n\ndef downloadFile(conn, fileName, size, fromWhere):\n    if fromWhere == \"server\":\n        directName = \"DLFromServer/\"\n    elif fromWhere ==\"client\":\n        directName = \"fileOnServer/\"\n    if not os.path.exists(directName):\n        os.mkdir(directName)\n    tempBuf = 0\n    data = 0\n    print(\"Downloading...\")\n    with open(directName+fileName, \"wb\") as f:\n        while tempBuf < size:\n            data=conn.recv(1024)\n            tempBuf+=len(data)\n            if not data:\n                break\n            f.write(data)\n    if tempBuf == size:\n        print(\"Download Completed\")\n    f.close()\n\ndef padFileNameSize(fileNameSize, size):\n    tempString = str(fileNameSize)\n    while (len(tempString) < size):\n        tempString+=\" \"\n    return tempString\n\ndef sendDownloadFile(conn, fileName):\n    print(\"Sending...\")\n    with open(fileName, 'rb') as f: \n        while True:\n            contents = f.read(1024)\n            bytesSent=0\n            if not contents:\n                break\n            while bytesSent < len(contents):\n                # resume from the unsent tail; sending the whole buffer again\n                # would duplicate data on a partial send\n                bytesSent += conn.send(contents[bytesSent:])\n\n    f.close()\n    print(\"Send Completed\")","sub_path":"pythonVer/DuyVer/supportFunc.py","file_name":"supportFunc.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"14838226","text":"import zmq\nimport time\ncontext = zmq.Context()\npublisher = context.socket(zmq.PUB)\n\n\ndef pubips(PORTS):\n    print(\"pubs start\")\n    publisher.bind('tcp://127.0.0.1:%s' % PORTS)\n    while True:\n        # Allow clients to connect before sending data\n        time.sleep(10)\n        publisher.send(b'HI from port: %s' % PORTS.encode('utf-8'))\n","sub_path":"PEERS/PUB/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"505433738","text":"import os\nimport json\nimport allure\nfrom jsonschema import validate\nfrom jsonschema import exceptions\n\nfrom src.logging.logger import logger, log_exception\n\n\nclass SchemaValidator:\n    \"\"\"\n    This class reads all schemas in the src/validation/schema folder,\n    and then validates the chosen schema against the given JSON\n    \"\"\"\n    __schema_path = os.path.join('src', 'validation', 'schema')\n\n    @allure.step('Validating actual response body against the schema {schema_name}')\n    def validate_json(self, schema_name, actual_json):\n        logger.info(f'Starting to validate actual response body against the schema {schema_name}')\n        schema_path = os.path.join(self.__schema_path, schema_name)\n        try:\n            with open(schema_path, 'r') as f:\n                schema = json.loads(f.read())\n            validate(actual_json, schema)\n            logger.info('Done. The JSON is OK')\n        except exceptions.ValidationError as err:\n            message = err.args[0]\n            log_exception('Failed validating the JSON: ' + message)\n            raise AssertionError('JSON schema validation failed. 
See log for details.')\n","sub_path":"src/validation/schema_validator.py","file_name":"schema_validator.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308865338","text":"\n\nclass Contact:\n\n def __init__(self, firstname, middlename, lastname,nickname,title,home_number,company,adress):\n self.firstname=firstname\n self.middlename=middlename\n self.lastname=lastname\n self.nickname=nickname\n self.title=title\n self.home_number=home_number\n self.company=company\n self.adress=adress\n","sub_path":"model/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"108474381","text":"# Copyright (C) 2013 Google Inc., authors, and contributors \n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n# Created By: dan@reciprocitylabs.com\n# Maintained By: dan@reciprocitylabs.com\n\n\"\"\"Remove obsolete log events table for LogEvents model\n\nRevision ID: 5459dba4c5e9\nRevises: 4155c544acb5\nCreate Date: 2013-07-31 14:26:21.992050\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5459dba4c5e9'\ndown_revision = '4155c544acb5'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.drop_table('log_events')\n\ndef downgrade():\n op.create_table('log_events',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('description', sa.Text(), nullable=True),\n sa.Column('severity', sa.String(length=250), nullable=True),\n sa.Column('modified_by_id', sa.String(length=250), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n","sub_path":"src/ggrc/migrations/versions/20130731142621_5459dba4c5e9_remove_obsolete_log_.py","file_name":"20130731142621_5459dba4c5e9_remove_obsolete_log_.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398986253","text":"import matplotlib.pyplot as plt\nfrom matplotlib.table import Table\n# from cfr_v2 import CFR\nimport pickle\n\n\n# cfr = CFR()\n# ante = 1.0\n# bet1 = 2.0\n# bet2 = 8.0\n\n# util = cfr.train(1000, ante, bet1, bet2)\n\nACTION_TO_HISTORY_MAPPING = {\"fold\": \"0\", \"call\": \"1\", \"raise\": \"2\", \"small_blind\": \"3\", \"big_blind\": \"4\", \"ante\": \"5\"}\nHISTORY_TO_ACTION_MAPPING = {\"0\": \"fold\", \"1\": \"call\", \"2\": \"raise\", \"3\": \"small_blind\", \"4\": \"big_blind\", \"5\": \"ante\"}\n\nlabel = ['-1', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', \n '10', '11', '12', '13', '14', '15', '16', '17']\nrank_index_map = {'-1': 0, '0': 1, '1': 2, '2': 3,\n'3': 4, '4': 5, '5': 6,\n'6': 7, '7': 8, '8': 9,\n'9': 10, '10': 11, '11': 12, '12': 13,\n'13': 14, '14': 15, '15': 16, '16': 17,\n'17': 18}\n\ndef get_color(frequency):\n if frequency >= 0.9:\n return 'green'\n elif frequency >= 0.75:\n return 'yellowgreen'\n elif frequency >= 0.5:\n return 'yellow'\n elif frequency >= 0.25:\n return 'orange'\n elif frequency >= 0.05:\n return 'orangered'\n else:\n return 'red'\n\ndef filter_strategy(original_result):\n p1_bet = 'Player One Betting Range'\n p1_bet_call = 'Player One Call All-in Range'\n p1_check_call = 'Player One Check-Call Range'\n p1_check_raise = 'Player One Check-Raise All-in Range'\n\n p2_call = 'Player Two Calling Range'\n p2_raise = 'Player Two All-in Range'\n p2_bet = 'Player Two Betting Range'\n p2_bet_call = 
'Player Two Call All-in Range'\n\n result[p1_bet] = dict()\n result[p1_bet_call] = dict()\n result[p1_check_raise] = dict()\n result[p1_check_call] = dict()\n\n result[p2_call] = dict()\n result[p2_raise] = dict()\n result[p2_bet] = dict()\n result[p2_bet_call] = dict()\n\n for state, node in original_result.items(): \n split_state = state.split(\"G\")\n if len(split_state) > 2:\n continue\n hand = split_state[0][1:]\n history = split_state[1] if len(split_state) == 2 else \"\"\n # print(\"hand: %s \\nhistory: %s\" %(hand, history))\n\n for i in range(len(history)):\n if i % 2 == 0:\n print(history[i])\n # # player 1\n # if len(history) == 0:\n # result[p1_bet][hand] = node[ACTION_TO_HISTORY_MAPPING[\"raise\"]]\n # # player 2\n # elif len(history) == 1:\n # result[p2_raise][hand] = node[ACTION_TO_HISTORY_MAPPING[\"raise\"]]\n # result[p2_call][hand] = node[ACTION_TO_HISTORY_MAPPING[\"call\"]]\n # # player 1\n # elif len(history) == 2:\n # if history[0] == ACTION_TO_HISTORY_MAPPING[\"raise\"]:\n # result[p1_bet_call][hand] = node[ACTION_TO_HISTORY_MAPPING[\"call\"]]\n # else:\n # result[p1_check_raise][hand] = node[ACTION_TO_HISTORY_MAPPING[\"raise\"]]\n # result[p1_check_call][hand] = node[ACTION_TO_HISTORY_MAPPING[\"call\"]]\n # # player 2\n # elif len(history) == 3:\n # result[p2_bet_call][hand] = node[ACTION_TO_HISTORY_MAPPING[\"call\"]]\n\n # clean graphs\n tol = 0.005\n for hand, frequency in result[p1_bet].items():\n if frequency > 1 - tol and hand in result[p1_check_raise]:\n result[p1_check_raise][hand] = 0.0\n if frequency > 1 - tol and hand in result[p1_check_call]:\n result[p1_check_call][hand] = 0.0\n if frequency < tol and hand in result[p1_bet_call]:\n result[p1_bet_call][hand] = 0.0\n for hand, frequency in result[p2_bet].items():\n if frequency < tol and hand in result[p2_bet_call]:\n result[p2_bet_call][hand] = 0.0\n\n return result\n\ndef create_table(title, frequencies):\n fig, ax = plt.subplots()\n ax.set_axis_off()\n tb = Table(ax, bbox=[0,0,1,1])\n\n nrows, ncols = 1, len(label)\n width, height = 1.0 / ncols, 1.0 / nrows\n\n # Add cells\n for hand, val in frequencies.items():\n # print(hand)\n # print(val)\n i = rank_index_map[hand]\n\n color = get_color(val)\n\n value_formatted = '{0:.2f}'.format(val)\n tb.add_cell(i + 1, 0, width, height, text=hand,\n loc='center', facecolor=color)\n\n # Row Labels...\n for i in range(len(label)):\n tb.add_cell(i + 1, -1, width, height, text=label[i], loc='right',\n edgecolor='none', facecolor='none')\n # Column Labels...\n # for j in range(len(label)):\n # tb.add_cell(0, j, width, height/2, text=label[j], loc='center',\n # edgecolor='none', facecolor='none')\n ax.add_table(tb)\n plt.title(title)\n return fig\n\n\n# print \"Player One Expected Value Per Hand: %f\" % util\n\n# result = cfr.get_strategy()\nresult = pickle.load(open(\"strats/no_fold_cfr.strat\", \"rb\"))\n# print(result)\nfiltered_result = filter_strategy(result)\n# for decision in sorted(result):\n# table = create_table(decision, filtered_result[decision])\n\nplt.show()\n","sub_path":"cfr_v2_test.py","file_name":"cfr_v2_test.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59048312","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 17 22:49:49 2021\r\n\r\n@author: Mohammed Al-Fahdi\r\n\"\"\"\r\n\r\nimport IPython as IP \r\nIP.get_ipython().magic('reset -sf')\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport re\r\nimport matplotlib.pyplot as 
plt\r\nimport matplotlib as mpl\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nimport seaborn as sns\r\nimport sklearn as sk\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.metrics import mean_absolute_error\r\n\r\nimport matplotlib.pyplot as plt\r\nimport xgboost as xgb\r\n\r\nimport tkinter as tk\r\n\r\n# import lime as lm\r\n# from lime import lime_tabular\r\n# changing color parameters\r\n# cc = plt.rcParams['axes.prop_cycle'].by_key()['color']\r\n\r\nplt.close('all')\r\n\r\n# Load Data\r\ndata = pd.read_excel(\"ASC Student Challenge 2021_Data Set.xlsx\"); df=data.iloc[0:449,:]\r\n#ASC_2021_data = pd.read_excel(\"ASC Student Challenge 2021_Data Set.xlsx\")\r\no=df.loc[:, 'Heat Rate 1 [C/min]':'Autoclave Duration [min]']\r\norig_columns=o.columns\r\n#print(max(df['Heat Rate 2 [C/min]']), min(df['Heat Rate 2 [C/min]']))\r\nregex = re.compile(r\"\\[|\\]|<\", re.IGNORECASE)\r\ndf.columns = [regex.sub(\"_\", col) if any(x in str(col) for x in set(('[', ']', '<'))) else col for col in df.columns.values]\r\nprint(df.columns)\r\n\r\nx_cols=['Vacuum Pressure (*Patm) _Pa_', 'Cure Cycle Total Time _min_', \r\n 'Vacuum Duration _min_','Heat Rate 1 _C/min_', \r\n 'Temperature Dwell 1 _min_', 'Ramp 1 Duration _min_',\r\n 'Autoclave Pressure (*Patm) _Pa_', 'Autoclave Duration _min_']\r\nx=df.loc[:, x_cols]; print(x); print(x.columns)\r\n\r\ny = df.loc[:, 'Eff. Porosity (%)']\r\nx_train, x_test, y_train, y_test=train_test_split(x, y, test_size=0.2, shuffle=True)\r\n#%% Plots for Facesheet Consolidation - Max. Fibre Volume Fraction\r\n\r\n# =============================================================================\r\n# Gradient Boosting Regression\r\n# =============================================================================\r\nparams = {'n_estimators': 800}\r\nregr = xgb.XGBRegressor(max_depth=4, learning_rate=0.03, n_estimators=params['n_estimators'], verbosity=1, objective='reg:squarederror',\r\n \tbooster='gbtree', tree_method='auto', n_jobs=1, gamma=0.0001, min_child_weight=8,max_delta_step=0,\r\n \tsubsample=0.6, colsample_bytree=0.7, colsample_bynode=1, reg_alpha=0,\r\n \treg_lambda=4, scale_pos_weight=1, base_score=0.6, missing=None,\r\n \tnum_parallel_tree=1, importance_type='gain', eval_metric='mae',nthread=4).fit(x_train,y_train)\r\n\r\n\r\n#%%\r\n\r\ny_pred=regr.predict(x_test)\r\ndf = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})\r\nscores=regr.score(x_test,y_test)\r\nr2_test=r2_score(y_test, y_pred); \r\nprint('R2 score:',r2_test)\r\nprint('Mean Absolute Error:', mean_absolute_error(y_test, y_pred))\r\nprint('Mean Squared Error:', mean_squared_error(y_test, y_pred))\r\nprint('Root Mean Squared Error:', np.sqrt(mean_squared_error(y_test, y_pred)))\r\n\r\n\r\n#%%\r\n# =============================================================================\r\n# GUI\r\n# tkinter GUI\r\n\r\nroot= tk.Tk()\r\n\r\ncanvas1 = tk.Canvas(root, width = 700, height = 800)\r\ncanvas1.pack()\r\n\r\n\r\n# New_Vacuum_Pressure label and input box\r\nlabel1 = tk.Label(root, text='Vacuum Pressure (0-1): ')\r\ncanvas1.create_window(160, 100, window=label1)\r\nentry1 = tk.Entry (root) # create 1st entry box\r\ncanvas1.create_window(380, 100, window=entry1)\r\n\r\n# New_AutoclavePressure_Rate label and input box\r\nlabel2 = tk.Label(root, text=' Cure Cycle Total Time [min] (200-337): ')\r\ncanvas1.create_window(160, 125, 
window=label2)\r\nentry2 = tk.Entry (root) # create 2nd entry box\r\ncanvas1.create_window(380, 125, window=entry2)\r\n\r\n# New_AutoclavePressure_Rate label and input box\r\nlabel3 = tk.Label(root, text=' Vaccuum Duration (120-336): ')\r\ncanvas1.create_window(160, 150, window=label3)\r\nentry3 = tk.Entry (root) # create 2nd entry box\r\ncanvas1.create_window(380, 150, window=entry3)\r\n\r\nlabel5 = tk.Label(root, text='Heat Rate 1 [C/min] (1-4): ')\r\ncanvas1.create_window(160, 175, window=label5)\r\nentry5 = tk.Entry (root) # create 2nd entry box\r\ncanvas1.create_window(380, 175, window=entry5)\r\n\r\nlabel8 = tk.Label(root, text='Temperature Dwell 1 [min] (60-120): ')\r\ncanvas1.create_window(160, 200, window=label8)\r\nentry8 = tk.Entry (root) # create 2nd entry box\r\ncanvas1.create_window(380, 200, window=entry8)\r\n\r\nlabel9 = tk.Label(root, text='Ramp 1 Duration _min_ (22.5-90): ')\r\ncanvas1.create_window(160, 225, window=label9)\r\nentry9 = tk.Entry (root) # create 2nd entry box\r\ncanvas1.create_window(380, 225, window=entry9)\r\n\r\nlabel10 = tk.Label(root, text='Autoclave Pressure (*Patm) [Pa] (2-4): ')\r\ncanvas1.create_window(160, 250, window=label10)\r\nentry10 = tk.Entry (root) # create 2nd entry box\r\ncanvas1.create_window(380, 250, window=entry10)\r\n\r\nlabel11 = tk.Label(root, text='Autoclave Duration [min] (120-336): ')\r\ncanvas1.create_window(160, 275, window=label11)\r\nentry11 = tk.Entry (root) # create 2nd entry box\r\ncanvas1.create_window(380, 275, window=entry11)\r\n\r\n# Cycle Number : min: 1 max: 1800\r\n# Heat Rate 1 _C/min_ : min: 1 max: 4\r\n# Ramp 1 Duration _min_ : min: 22.5 max: 90.0\r\n# Temperature Dwell 1 _min_ : min: 60 max: 120\r\n# Heat Rate 2 _C/min_ : min: 0 max: 4\r\n# Ramp 2 Duration _min_ : min: 0.0 max: 67.0\r\n# Temperature Dwell 2 _min_ : min: 0 max: 120\r\n# Vacuum Pressure (*Patm) _Pa_ : min: 0.01 max: 1.0\r\n# Vacuum Start Time _min_ : min: 1 max: 80\r\n# Vacuum Duration _min_ : min: 120.0 max: 336.0\r\n# Autoclave Pressure (*Patm) _Pa_ : min: 2 max: 4\r\n# Cure Cycle Total Time _min_ : min: 200.0 max: 337.0\r\n# Autoclave Start Time _min_ : min: 1 max: 80\r\n# Autoclave Duration _min_ : min: 120.0 max: 336.0\r\n\r\ndef values(): \r\n global New_Vacuum_Pressure #our 1st input variable\r\n New_Vacuum_Pressure = float(entry1.get()) \r\n \r\n global New_cure_cycle #our 2nd input variable\r\n New_cure_cycle = float(entry2.get()) \r\n \r\n global New_vacuum_duration #our 2nd input variable\r\n New_vacuum_duration = float(entry3.get()) \r\n \r\n global New_heat_rate1\r\n New_heat_rate1=float(entry5.get())\r\n \r\n global New_temp_dwell_1\r\n New_temp_dwell_1=float(entry8.get())\r\n \r\n global New_ramp1_duration\r\n New_ramp1_duration=float(entry9.get())\r\n \r\n global New_autoclave_press\r\n New_autoclave_press=float(entry10.get())\r\n \r\n global New_autoclave_duration\r\n New_autoclave_duration=float(entry11.get())\r\n\r\n new_data={'Vacuum Pressure (*Patm) _Pa_': New_Vacuum_Pressure,\r\n 'Cure Cycle Total Time _min_': New_cure_cycle,\r\n 'Vacuum Duration _min_': New_vacuum_duration,\r\n 'Heat Rate 1 _C/min_':New_heat_rate1,\r\n 'Temperature Dwell 1 _min_':New_temp_dwell_1,\r\n 'Ramp 1 Duration _min_':New_ramp1_duration,\r\n 'Autoclave Pressure (*Patm) _Pa_':New_autoclave_press,\r\n 'Autoclave Duration _min_':New_autoclave_duration}\r\n new_data_df=pd.DataFrame([new_data])\r\n Prediction_result = ('Predicted Effective Porosity: ', regr.predict(new_data_df))\r\n label_Prediction = tk.Label(root, text= Prediction_result, 
bg='orange')\r\n canvas1.create_window(290, 350, window=label_Prediction)\r\n \r\nbutton1 = tk.Button (root, text='Predict Eff. Porosity',command=values, bg='orange') # button to call the 'values' command above \r\ncanvas1.create_window(290, 310, window=button1)\r\n \r\n#plot 1st scatter \r\n# figure3 = plt.Figure(figsize=(5,4), dpi=100)\r\n# ax3 = figure3.add_subplot(111)\r\n# ax3.scatter(ASC_2021_data['Autoclave Pressure (*Patm) [Pa]'].astype(float),ASC_2021_data['Max (Fiber Volume Fraction) (%)'].astype(float), color = 'r')\r\n# scatter3 = FigureCanvasTkAgg(figure3, root) \r\n# scatter3.get_tk_widget().pack(side=tk.RIGHT, fill=tk.BOTH)\r\n# ax3.set_title('Max-Vf Vs. Autoclave Pressure')\r\n# ax3.set_xlabel('Autoclave Pressure (*Patm) [Pa]')\r\n# ax3.set_ylabel('Max (Fiber Volume Fraction) (%)')\r\n\r\n\r\n# #plot 2nd scatter \r\n# figure4 = plt.Figure(figsize=(5,4), dpi=100)\r\n# ax4 = figure4.add_subplot(111)\r\n# ax4.scatter(ASC_2021_data['Vacuum Pressure (*Patm) [Pa]'].astype(float),ASC_2021_data['Max (Fiber Volume Fraction) (%)'].astype(float), color = 'g')\r\n# scatter4 = FigureCanvasTkAgg(figure4, root) \r\n# scatter4.get_tk_widget().pack(side=tk.RIGHT, fill=tk.BOTH)\r\n# ax4.set_xlabel('Vacuum Pressure (*Patm) [Pa]')\r\n# ax4.set_title('Max-Vf Vs. Vacuum Pressure')\r\n# ax4.set_ylabel('Max (Fiber Volume Fraction) (%)')\r\n\r\nroot.mainloop()\r\n\r\n\r\n#%%\r\n","sub_path":"prediction_eff_porosity_code_1.py","file_name":"prediction_eff_porosity_code_1.py","file_ext":"py","file_size_in_byte":8513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573584355","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('players', '0002_auto_20151109_1350'),\n ('teams', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='team',\n name='player2',\n field=models.ForeignKey(null=True, to='players.Player', related_name='player2'),\n ),\n migrations.AddField(\n model_name='team',\n name='player3',\n field=models.ForeignKey(null=True, to='players.Player', related_name='player3'),\n ),\n migrations.AddField(\n model_name='team',\n name='player4',\n field=models.ForeignKey(null=True, to='players.Player', related_name='player4'),\n ),\n migrations.AddField(\n model_name='team',\n name='player5',\n field=models.ForeignKey(null=True, to='players.Player', related_name='player5'),\n ),\n migrations.AddField(\n model_name='team',\n name='player6',\n field=models.ForeignKey(null=True, to='players.Player', related_name='player6'),\n ),\n migrations.AlterField(\n model_name='team',\n name='player1',\n field=models.ForeignKey(null=True, to='players.Player', related_name='player1'),\n ),\n ]\n","sub_path":"collegiatemasters/teams/migrations/0002_auto_20151110_1425.py","file_name":"0002_auto_20151110_1425.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432550759","text":"MESES = (\n ('1', 'Janeiro'),\n ('2', 'Fevereiro'),\n ('3', 'Março'),\n ('4', 'Abril'),\n ('5', 'Maio'),\n ('6', 'Junho'),\n ('7', 'Julho'),\n ('8', 'Agosto'),\n ('9', 'Setembro'),\n ('10', 'Outubro'),\n ('11', 'Novembro'),\n ('12', 'Dezembro'),\n)\n\nTIPO_TELEFONE = (\n ('', 'Escolha uma opção'),\n ('residencial', 'Residêncial'),\n ('celular', 'Celular'),\n ('comercial', 'Comercial'),\n)\n\nUF = (\n ('', 'Escolha uma opção'),\n ('AC', 
'Acre'),\n ('AL', 'Alagoas'),\n ('AP', 'Amapá'),\n ('AM', 'Amazonas'),\n ('BA', 'Bahia'),\n ('CE', 'Ceará'),\n ('DF', 'Distrito Federal'),\n ('ES', 'Espírito Santo'),\n ('GO', 'Goiás'),\n ('MT', 'Mato Grosso'),\n ('MA', 'Maranhão'),\n ('MS', 'Mato Grosso do Sul'),\n ('MG', 'Minas Gerais'),\n ('PA', 'Pará'),\n ('PB', 'Paraíba'),\n ('PR', 'Paraná'),\n ('PE', 'Pernambuco'),\n ('PI', 'Piauí'),\n ('RJ', 'Rio de Janeiro'),\n ('RN', 'Rio Grande do Norte'),\n ('RS', 'Rio Grande do Sul'),\n ('RO', 'Rondônia'),\n ('RR', 'Roraima'),\n ('SC', 'Santa Catarina'),\n ('SP', 'São Paulo'),\n ('SE', 'Sergipe'),\n ('TO', 'Tocantins'),\n)\n\nPAIS = (\n ('', ''),\n ('afeganistao', 'Afeganistão'),\n ('africa_do_sul', 'África do Sul'),\n ('akrotiri', 'Akrotiri'),\n ('albania', 'Albânia'),\n ('alemanha', 'Alemanha'),\n ('andorra', 'Andorra'),\n ('angola', 'Angola'),\n ('anguila', 'Anguila'),\n ('antarctida', 'Antárctida'),\n ('antigua_e_barbuda', 'Antígua e Barbuda'),\n ('antilhas_neerlandesas', 'Antilhas Neerlandesas'),\n ('arabia_saudita', 'Arábia Saudita'),\n ('arctic_ocean', 'Arctic Ocean'),\n ('argelia', 'Argélia'),\n ('argentina', 'Argentina'),\n ('armenia', 'Arménia'),\n ('aruba', 'Aruba'),\n ('ashmore_and_cartier_islands', 'Ashmore and Cartier Islands'),\n ('atlantic_ocean', 'Atlantic Ocean'),\n ('australia', 'Austrália'),\n ('austria', 'Áustria'),\n ('azerbaijao', 'Azerbaijão'),\n ('baamas', 'Baamas'),\n ('bangladeche', 'Bangladeche'),\n ('barbados', 'Barbados'),\n ('barem', 'Barém'),\n ('belgica', 'Bélgica'),\n ('belize', 'Belize'),\n ('benim', 'Benim'),\n ('bermudas', 'Bermudas'),\n ('bielorrussia', 'Bielorrússia'),\n ('birmania', 'Birmânia'),\n ('bolivia', 'Bolívia'),\n ('bosnia_e_herzegovina', 'Bósnia e Herzegovina'),\n ('botsuana', 'Botsuana'),\n ('brasil', 'Brasil'),\n ('brunei', 'Brunei'),\n ('bulgaria', 'Bulgária'),\n ('burquina_faso', 'Burquina Faso'),\n ('burundi', 'Burúndi'),\n ('butao', 'Butão'),\n ('cabo_verde', 'Cabo Verde'),\n ('camaroes', 'Camarões'),\n ('camboja', 'Camboja'),\n ('canada', 'Canadá'),\n ('catar', 'Catar'),\n ('cazaquistao', 'Cazaquistão'),\n ('chade', 'Chade'),\n ('chile', 'Chile'),\n ('china', 'China'),\n ('chipre', 'Chipre'),\n ('clipperton_island', 'Clipperton Island'),\n ('colombia', 'Colômbia'),\n ('comores', 'Comores'),\n ('congo_brazzaville', 'Congo-Brazzaville'),\n ('congo_kinshasa', 'Congo-Kinshasa'),\n ('coral_sea_islands', 'Coral Sea Islands'),\n ('coreia_do_norte', 'Coreia do Norte'),\n ('coreia_do_sul', 'Coreia do Sul'),\n ('costa_do_marfim', 'Costa do Marfim'),\n ('costa_rica', 'Costa Rica'),\n ('croacia', 'Croácia'),\n ('cuba', 'Cuba'),\n ('dhekelia', 'Dhekelia'),\n ('dinamarca', 'Dinamarca'),\n ('dominica', 'Domínica'),\n ('egipto', 'Egipto'),\n ('emiratos_arabes_unidos', 'Emiratos Árabes Unidos'),\n ('equador', 'Equador'),\n ('eritreia', 'Eritreia'),\n ('eslovaquia', 'Eslováquia'),\n ('eslovenia', 'Eslovénia'),\n ('espanha', 'Espanha'),\n ('estados_unidos', 'Estados Unidos'),\n ('estonia', 'Estónia'),\n ('etiopia', 'Etiópia'),\n ('faroe', 'Faroé'),\n ('fiji', 'Fiji'),\n ('filipinas', 'Filipinas'),\n ('finlandia', 'Finlândia'),\n ('franca', 'França'),\n ('gabao', 'Gabão'),\n ('gambia', 'Gâmbia'),\n ('gana', 'Gana'),\n ('gaza_strip', 'Gaza Strip'),\n ('georgia', 'Geórgia'),\n ('georgia_do_sul_e_sandwich_do_sul', 'Geórgia do Sul e Sandwich do Sul'),\n ('gibraltar', 'Gibraltar'),\n ('granada', 'Granada'),\n ('grecia', 'Grécia'),\n ('gronelandia', 'Gronelândia'),\n ('guame', 'Guame'),\n ('guatemala', 'Guatemala'),\n ('guernsey', 'Guernsey'),\n ('guiana', 
'Guiana'),\n ('guine', 'Guiné'),\n ('guine_equatorial', 'Guiné Equatorial'),\n ('guine_bissa', 'Guiné-Bissa'),\n ('haiti', 'Haiti'),\n ('honduras', 'Honduras'),\n ('hong_kong', 'Hong Kong'),\n ('hungria', 'Hungria'),\n ('iemen', 'Iémen'),\n ('ilha_bouvet', 'Ilha Bouvet'),\n ('ilha_do_natal', 'Ilha do Natal'),\n ('ilha_norfolk', 'Ilha Norfolk'),\n ('ilhas_caimao', 'Ilhas Caimão'),\n ('ilhas_cook', 'Ilhas Cook'),\n ('ilhas_dos_cocos', 'Ilhas dos Cocos'),\n ('ilhas_falkland', 'Ilhas Falkland'),\n ('ilhas_heard_e_mcdonald', 'Ilhas Heard e McDonald'),\n ('ilhas_marshall', 'Ilhas Marshall'),\n ('ilhas_salomao', 'Ilhas Salomão'),\n ('ilhas_turcas_e_caicos', 'Ilhas Turcas e Caicos'),\n ('ilhas_virgens_americanas', 'Ilhas Virgens Americanas'),\n ('ilhas_virgens_britanicas', 'Ilhas Virgens Britânicas'),\n ('india', 'Índia'),\n ('indian_ocean', 'Indian Ocean'),\n ('indonesia', 'Indonésia'),\n ('irao', 'Irão'),\n ('iraque', 'Iraque'),\n ('irlanda', 'Irlanda'),\n ('islandia', 'Islândia'),\n ('israel', 'Israel'),\n ('italia', 'Itália'),\n ('jamaica', 'Jamaica'),\n ('jan_mayen', 'Jan Mayen'),\n ('japao', 'Japão'),\n ('jersey', 'Jersey'),\n ('jibuti', 'Jibuti'),\n ('jordania', 'Jordânia'),\n ('kuwait', 'Kuwait'),\n ('laos', 'Laos'),\n ('lesoto', 'Lesoto'),\n ('letonia', 'Letónia'),\n ('libano', 'Líbano'),\n ('liberia', 'Libéria'),\n ('libia', 'Líbia'),\n ('listenstaine', 'Listenstaine'),\n ('lituania', 'Lituânia'),\n ('luxemburgo', 'Luxemburgo'),\n ('maca', 'Maca'),\n ('macedonia', 'Macedónia'),\n ('madagascar', 'Madagáscar'),\n ('malasia', 'Malásia'),\n ('malavi', 'Malávi'),\n ('maldivas', 'Maldivas'),\n ('mali', 'Mali'),\n ('malta', 'Malta'),\n ('man_isle_of', 'Man, Isle of'),\n ('marianas_do_norte', 'Marianas do Norte'),\n ('marrocos', 'Marrocos'),\n ('mauricia', 'Maurícia'),\n ('mauritania', 'Mauritânia'),\n ('mayotte', 'Mayotte'),\n ('mexico', 'México'),\n ('micronesia', 'Micronésia'),\n ('mocambique', 'Moçambique'),\n ('moldavia', 'Moldávia'),\n ('monaco', 'Mónaco'),\n ('mongolia', 'Mongólia'),\n ('monserrate', 'Monserrate'),\n ('montenegro', 'Montenegro'),\n ('mundo', 'Mundo'),\n ('namibia', 'Namíbia'),\n ('naur', 'Naur'),\n ('navassa_island', 'Navassa Island'),\n ('nepal', 'Nepal'),\n ('nicaragua', 'Nicarágua'),\n ('niger', 'Níger'),\n ('nigeria', 'Nigéria'),\n ('niue', 'Niue'),\n ('noruega', 'Noruega'),\n ('nova_caledonia', 'Nova Caledónia'),\n ('nova_zelandia', 'Nova Zelândia'),\n ('oma', 'Omã'),\n ('pacific_ocean', 'Pacific Ocean'),\n ('paises_baixos', 'Países Baixos'),\n ('pala', 'Pala'),\n ('panama', 'Panamá'),\n ('papua_nova_guine', 'Papua-Nova Guiné'),\n ('paquistao', 'Paquistão'),\n ('paracel_islands', 'Paracel Islands'),\n ('paraguai', 'Paraguai'),\n ('per', 'Per'),\n ('pitcairn', 'Pitcairn'),\n ('polinesia_francesa', 'Polinésia Francesa'),\n ('polonia', 'Polónia'),\n ('porto_rico', 'Porto Rico'),\n ('portugal', 'Portugal'),\n ('quenia', 'Quénia'),\n ('quirguizistao', 'Quirguizistão'),\n ('quiribati', 'Quiribáti'),\n ('reino_unido', 'Reino Unido'),\n ('republica_centro_africana', 'República Centro-Africana'),\n ('republica_checa', 'República Checa'),\n ('republica_dominicana', 'República Dominicana'),\n ('romenia', 'Roménia'),\n ('ruanda', 'Ruanda'),\n ('russia', 'Rússia'),\n ('salvador', 'Salvador'),\n ('samoa', 'Samoa'),\n ('samoa_americana', 'Samoa Americana'),\n ('santa_helena', 'Santa Helena'),\n ('santa_lucia', 'Santa Lúcia'),\n ('sao_cristovao_e_neves', 'São Cristóvão e Neves'),\n ('sao_marinho', 'São Marinho'),\n ('sao_pedro_e_miquelon', 'São Pedro e Miquelon'),\n 
('sao_tome_e_principe', 'São Tomé e Príncipe'),\n    ('sao_vicente_e_granadinas', 'São Vicente e Granadinas'),\n    ('sara_ocidental', 'Sara Ocidental'),\n    ('seicheles', 'Seicheles'),\n    ('senegal', 'Senegal'),\n    ('serra_leoa', 'Serra Leoa'),\n    ('servia', 'Sérvia'),\n    ('singapura', 'Singapura'),\n    ('siria', 'Síria'),\n    ('somalia', 'Somália'),\n    ('southern_ocean', 'Southern Ocean'),\n    ('spratly_islands', 'Spratly Islands'),\n    ('sri_lanca', 'Sri Lanca'),\n    ('suazilandia', 'Suazilândia'),\n    ('sudao', 'Sudão'),\n    ('suecia', 'Suécia'),\n    ('suica', 'Suíça'),\n    ('suriname', 'Suriname'),\n    ('svalbard_e_jan_mayen', 'Svalbard e Jan Mayen'),\n    ('tailandia', 'Tailândia'),\n    ('taiwan', 'Taiwan'),\n    ('tajiquistao', 'Tajiquistão'),\n    ('tanzania', 'Tanzânia'),\n    ('territorio_britanico_do_oceano_indico', 'Território Britânico do Oceano Índico'),\n    ('territorios_austrais_franceses', 'Territórios Austrais Franceses'),\n    ('timor_leste', 'Timor Leste'),\n    ('togo', 'Togo'),\n    ('tokela', 'Tokela'),\n    ('tonga', 'Tonga'),\n    ('trindade_e_tobago', 'Trindade e Tobago'),\n    ('tunisia', 'Tunísia'),\n    ('turquemenistao', 'Turquemenistão'),\n    ('turquia', 'Turquia'),\n    ('tuval', 'Tuval'),\n    ('ucrania', 'Ucrânia'),\n    ('uganda', 'Uganda'),\n    ('uniao_europeia', 'União Europeia'),\n    ('uruguai', 'Uruguai'),\n    ('usbequistao', 'Usbequistão'),\n    ('vanuat', 'Vanuat'),\n    ('vaticano', 'Vaticano'),\n    ('venezuela', 'Venezuela'),\n    ('vietname', 'Vietname'),\n    ('wake_island', 'Wake Island'),\n    ('wallis_e_futuna', 'Wallis e Futuna'),\n    ('west_bank', 'West Bank'),\n    ('zambia', 'Zâmbia'),\n    ('zimbabue', 'Zimbabué'),\n)\n","sub_path":"base/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":9845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329739924","text":"def fact(num):\n    \"\"\"Return num! when num and the result are both below 1000, else None.\"\"\"\n    if num == '':\n        return 0\n    if isinstance(num, str):\n        return None\n    if num < 2:\n        return 1\n    if num >= 1000:\n        return None\n    result = 1\n    while num > 1:\n        result *= num\n        num -= 1\n    if result < 1000:\n        return result\n    return None\n","sub_path":"TDD/fact.py","file_name":"fact.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"313379027","text":"import maya.cmds as cmds\nimport cmt.cqueue.core as core\nimport cmt.cqueue.fields as fields\nfrom cmt.qt import QtWidgets\nimport cmt.shortcuts as shortcuts\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass SwingTwistView(fields.ContainerView):\n    \"\"\"Customize the view of the container.\"\"\"\n    def widget(self, container):\n        # The fields will be arranged in two row containers\n        # [[driver, driven], [name, twist, swing, twistAxis]]\n        widget = QtWidgets.QFrame()\n        widget.setFrameStyle(QtWidgets.QFrame.StyledPanel)\n        vbox = QtWidgets.QVBoxLayout(widget)\n\n        for attrs in [\n            ['driver', 'driven'],\n            ['name', 'twist', 'swing', 'twist_axis'],\n        ]:\n            hbox = QtWidgets.QHBoxLayout(widget)\n            vbox.addLayout(hbox)\n            hbox.setContentsMargins(0, 0, 0, 0)\n            for attr in attrs:\n                hbox.addWidget(QtWidgets.QLabel(container[attr].verbose_name))\n                hbox.addWidget(container[attr].widget())\n\n        return widget\n\n\nclass Component(core.Component):\n    \"\"\"A Component that creates swingTwist nodes.\"\"\"\n    twist_axis = {\n        0: 'X',\n        1: 'Y',\n        2: 'Z',\n    }\n\n    swingtwists = fields.ArrayField('swing_twists', add_label_text='Add SwingTwist', display_name=False)\n    container = fields.ContainerField('swing_twist', parent=swingtwists, 
container_view=SwingTwistView())\n driver = fields.MayaNodeField('driver', help_text='The node to drive the swingtwist', parent=container)\n driven = fields.MayaNodeField('driven', help_text='The node to be driven', parent=container)\n name = fields.CharField('name',\n default='swingTwist#',\n help_text='The name of the created swingTwist node.',\n parent=container)\n twist = fields.FloatField('twist',\n default=1.0,\n help_text='The twist amount',\n min_value=-1.0,\n max_value=1.0,\n parent=container)\n swing = fields.FloatField('swing',\n default=1.0,\n help_text='The swing amount',\n min_value=-1.0,\n max_value=1.0,\n parent=container)\n twist_axis = fields.CharField('twist_axis',\n default='X',\n choices=['X', 'Y', 'Z'],\n help_text='The twist axis',\n parent=container)\n\n @classmethod\n def image_path(cls):\n return shortcuts.get_icon_path('swingTwist')\n\n def execute(self):\n cmds.loadPlugin('cmt_py', qt=True)\n for container in self.swingtwists:\n driver = container['driver'].value()\n driven = container['driven'].value()\n if not cmds.objExists(driver) or not cmds.objExists(driven):\n logger.warning('{0} or {1} does not exist.'.format(driver, driven))\n continue\n logger.info('Creating swingtwist on {0} from {1}'.format(driven, driver))\n name = container['name'].value()\n twist = container['twist'].value()\n swing = container['swing'].value()\n twist_axis = 'XYZ'.index(container['twist_axis'].value())\n cmds.swingTwist(driver, driven, name=name, twist=twist, swing=swing, twistAxis=twist_axis)\n\n\n","sub_path":"scripts/cmt/cqueue/components/rig/swingtwist.py","file_name":"swingtwist.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"134937255","text":"'''Third version.\n\n * More robust error handling of invalid input.\n * Compound operations supported, as in:\n (+ (* 1 2) (- 3 4))\n'''\n\nimport operator\nimport traceback\nimport pprint\n\n\ndef my_sum(*args):\n '''Returns the sum of the supplied arguments'''\n return sum(arg for arg in args)\n\n\ndef my_prod(*args):\n '''Returns the product of the supplied arguments'''\n ans = 1\n for arg in args:\n ans *= arg\n return ans\n\n\nglobal_env = {\n '+': my_sum,\n '-': operator.sub,\n '*': my_prod,\n '/': operator.truediv,\n '//': operator.floordiv,\n 'quit': exit\n}\n\n\ndef evaluate(x):\n \"Evaluate an expression in the global environment.\"\n if isinstance(x, str): # variable reference\n return global_env[x]\n elif not isinstance(x, list): # constant literal\n return x\n else: # (\"procedure\" exp*)\n exps = [evaluate(exp) for exp in x]\n procedure = exps.pop(0)\n return procedure(*exps)\n\n\ndef parse(s):\n \"Parse a Lisp expression from a string.\"\n return convert_to_list(tokenize(s))\n\n\ndef convert_to_list(tokens):\n \"Converts a sequence of tokens into a list\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF while reading')\n token = tokens.pop(0)\n if '(' == token:\n lst = []\n while tokens[0] != ')':\n lst.append(convert_to_list(tokens))\n tokens.pop(0) # pop off ')'\n return lst\n elif ')' == token:\n raise SyntaxError('unexpected )')\n else:\n return atomize(token)\n\n\ndef atomize(token):\n \"Converts individual tokens to numbers if possible\"\n for conversion in [int, float]:\n try:\n return conversion(token)\n except ValueError:\n pass\n return token\n\n\ndef tokenize(s):\n \"Convert a string into a list of tokens.\"\n return s.replace(\"(\", \" ( \").replace(\")\", \" ) \").split()\n\n\ndef 
read_expression():\n    '''Reads an expression from a prompt'''\n    prompt = 'repl> '\n    prompt2 = ' ... '\n    inp = input(prompt)\n    open_parens = inp.count(\"(\") - inp.count(\")\")\n    while open_parens > 0:\n        inp += ' ' + input(prompt2)\n        open_parens = inp.count(\"(\") - inp.count(\")\")\n    if inp.startswith(\"parse \"):\n        pprint.pprint(parse(inp[6:]))\n        return None\n    return inp\n\n\ndef handle_error():\n    \"\"\"\n    Simple error handling.\n    \"\"\"\n\n    print(\"An error occurred. Here's the Python traceback:\\n\")\n    traceback.print_exc()\n    print()\n\n\ndef repl():\n    \"A read-eval-print loop.\"\n    while True:\n        inp = read_expression()\n        if not inp:\n            continue\n        try:\n            val = evaluate(parse(inp))\n            if val is not None:\n                print(val)\n        except (KeyboardInterrupt, SystemExit):\n            print(\"\\nExiting petit_lisp\\n\")\n            exit()\n        except Exception:\n            handle_error()\n\nif __name__ == '__main__':\n    repl()\n","sub_path":"petit_lisp/v3/petit_lisp.py","file_name":"petit_lisp.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47000333","text":"'''\n=====================================================================================\nGets user input\nDisplays game state\n-------------------------------------------------------------------------------------\n'''\n\n\n\nimport numpy as np\nimport pygame as pg\n\n\nfrom constants import *\nimport engine\n\n'''\nDraws the squares and pieces on the board\n'''\ndef draw_gs(screen, gs, row=None, col=None):\n\tdraw_board(screen) #Draw the squares\n\tdraw_selection(screen, row, col) #Draw the selected square\n\tdraw_pieces(screen, gs.board)\n\t#add in piece highlighting\n\t#add in move suggestions\n\n'''\n=====================================================================================\nDraw the squares on the board\n-------------------------------------------------------------------------------------\n'''\ndef draw_board(screen):\n\tcolours = [LIGHT_SQUARE, DARK_SQUARE]\n\tfor row in range(DIMENSION):\n\t\tfor col in range(DIMENSION):\n\t\t\t#Sets the colour for the square\n\t\t\tc = colours[(row+col)%2]\n\t\t\t#Places a square in the correct position\n\t\t\tsquare = pg.Rect(col*SQ_SIZE, row*SQ_SIZE, SQ_SIZE, SQ_SIZE)\n\t\t\t#Draw it on the screen\n\t\t\tpg.draw.rect(screen, c, square)\n\n'''\n=====================================================================================\nDraw selection square\n-------------------------------------------------------------------------------------\n'''\ndef draw_selection(screen, row, col):\n\t#Use explicit None checks so row 0 / column 0 can still be highlighted\n\tif row is not None and col is not None:\n\t\t#Places a square in the correct position\n\t\tsquare = pg.Rect(col*SQ_SIZE, row*SQ_SIZE, SQ_SIZE, SQ_SIZE)\n\t\t#Draw it on the screen\n\t\tpg.draw.rect(screen, RED_SQUARE, square)\n\n'''\n=====================================================================================\nDraw the pieces on the squares\n-------------------------------------------------------------------------------------\n'''\ndef draw_pieces(screen, board):\n\t\n\tfor row in range(DIMENSION):\n\t\tfor col in range(DIMENSION):\n\t\t\tpiece = board[row,col]\n\t\t\tif piece != '--':\n\t\t\t\t#Places a piece in the correct position\n\t\t\t\tp = pg.Rect(col*SQ_SIZE, row*SQ_SIZE, SQ_SIZE, SQ_SIZE)\n\t\t\t\tscreen.blit(IMAGES[piece], p)\n\n'''\n=====================================================================================\nAutomates importing images used for each piece\n-------------------------------------------------------------------------------------\n'''\ndef 
import_images():\n\t#Create dict to store all images with piece name\n\timgs = {}\n\tpieces = ['wp','wR','wN','wB','wQ','wK','bp','bR','bN','bB','bQ','bK']\n\tfor piece in pieces:\n\t\tfilename = 'img/{}.png'.format(piece)\n\t\timg = pg.image.load(filename).convert_alpha()\n\t\timg = pg.transform.smoothscale(img, (SQ_SIZE, SQ_SIZE))\n\t\timgs[piece] = img\n\treturn imgs\n\n'''\n=====================================================================================\nMain Function\n-------------------------------------------------------------------------------------\n'''\nif __name__ == '__main__':\n\t#Initialise the game\n\tpg.init()\n\t#Set screen size\n\tscreen = pg.display.set_mode((WIDTH, HEIGHT))\n\t#Update cycle\n\tclock = pg.time.Clock()\n\t#Set a background\n\tscreen.fill(pg.Color(*LIGHT_SQUARE))\n\n\t# Creates dict of images with naming scheme e.g. 'wK' = white King\n\t# Would have in constants.py but needs to be done after game initialised. \n\tglobal IMAGES\n\tIMAGES = import_images()\n\n\t# Generates a fresh board\n\tgs = engine.GameState()\n\t# Generate all the valid moves initially\n\tvalid_moves = gs.get_possible_moves()\n\t# valid_moves = gs.get_valid_moves()\n\t# Flag for tracking when move is made, stops valid_moves from being calculated constantly\n\tmove_made = False\n\t#Flag for keeping the game running\n\trunning = True\n\n\t# Keeps track of last square clicked on by user\n\tsq_selected = ()\n\t# Keeps track of previously selected squares\n\tprev_selected = []\n\twhile running:\n\t\t# If user quits, shut game down\n\t\tfor e in pg.event.get():\n\t\t\tif e.type == pg.QUIT:\n\t\t\t\trunning = False\n\n\t\t\telif e.type == pg.MOUSEBUTTONDOWN:\n\t\t\t\t# Retrieve x,y coords of mouse\n\t\t\t\tlocation = pg.mouse.get_pos()\n\t\t\t\tcol = location[0] // SQ_SIZE\n\t\t\t\trow = location[1] // SQ_SIZE\n\n\t\t\t\t# Deselect square if same as previously selected\n\t\t\t\tif sq_selected == (row, col):\n\t\t\t\t\tsq_selected = ()\n\t\t\t\t\tprev_selected = []\n\t\t\t\t# Otherwise select square\n\t\t\t\telse:\n\t\t\t\t\tsq_selected = (row, col)\n\t\t\t\t\tprev_selected.append(sq_selected)\n\n\n\t\t\t\t# If player selected a different square to the first \n\t\t\t\tif len(prev_selected) == 2:\n\t\t\t\t\tprint([x.moveID for x in valid_moves])\n\t\t\t\t\tmove = engine.Move(start=prev_selected[0], end=prev_selected[1], board=gs.board)\n\t\t\t\t\tprint(move.moveID)\n\t\t\t\t\tif move in valid_moves:\n\t\t\t\t\t\tgs.make_move(move)\n\t\t\t\t\t\tmove_made = True\n\t\t\t\t\t\tsq_selected = ()\n\t\t\t\t\t\tprev_selected = []\n\t\t\t\t\telse:\n\t\t\t\t\t\tprev_selected = [sq_selected]\n\n\t\t\t# If user wants to undo\n\t\t\telif e.type == pg.KEYDOWN:\n\t\t\t\tif e.key == pg.K_u:\n\t\t\t\t\t# Undo the move\n\t\t\t\t\tgs.undo_move()\n\t\t\t\t\t# Recalculate the valid moves\n\t\t\t\t\tmove_made = True\n\t\tif move_made:\n\t\t\tvalid_moves = gs.get_possible_moves()\n\t\t\t# valid_moves = gs.get_valid_moves()\n\t\t\tif gs.whitetomove:\n\t\t\t\tprint(\"White's turn\")\n\t\t\telse:\n\t\t\t\tprint(\"Black's turn\")\n\t\t\tmove_made = False\n\n\t\tif sq_selected:\n\t\t\tdraw_gs(screen, gs, row=row, col=col)\n\t\telse:\n\t\t\tdraw_gs(screen, gs)\n\n\t\tclock.tick(MAX_FPS)\n\t\tpg.display.flip()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"86633828","text":"import sys\r\nsys.path.insert(0, './resnet')\r\nimport os\r\nimport tensorflow as tf\r\nimport numpy as 
np\r\n#from resnet152 import get_resnet\r\n# from convert2 import load_image\r\nfrom generator_v3 import combine_embeddings, generator_me\r\nfrom discriminator import discriminator\r\n#from rnn_module import rnn_module\r\nfrom get_data_v2 import load_data2, get_training_batch\r\nimport tensorflow.contrib.rnn as rnn_cell\r\n\r\n\r\n######## CONSTANTS #######\r\nloss_vals = []\r\nlossg_vals = []\r\nacc_vals = []\r\nepsilon = 1e-3\r\n\r\nbatch_size=50 \r\ndropout_rate=0.2\r\ndrop_out_rate=dropout_rate\r\n\r\ninput_embedding_size=200\r\nvocabulary_size=15881\r\nrnn_size=512\r\nrnn_layer=2\r\nmax_words_q=22\r\ndim_hidden=1024\r\nnum_output = 1000\r\n##########################\r\n\r\nsess = tf.InteractiveSession()\r\n\r\nsave_ver = '3.2.2'\r\n\r\n\r\nweights = np.load('weights_g_v2_nonoise.npy').item()\r\n\r\n\r\nwith tf.variable_scope(\"rnn_module1\"):\r\n #tf.get_variable_scope().reuse_variables()\r\n var_dict = {\r\n 'cemcnnfcW1': tf.Variable(weights['cemcnnfcW1'], name='cemcnnfcW1'),\r\n 'cemcnnfcb1': tf.Variable(weights['cemcnnfcb1'], name='cemcnnfcb1'),\r\n 'ceacnnfcW1': tf.Variable(weights['ceacnnfcW1'], name='ceacnnfcW1'),\r\n 'ceacnnfcb1': tf.Variable(weights['ceacnnfcb1'], name='ceacnnfcb1'),\r\n 'cemrnnfcW1': tf.Variable(weights['cemrnnfcW1'], name='cemrnnfcW1'),\r\n 'cemrnnfcb1': tf.Variable(weights['cemrnnfcb1'], name='cemrnnfcb1'),\r\n 'cearnnfcW1': tf.Variable(weights['cearnnfcW1'], name='cearnnfcW1'),\r\n 'cearnnfcb1': tf.Variable(weights['cearnnfcb1'], name='cearnnfcb1'),\r\n 'gfcW1': tf.Variable(tf.truncated_normal([4096, 4096]), name='gfcW1'),\r\n 'gfcb1': tf.Variable(tf.constant(0.1, shape=[4096]), name='gfcb1'),\r\n 'gfcW2': tf.Variable(tf.truncated_normal([4096, 4096]), name='gfcW2'),\r\n 'gfcb2': tf.Variable(tf.constant(0.1, shape=[4096]), name='gfcb2'),\r\n 'gfcW3': tf.Variable(tf.truncated_normal([4096, 2048]), name='gfcW3'),\r\n 'gfcb3': tf.Variable(tf.constant(0.1, shape=[2048]), name='gfcb3'),\r\n 'gfcW4': tf.Variable(tf.truncated_normal([2048, 1000]), name='gfcW4'),\r\n 'gfcb4': tf.Variable(tf.constant(0.1, shape=[1000]), name='gfcb4'),\r\n 'rnnqW': tf.Variable(weights['rnnqW'], name='embed_ques_W'),\r\n #'rnnsW': tf.Variable(weights['rnnsW'], name='embed_state_W'),\r\n #'rnnsb': tf.Variable(weights['rnnsb'], name='embed_state_b'),\r\n #'rnnoutbeta': tf.Variable(tf.zeros([2048])),\r\n #'rnnoutscale': tf.Variable(tf.ones([2048])),\r\n #'cnnoutbeta': tf.Variable(tf.zeros([2048])),\r\n #'cnnoutscale': tf.Variable(tf.ones([2048])),\r\n #'featbeta': tf.Variable(tf.zeros([4096])),\r\n #'featscale': tf.Variable(tf.ones([4096])),\r\n #'gbeta': tf.Variable(tf.zeros([1000])),\r\n #'gscale': tf.Variable(tf.ones([1000]))\r\n }\r\n\r\n # question-embedding\r\n #embed_ques_W = tf.Variable(tf.random_uniform([vocabulary_size, input_embedding_size], -0.08, 0.08), name='embed_ques_W')\r\n\r\n # encoder: RNN body\r\n lstm_1 = rnn_cell.LSTMCell(rnn_size, input_embedding_size, use_peepholes=True, state_is_tuple=False)\r\n lstm_dropout_1 = rnn_cell.DropoutWrapper(lstm_1, output_keep_prob = 1 - dropout_rate)\r\n lstm_2 = rnn_cell.LSTMCell(rnn_size, rnn_size, use_peepholes=True, state_is_tuple=False)\r\n lstm_dropout_2 = rnn_cell.DropoutWrapper(lstm_2, output_keep_prob = 1 - dropout_rate)\r\n stacked_lstm = rnn_cell.MultiRNNCell([lstm_dropout_1, lstm_dropout_2], state_is_tuple=False)\r\n\r\n\r\n image = tf.placeholder(tf.float32, [batch_size, 2048])\r\n question = tf.placeholder(tf.int32, [batch_size, max_words_q])\r\n answers_true = tf.placeholder(tf.float32, (batch_size, 1000))\r\n noise 
= tf.placeholder(tf.float32, [batch_size, 4096])\r\n\r\n answers_false = tf.placeholder(tf.float32, (None, 1000))\r\n image_false = tf.placeholder(tf.float32, (None, 2048))\r\n question_false = tf.placeholder(tf.int32, [batch_size, max_words_q])\r\n\r\n \r\n #state = tf.zeros([batch_size, stacked_lstm.state_size])\r\n state = stacked_lstm.zero_state(batch_size, tf.float32)\r\n loss = 0.0\r\n for i in range(max_words_q): \r\n if i==0:\r\n ques_emb_linear = tf.zeros([batch_size, input_embedding_size])\r\n else:\r\n tf.get_variable_scope().reuse_variables()\r\n ques_emb_linear = tf.nn.embedding_lookup(var_dict['rnnqW'], question[:,i-1])\r\n #ques_emb_linear = tf.gather(var_dict['rnnqW'], question[:,i-1])\r\n\r\n ques_emb_drop = tf.nn.dropout(ques_emb_linear, 1-drop_out_rate)\r\n ques_emb = tf.tanh(ques_emb_drop)\r\n\r\n output, state = stacked_lstm(ques_emb, state)\r\n\r\n\r\n cnn_mean, cnn_var = tf.nn.moments(image, [0])\r\n cnn_out_true_n = tf.nn.batch_normalization(image,cnn_mean,cnn_var,None,None,epsilon) #,var_dict['cnnoutbeta'],\r\n #var_dict['cnnoutscale'],epsilon)\r\n\r\n rnn_mean, rnn_var = tf.nn.moments(state, [0])\r\n rnn_out_true_n = tf.nn.batch_normalization(state,rnn_mean,rnn_var,None,None,epsilon) #var_dict['rnnoutbeta'],\r\n #var_dict['rnnoutscale'],epsilon)\r\n\r\n features = combine_embeddings(cnn_out_true_n, rnn_out_true_n, var_dict)\r\n #features = tf.concat([image,state], 1)\r\n #features = tf.concat([state,image], 1)\r\n\r\n #features2 = tf.concat([features,noise], 1)\r\n features2 = tf.add(features,noise)\r\n scores_emb = generator_me(features2, var_dict)\r\n\r\n #v1 = tf.Variable(tf.truncated_normal([2048,1000]))\r\n #v2 = tf.Variable(tf.constant(0.1, shape=[1000]))\r\n #scores_emb = tf.nn.relu_layer(state, v1, v2)\r\n\r\n\r\n saver = tf.train.Saver()\r\n\r\n saver.restore(sess, \"./train_g_v3.2.2\")\r\n\r\n\r\n #var_dict['gbeta'] = tf.Variable(tf.zeros([1000]), name='gbeta')\r\n #var_dict['gscale'] = tf.Variable(tf.ones([1000]), name='gscale')\r\n var_dict['dfcW1'] = tf.Variable(tf.truncated_normal([5096, 4096]), name='dfcW1')\r\n var_dict['dfcb1'] = tf.Variable(tf.constant(0.1, shape=[4096]), name='dfcb1')\r\n var_dict['dbeta1'] = tf.Variable(tf.zeros([4096]), name='dbeta1')\r\n var_dict['dscale1'] = tf.Variable(tf.ones([4096]), name='dscale1')\r\n var_dict['dfcW2'] = tf.Variable(tf.truncated_normal([4096, 2048]), name='dfcW2')\r\n var_dict['dfcb2'] = tf.Variable(tf.constant(0.1, shape=[2048]), name='dfcb2')\r\n var_dict['dbeta2'] = tf.Variable(tf.zeros([2048]), name='dbeta2')\r\n var_dict['dscale2'] = tf.Variable(tf.ones([2048]), name='dscale2')\r\n var_dict['dfcW3'] = tf.Variable(tf.truncated_normal([2048, 512]), name='dfcW3')\r\n var_dict['dfcb3'] = tf.Variable(tf.constant(0.1, shape=[512]), name='dfcb3')\r\n var_dict['dbeta3'] = tf.Variable(tf.zeros([512]), name='dbeta3')\r\n var_dict['dscale3'] = tf.Variable(tf.ones([512]), name='dscale3')\r\n var_dict['dfcW4'] = tf.Variable(tf.truncated_normal([512, 128]), name='dfcW4')\r\n var_dict['dfcb4'] = tf.Variable(tf.constant(0.1, shape=[128]), name='dfcb4')\r\n var_dict['dbeta4'] = tf.Variable(tf.zeros([128]), name='dbeta4')\r\n var_dict['dscale4'] = tf.Variable(tf.ones([128]), name='dscale4')\r\n var_dict['dfcW5'] = tf.Variable(tf.truncated_normal([128,32]), name='dfcW5')\r\n var_dict['dfcb5'] = tf.Variable(tf.constant(0.1, shape=[32]), name='dfcb5')\r\n var_dict['dbeta5'] = tf.Variable(tf.zeros([32]), name='dbeta5')\r\n var_dict['dscale5'] = tf.Variable(tf.ones([32]), name='dscale5')\r\n var_dict['dfcW6'] = 
tf.Variable(tf.truncated_normal([32, 1]), name='dfcW6')\r\n var_dict['dfcb6'] = tf.Variable(tf.constant(0.1, shape=[1]), name='dfcb6')\r\n\r\n\r\n g_mean, g_var = tf.nn.moments(scores_emb, [0])\r\n g_true_n = tf.nn.batch_normalization(scores_emb,g_mean,g_var,None,None,epsilon) #var_dict['gbeta'],var_dict['gscale'],epsilon)\r\n at_mean, at_var = tf.nn.moments(answers_true, [0])\r\n answers_true_n = tf.nn.batch_normalization(answers_true,at_mean,at_var,None,None,epsilon) #var_dict['gbeta'],var_dict['gscale'],epsilon)\r\n #answers_true_n = answers_true\r\n #af_mean, af_var = tf.nn.moments(answers_false, [0])\r\n #answers_false_n = tf.nn.batch_normalization(answers_false,af_mean,af_var,var_dict['gbeta'],var_dict['gscale'],epsilon)\r\n\r\n\r\n state2 = stacked_lstm.zero_state(batch_size, tf.float32)\r\n loss = 0.0\r\n for i in range(max_words_q): \r\n if i==0:\r\n ques_emb_linear2 = tf.zeros([batch_size, input_embedding_size])\r\n else:\r\n tf.get_variable_scope().reuse_variables()\r\n ques_emb_linear2 = tf.nn.embedding_lookup(var_dict['rnnqW'], question_false[:,i-1])\r\n #ques_emb_linear = tf.gather(var_dict['rnnqW'], question[:,i-1])\r\n\r\n ques_emb_drop2 = tf.nn.dropout(ques_emb_linear2, 1-drop_out_rate)\r\n ques_emb2 = tf.tanh(ques_emb_drop2)\r\n\r\n output, state2 = stacked_lstm(ques_emb2, state2)\r\n\r\n\r\n cnn_mean, cnn_var = tf.nn.moments(image_false, [0])\r\n cnn_out_true_n_f = tf.nn.batch_normalization(image_false,cnn_mean,cnn_var,None,None,epsilon) #var_dict['cnnoutbeta'],\r\n #var_dict['cnnoutscale'],epsilon)\r\n\r\n rnn_mean, rnn_var = tf.nn.moments(state2, [0])\r\n rnn_out_true_n_f = tf.nn.batch_normalization(state2,rnn_mean,rnn_var,None,None,epsilon) #var_dict['rnnoutbeta'],\r\n #var_dict['rnnoutscale'],epsilon)\r\n\r\n features_false = combine_embeddings(cnn_out_true_n_f, rnn_out_true_n_f, var_dict)\r\n\r\n\r\n # load discriminator network\r\n s_r, fc6, fc5n, fc4n = discriminator(features, answers_true_n, var_dict)\r\n s_w, fc6, fc5n, fc4n = discriminator(features_false, answers_true_n, var_dict)\r\n s_f, fc6, fc5n, fc4n = discriminator(features, g_true_n, var_dict) #g_true\r\n\r\n ones = tf.constant(1.0, shape=[50,1], dtype=tf.float32)\r\n loss = -tf.reduce_mean(tf.log(s_r) + tf.log(tf.subtract(ones,s_w) + 1e-5*ones)/2.0 + tf.log(tf.subtract(ones,s_f) + 1e-5*ones)/2.0)\r\n lossg = -tf.reduce_mean(tf.log(s_f))\r\n\r\n\r\n #v1 = tf.Variable(tf.truncated_normal([2048,1000]))\r\n #v2 = tf.Variable(tf.constant(0.1, shape=[1000]))\r\n #scores_emb = tf.nn.relu_layer(state, v1, v2)\r\n\r\n #loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=scores_emb, labels=answers_true))\r\n\r\n\r\n\r\ntvars = tf.trainable_variables()\r\nopt_g = tf.train.AdamOptimizer(learning_rate=1e-4)\r\nopt_d = tf.train.AdamOptimizer(learning_rate=1e-4)\r\n\r\nd_vars = []\r\n\r\ng_var_names = ['rnn_module1/cemcnnfcW1:0','rnn_module1/cemcnnfcb1:0',\r\n 'rnn_module1/ceacnnfcW1:0','rnn_module1/ceacnnfcb1:0','rnn_module1/cemrnnfcW1:0',\r\n 'rnn_module1/cemrnnfcb1:0','rnn_module1/cearnnfcW1:0','rnn_module1/cearnnfcb1:0',\r\n 'rnn_module1/gfcW1:0','rnn_module1/gfcb1:0','rnn_module1/gfcW2:0','rnn_module1/gfcb2:0',\r\n 'rnn_module1/gfcW3:0','rnn_module1/gfcb3:0','rnn_module1/gfcW4:0','rnn_module1/gfcb4:0',\r\n 'rnn_module1/embed_ques_W:0','rnn_module1/multi_rnn_cell/cell_0/lstm_cell/weights:0',\r\n 'rnn_module1/multi_rnn_cell/cell_0/lstm_cell/biases:0',\r\n 'rnn_module1/multi_rnn_cell/cell_0/lstm_cell/w_f_diag:0',\r\n 'rnn_module1/multi_rnn_cell/cell_0/lstm_cell/w_i_diag:0',\r\n 
'rnn_module1/multi_rnn_cell/cell_0/lstm_cell/w_o_diag:0',\r\n 'rnn_module1/multi_rnn_cell/cell_1/lstm_cell/weights:0',\r\n 'rnn_module1/multi_rnn_cell/cell_1/lstm_cell/biases:0',\r\n 'rnn_module1/multi_rnn_cell/cell_1/lstm_cell/w_f_diag:0',\r\n 'rnn_module1/multi_rnn_cell/cell_1/lstm_cell/w_i_diag:0',\r\n 'rnn_module1/multi_rnn_cell/cell_1/lstm_cell/w_o_diag:0']\r\nd_var_names = ['rnn_module1/dfcW1:0','rnn_module1/dfcb1:0','rnn_module1/dbeta1:0',\r\n 'rnn_module1/dscale1:0','rnn_module1/dfcW2:0','rnn_module1/dfcb2:0',\r\n 'rnn_module1/dbeta2:0','rnn_module1/dscale2:0','rnn_module1/dfcW3:0',\r\n 'rnn_module1/dfcb3:0','rnn_module1/dbeta3:0','rnn_module1/dscale3:0',\r\n 'rnn_module1/dfcW4:0','rnn_module1/dfcb4:0','rnn_module1/dbeta4:0',\r\n 'rnn_module1/dscale4:0','rnn_module1/dfcW5:0','rnn_module1/dfcb5:0',\r\n 'rnn_module1/dbeta5:0','rnn_module1/dscale5:0','rnn_module1/dfcW6:0',\r\n 'rnn_module1/dfcb6:0']\r\n\r\n\r\nd_vars = []\r\ng_vars = []\r\n\r\nfor i in range(len(tvars)):\r\n if tvars[i].name in g_var_names:\r\n g_vars.append(tvars[i])\r\n print('g - ' + tvars[i].name)\r\n elif tvars[i].name in d_var_names:\r\n d_vars.append(tvars[i])\r\n print('d - ' + tvars[i].name)\r\n else:\r\n print('error')\r\nprint(d_vars)\r\nprint(g_vars)\r\n\r\n# # gradient clipping\r\n# gvs = opt.compute_gradients(loss,tvars)\r\n\r\n# #for i in range(len(tvars)):\r\n# # print(tvars[i].name)\r\n# clipped_gvs = [(tf.clip_by_value(grad, -10.0, 10.0), var) for grad, var in gvs]\r\n# train_op = opt.apply_gradients(clipped_gvs)\r\n\r\n# gradient clipping\r\ngvs_d = opt_d.compute_gradients(loss,d_vars)\r\n\r\n#for i in range(len(tvars)):\r\n# print(tvars[i].name)\r\nclipped_gvs_d = [(tf.clip_by_value(grad, -10.0, 10.0), var) for grad, var in gvs_d]\r\ntrain_op_d = opt_d.apply_gradients(clipped_gvs_d)\r\n\r\n\r\ngvs_g = opt_g.compute_gradients(lossg,g_vars)\r\n\r\n#for i in range(len(tvars)):\r\n# print(tvars[i].name)\r\nclipped_gvs_g = [(tf.clip_by_value(grad, -10.0, 10.0), var) for grad, var in gvs_g]\r\ntrain_op_g = opt_g.apply_gradients(clipped_gvs_g)\r\n\r\n\r\n\r\n# for i in range(len(tvars)):\r\n# print(tvars[i].name)\r\n\r\n\r\n#train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)\r\n\r\n# init variables\r\n#sess.run(tf.global_variables_initializer())\r\n\r\n# init2 = tf.variables_initializer([var_dict['gbeta'],var_dict['gscale'],var_dict['dfcW1'],\r\n# var_dict['dfcb1'],var_dict['dbeta1'],var_dict['dscale1'],var_dict['dfcW2'],\r\n# var_dict['dfcb2'],var_dict['dbeta2'],var_dict['dscale2'],var_dict['dfcW3'],\r\n# var_dict['dfcb3'],var_dict['dbeta3'],var_dict['dscale3'],var_dict['dfcW4'],\r\n# var_dict['dfcb4'],var_dict['dbeta4'],var_dict['dscale4'],var_dict['dfcW5'],\r\n# var_dict['dfcb5'],var_dict['dbeta5'],var_dict['dscale5'],var_dict['dfcW6'],\r\n# var_dict['dfcb6']])\r\n# sess.run(init2)\r\n\r\n\r\nuninitialized_vars = []\r\nfor var in tf.all_variables():\r\n try:\r\n sess.run(var)\r\n except tf.errors.FailedPreconditionError:\r\n uninitialized_vars.append(var)\r\n\r\ninit_new_vars_op = tf.initialize_variables(uninitialized_vars)\r\nsess.run(init_new_vars_op)\r\n\r\nsaver = tf.train.Saver(var_list=tvars)\r\n\r\n#saver.restore(sess, \"./train_d_v3.2.1\")\r\n\r\n\r\n\r\nprint('new version8')\r\nprint('answers 1000....')\r\nprint('loading data...\\n\\n')\r\nqa_data = load_data2()\r\nprint('done loading data...\\n\\n')\r\nbatch_size = 50\r\n#while batch_no*batch_size < len(qa_data['training']):\r\nfor train_loops in range(10):\r\n batch_no = 0\r\n while batch_no*batch_size < len(qa_data['training']) - 
batch_size:\r\n print('batch = ' + str(batch_no))\r\n (questions_in_true, answer_in_true, im_feat_true) = get_training_batch(batch_no, batch_size, qa_data)\r\n\r\n if batch_no*batch_size < len(qa_data['training']) - 1:\r\n (questions_in_false, answer_in_false, im_feat_false) = get_training_batch(batch_no+1, batch_size, qa_data)\r\n else:\r\n (questions_in_false, answer_in_false, im_feat_false) = get_training_batch(0, batch_size, qa_data)\r\n\r\n\r\n noise_in = np.random.normal(scale=0.3, size=[batch_size,4096])\r\n\r\n\r\n _, loss_val = sess.run([train_op_d, loss], feed_dict={\r\n noise: noise_in,\r\n answers_true: answer_in_true,\r\n answers_false: answer_in_false,\r\n image_false: im_feat_false,\r\n question_false: questions_in_false,\r\n image: im_feat_true,\r\n question: questions_in_true,\r\n })\r\n\r\n _, lossg_val, g_out = sess.run([train_op_g, lossg, scores_emb], feed_dict={\r\n noise: noise_in,\r\n answers_true: answer_in_true,\r\n answers_false: answer_in_false,\r\n image_false: im_feat_false,\r\n question_false: questions_in_false,\r\n image: im_feat_true,\r\n question: questions_in_true,\r\n })\r\n\r\n print('loss = ' + str(loss_val))\r\n print('loss g = ' + str(lossg_val))\r\n loss_vals.append(loss_val)\r\n lossg_vals.append(lossg_val)\r\n np.save('loss_vals_d_simple_v' + save_ver, loss_vals)\r\n np.save('lossg_vals_d_simple_v' + save_ver, lossg_vals)\r\n\r\n\r\n answers_out = np.argmax(g_out, axis=1)\r\n answers_idx_true = np.argmax(answer_in_true, axis=1)\r\n error = float(np.sum(answers_out == answers_idx_true)) / float(batch_size)\r\n acc_vals.append(error)\r\n np.save('acc_vals_d_v' + save_ver, acc_vals)\r\n print('error = ' + str(error))\r\n\r\n if batch_no % 25 == 0:\r\n #weights_save = {}\r\n #for key in var_dict:\r\n # weights_save[key] = var_dict[key].eval()\r\n #weights_save['batch_no'] = batch_no\r\n #np.save('weights_simple_v' + save_ver, weights_save)\r\n saver.save(sess, 'train_d_v' + save_ver)\r\n\r\n batch_no += 1\r\n\r\n ","sub_path":"train_d_v3.2.py","file_name":"train_d_v3.2.py","file_ext":"py","file_size_in_byte":16351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309514626","text":"from paste.deploy.converters import asbool\r\nfrom paste.script.templates import Template, var\r\n\r\nfrom tempita import paste_script_template_renderer\r\n\r\nclass PySUITTemplate(Template):\r\n \"\"\"Based on ``pylons.util.PylonsTemplate``.\"\"\"\r\n _template_dir = 'templates/suit'\r\n summary = 'Pylons default_project with SUIT as the templating engine.'\r\n template_renderer = staticmethod(paste_script_template_renderer)\r\n vars = [\r\n var('sqlalchemy', 'True/False: Include SQLAlchemy 0.5 configuration',\r\n default=False)\r\n ]\r\n ensure_names = ['description', 'author', 'author_email', 'url']\r\n\r\n def pre(self, command, output_dir, vars):\r\n \"\"\"Called before template is applied.\"\"\"\r\n package_logger = vars['package']\r\n if package_logger == 'root':\r\n # Rename the app logger in the rare case a project is named 'root'\r\n package_logger = 'app'\r\n vars['package_logger'] = package_logger\r\n vars['babel_templates_extractor'] = '' # Not yet\r\n # Ensure these exist in the namespace\r\n for name in self.ensure_names:\r\n vars.setdefault(name, '')\r\n\r\n vars['version'] = vars.get('version', '0.1')\r\n vars['zip_safe'] = asbool(vars.get('zip_safe', 'false'))\r\n vars['sqlalchemy'] = asbool(vars.get('sqlalchemy', 
'false'))","sub_path":"suit-pylons/trunk/suit_pylons/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"636237261","text":"\nfrom keras.layers import Dropout, Activation, BatchNormalization, Input\nfrom keras.layers import Conv2D, MaxPooling2D, UpSampling2D, SeparableConv2D\nfrom keras.layers.merge import Concatenate, Add\nimport keras.backend as K\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nimport keras.callbacks\n\n\n\ndef conv_block2(n_filter, n1, n2,\n activation=\"relu\",\n border_mode=\"same\",\n dropout=0.0,\n batch_norm=True,\n init=\"glorot_uniform\",\n ):\n def _func(lay):\n s = Conv2D(n_filter, (n1, n2), padding=border_mode, kernel_initializer=init)(lay)\n if batch_norm:\n s = BatchNormalization()(s)\n s = Activation(activation)(s)\n if dropout > 0:\n s = Dropout(dropout)(s)\n return s\n\n return _func\n\n\n\ndef unet_block(n_depth=2, n_filter_base=16, n_row=3, n_col=3, n_conv_per_depth=2,\n activation=\"relu\",\n batch_norm=True,\n dropout=0.0,\n last_activation=None):\n \"\"\"\"\"\"\n\n if last_activation is None:\n last_activation = activation\n\n if K.image_dim_ordering() == \"tf\":\n channel_axis = -1\n else:\n channel_axis = 1\n\n\n def _func(input):\n skip_layers = []\n layer = input\n\n # down ...\n for n in range(n_depth):\n for i in range(n_conv_per_depth):\n layer = conv_block2(n_filter_base * 2 ** n, n_row, n_col,\n dropout=dropout,\n activation=activation,\n batch_norm=batch_norm)(layer)\n skip_layers.append(layer)\n layer = MaxPooling2D((2, 2))(layer)\n\n\n # middle\n for i in range(n_conv_per_depth - 1):\n layer = conv_block2(n_filter_base * 2 ** n_depth, n_row, n_col,\n dropout=dropout,\n activation=activation,\n batch_norm=batch_norm)(layer)\n\n layer = conv_block2(n_filter_base * 2 ** (n_depth - 1), n_row, n_col,\n dropout=dropout,\n activation=activation,\n batch_norm=batch_norm)(layer)\n\n # ...and up with skip layers\n for n in reversed(range(n_depth)):\n layer = Concatenate(axis = channel_axis)([UpSampling2D((2, 2))(layer), skip_layers[n]])\n for i in range(n_conv_per_depth - 1):\n layer = conv_block2(n_filter_base * 2 ** n, n_row, n_col,\n dropout=dropout,\n activation=activation,\n batch_norm=batch_norm)(layer)\n\n layer = conv_block2(n_filter_base * 2 ** max(0, n - 1), n_row, n_col,\n dropout=dropout,\n activation=activation if n > 0 else last_activation,\n batch_norm=batch_norm)(layer)\n\n return layer\n\n return _func\n\n\n# def my_binary_crossentropy(weights =(1., 1.)):\n# def _func(y_true, y_pred):\n# return -(weights[0] * K.mean((1-y_true)*K.log((1-y_pred)+K.epsilon())) +\n# weights[1] * K.mean(y_true*K.log(y_pred+K.epsilon())))\n# return _func\n\n\ndef my_binary_crossentropy(weights =(1., 1.)):\n def _func(y_true, y_pred):\n return -(weights[0] * K.mean((1-y_true)*K.log((1-y_pred)+K.epsilon())) +\n weights[1] * K.mean(y_true*K.log(y_pred+K.epsilon())))\n return _func\n\ndef my_binary_crossentropy_mod(weights =(1., 1.)):\n def _func(y_true, y_pred):\n return -(weights[0] * K.mean(K.cast(K.greater(0.25, y_true),dtype='float32')*K.log((1-y_pred)+K.epsilon())) +\n weights[1] * K.mean(K.cast(K.greater(y_true,0.25),dtype='float32')*K.log(y_pred +K.epsilon())))\n return _func\n\ndef build_model_unet(input_shape, dropout=.2):\n input = Input(input_shape)\n\n unet = unet_block(2, 8, 3, 3, activation=\"relu\")(input)\n final = Conv2D(1, (1, 1), activation='sigmoid')(unet)\n\n model = Model(inputs=input, 
outputs=final)\n\n return model\n\ndef acc1(y_true, y_pred):\n \n \n nom = K.mean(K.cast(K.cast(K.equal(K.round(y_pred),y_true), dtype='float32')*K.cast(K.equal(y_true,1),dtype='float32'),dtype='float32'))\n denom = K.mean(y_true)\n# nom = K.cast(nom, dtype='float32')\n# denom = K.cast(denom, dtype='float32')\n return nom/denom\n# return K.shape(y_true)[0]\n\n\n\ndef acc0(y_true, y_pred):\n \n nom = K.mean(K.cast(K.cast(K.equal(K.round(y_pred),y_true), dtype='float32')*K.cast(K.equal(y_true,0),dtype='float32'),dtype='float32'))\n denom = K.mean(K.cast(K.equal(y_true,0),dtype='float32'))\n# nom = K.cast(nom, dtype='float32')\n# denom = K.cast(denom, dtype='float32')\n return nom/denom\n\ndef acc1_mod(y_true, y_pred):\n \n \n nom = K.mean(K.cast(K.cast(K.greater(0.05,K.abs(y_pred-y_true)), dtype='float32')*K.cast(K.greater(y_true,0.2),dtype='float32'),dtype='float32'))\n denom = K.mean(K.cast(K.greater(y_true,0.2),dtype='float32'))\n# nom = K.cast(nom, dtype='float32')\n# denom = K.cast(denom, dtype='float32')\n return nom/denom\n# return K.shape(y_true)[0]\n\n\n\ndef acc0_mod(y_true, y_pred):\n \n nom = K.mean(K.cast(K.cast(K.equal(K.round(y_pred),y_true), dtype='float32')*K.cast(K.equal(y_true,0),dtype='float32'),dtype='float32'))\n denom = K.mean(K.cast(K.equal(y_true,0),dtype='float32'))\n# nom = K.cast(nom, dtype='float32')\n# denom = K.cast(denom, dtype='float32')\n return nom/denom","sub_path":"UNet/externalmodels/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"257776618","text":"# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nPython Package for controlling Tesla API.\n\nFor more details about this api, please refer to the documentation at\nhttps://github.com/zabuldon/teslajsonpy\n\"\"\"\nimport time\n\nfrom teslajsonpy.vehicle import VehicleDevice\n\n\nclass SentrySwitch(VehicleDevice):\n \"\"\"Home-Assistant class for the sentry mode of a Tesla VehicleDevice.\"\"\"\n\n def __init__(self, data, controller):\n \"\"\"Initialize the Sentry Mode Switch.\n\n Parameters\n ----------\n data : dict\n The base state for a Tesla vehicle.\n https://tesla-api.timdorr.com/vehicle/commands/sentrymode\n controller : teslajsonpy.Controller\n The controller that controls updates to the Tesla API.\n\n Returns\n -------\n None\n\n \"\"\"\n super().__init__(data, controller)\n self.__manual_update_time = 0\n self.__sentry_state = False\n self.type = \"sentry switch\"\n self.hass_type = \"switch\"\n self.name = self._name()\n self.uniq_name = self._uniq_name()\n\n async def async_update(self):\n \"\"\"Update the vehicle state of the Tesla Vehicle.\"\"\"\n await super().async_update()\n last_update = self._controller.get_last_update_time(self._id)\n if last_update >= self.__manual_update_time:\n data = self._controller.get_state_params(self._id)\n self.__sentry_state = (data and data[\"sentry_mode\"])\n\n async def enable_sentry(self):\n \"\"\"Enable the Tesla Vehicle Sentry Mode.\"\"\"\n if not self.__sentry_state:\n data = await self._controller.command(\n self._id, \n \"set_sentry_mode\", \n {\"on\": True},\n wake_if_asleep=True\n )\n if data and data[\"response\"][\"result\"]:\n self.__sentry_state = True\n self.__manual_update_time = time.time()\n\n async def disable_sentry(self):\n \"\"\"Disable the Tesla Vehicle Sentry Mode.\"\"\"\n if self.__sentry_state:\n data = await self._controller.command(\n self._id, \n \"set_sentry_mode\", \n {\"on\": False},\n 
wake_if_asleep=True\n )\n if data and data[\"response\"][\"result\"]:\n self.__sentry_state = False\n self.__manual_update_time = time.time()\n\n def get_value(self):\n \"\"\"Return whether the Tesla Sentry Mode is enabled.\"\"\"\n return self.__sentry_state\n\n @staticmethod\n def has_battery():\n \"\"\"Return whether the Tesla charger has a battery.\"\"\"\n return False\n","sub_path":"teslajsonpy/sentry.py","file_name":"sentry.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412164600","text":"from artwork.models import Artwork\nfrom django import forms\nfrom models import FearUser\nfrom profiles.models import ExpertiseItem, UserExpertise, BackgroundSetting, BG_CHOICES\nfrom django.forms import ModelForm\nfrom profiles.models import ExpertiseItem, LinkItem, UserExpertise\n\n\nclass UploadProfilePictureForm(ModelForm):\n\n profile_picture = forms.ImageField(label='Select an Image')\n\n class Meta:\n model = FearUser\n fields = ['profile_picture']\n\n\nclass UpdateAboutForm(ModelForm):\n about = forms.CharField(widget = forms.Textarea, label=\"\", required = False)\n\n class Meta:\n model = FearUser\n fields = ['about']\n\n\nclass EditExpertiseForm(forms.ModelForm):\n \"\"\"\n Add/Edit Expertise form from profile view\n \"\"\"\n expertise_items = forms.ModelMultipleChoiceField(queryset=ExpertiseItem.objects.all(), label=\"\", required=False,\n widget=forms.CheckboxSelectMultiple)\n\n class Meta:\n model = UserExpertise\n exclude = ['owner']\n\n\nclass EditLinkItemForm(forms.ModelForm):\n \"\"\"\n Add/Edit Link form from profile view\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(EditLinkItemForm, self).__init__(*args, **kwargs)\n for key, field in self.fields.iteritems():\n self.fields[key].required = False\n\n class Meta:\n model = LinkItem\n exclude = ['owner']\n\nclass UpdateBasicInfoForm(ModelForm):\n\n \"\"\"\n Add/Edit Basic Info from profile view\n \"\"\"\n class Meta:\n model=FearUser\n fields=['first_name','last_name','city']\n\n\nclass UpdateBackgroundSettingsForm(ModelForm):\n \"\"\"\n Add/Edit Background from profile view\n \"\"\"\n\n background_type = forms.ChoiceField(choices=BG_CHOICES, widget=forms.RadioSelect)\n\n class Meta:\n model=BackgroundSetting\n fields=['background_type','profile_background_image','profile_background_color']\n\n\n","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"646764129","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom time import clock\n\n'Python to write faster function!'\n\n__author__ = 'JoshuaMK1992'\n\nexec_times = 10000\norignal_str = 'python faster method test : slowest_replace slow_replace fast_replace fastest_replace'\n\ndef main():\n _time_analyze_(slowest_replace)\n _time_analyze_(slow_replace)\n _time_analyze_(fast_replace)\n _time_analyze_(fastest_replace)\n\n with Timer(True) as tm:\n for i in range(exec_times):\n slowest_replace()\n with Timer(True) as tm:\n for i in range(exec_times):\n slow_replace()\n with Timer(True) as tm:\n for i in range(exec_times):\n fast_replace()\n with Timer(True) as tm:\n for i in range(exec_times):\n fastest_replace()\n\n\ndef slowest_replace():\n replace_list = []\n for i, char in enumerate(orignal_str):\n c = char if char != \" \" else \"-\"\n 
replace_list.append(c)\n    return \"\".join(replace_list)\n\ndef slow_replace():\n    replace_str = \"\"\n    for i, char in enumerate(orignal_str):\n        c = char if char != \" \" else \"-\"\n        replace_str += c\n    return replace_str\n\ndef fast_replace():\n    return \"-\".join(orignal_str.split())\n\ndef fastest_replace():\n    return orignal_str.replace(\" \", \"-\")\n\ndef _time_analyze_(func):\n    start = clock()\n    for i in range(exec_times):\n        func()\n    finish = clock()\n    print('{:<20}{:10.6} s'.format(func.__name__ + \":\", finish - start))\n\nclass Timer(object):\n    def __init__(self, verbose=False):\n        self.verbose = verbose\n\n    def __enter__(self):\n        self.start = clock()\n        return self\n\n    def __exit__(self, *args):\n        self.end = clock()\n        self.secs = self.end - self.start\n        self.msecs = self.secs * 1000  # millisecs\n        if self.verbose:\n            print('elapsed time: {0} ms'.format(self.msecs))\n\n# testing code\nif __name__ == '__main__':\n    main()\n","sub_path":"learn-python2.7/language basis/faster_method_test.py","file_name":"faster_method_test.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"439165544","text":"# Fix paths for imports to work in unit tests ----------------\n\nif __name__ == \"__main__\":\n\n    from _fix_paths import fix_paths\n    fix_paths()\n\n# ------------------------------------------------------------\n\n# Load libraries ---------------------------------------------\n\nfrom typing import Dict\n\nfrom collections import namedtuple\n\nimport numpy as np\n\nfrom ssa_sim_v2.simulator.modules.simulator_module import SimulatorModule\n\n# ------------------------------------------------------------\n\n\nclass MultiStateSimulatorModule(object):\n    \"\"\"\n    Base class for all multi-state simulator modules.\n\n    :ivar int seed: Seed for the random number generator.\n    :ivar np.random.RandomState rng: Random number generator.\n    :ivar pd.DataFrame priors: DataFrame with columns for the state\n        (e.g. date, hour_of_week) and the prior column. The last column\n        defines priors (in the form of dictionaries) for every state.\n    :ivar pd.DataFrame base_classes: DataFrame with three columns: date, hour_of_week,\n        base_class. The last column defines class for given dates and hours of week.\n    :ivar Dict[str, SimulatorModule] models: Dictionary of single state modules for every valid pair\n        of date and hour of week.\n    \"\"\"\n\n    Params = namedtuple('Params', [])\n\n    def __init__(self, priors=None, base_classes=None, seed=9):\n        \"\"\"\n        :param pd.DataFrame priors: DataFrame with columns for the state\n            (e.g. date, hour_of_week) and the prior column. The last column\n            defines priors (in the form of dictionaries) for every state.\n        :param pd.DataFrame base_classes: DataFrame with three columns: date, hour_of_week,\n            base_class. 
The last column defines class for given dates and hours of week.\n :param int seed: Seed for the random number generator.\n \"\"\"\n\n self.seed = seed\n self.rng = np.random.RandomState(seed)\n self.priors = priors\n self.base_classes = base_classes\n\n seed_min = 100000\n seed_max = 999999\n seeds = self.rng.randint(low=seed_min, high=seed_max, size=len(self.priors))\n\n base_df = priors.copy()\n base_df.loc[:, \"base_class\"] = base_classes[\"base_class\"]\n base_df.loc[:, \"seed\"] = seeds\n\n self.models = {} # type: Dict[str, SimulatorModule]\n\n for index, row in base_df.iterrows():\n self.models[\"{}.{}\".format(row[\"date\"], row[\"hour_of_week\"])] = row[\"base_class\"](row[\"prior\"], row[\"seed\"])\n\n def get_models(self):\n \"\"\"\n Returns a dictionary of underlying click probability models.\n\n :return: Dictionary of models.\n :rtype: Dict[str, SimulatorModule]\n \"\"\"\n return self.models\n","sub_path":"ssa_sim_v2/simulator/modules/multi_state_simulator_module.py","file_name":"multi_state_simulator_module.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348546831","text":"\nimport tensorflow as tf\nimport numpy as np\nimport time\n\nif __name__ != '__main__':\n from styx_msgs.msg import TrafficLight\n\n#===================================================================================\n# Utility Functions\n\n#-------------------------------------------------------------------------\ndef load_graph(graph_file):\n \"\"\"Loads a frozen inference graph\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_file, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return graph\n\n#-------------------------------------------------------------------------\ndef filter_boxes(min_score, boxes, scores, classes):\n \"\"\"Return boxes with a confidence >= `min_score`\"\"\"\n n = len(classes)\n idxs = []\n for i in range(n):\n if scores[i] >= min_score:\n idxs.append(i)\n \n filtered_boxes = boxes[idxs, ...]\n filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n return filtered_boxes, filtered_scores, filtered_classes\n\n#-------------------------------------------------------------------------\ndef convert_to_image_coordinates(boxes, height, width):\n \"\"\"\n The original box coordinate output is normalized, i.e [0, 1].\n \n This converts it back to the original coordinate based on the image\n size.\n \"\"\"\n box_coords = np.zeros_like(boxes)\n box_coords[:, 0] = boxes[:, 0] * height\n box_coords[:, 1] = boxes[:, 1] * width\n box_coords[:, 2] = boxes[:, 2] * height\n box_coords[:, 3] = boxes[:, 3] * width\n \n return box_coords\n\n#-------------------------------------------------------------------------\nfrom PIL import ImageDraw\nfrom PIL import ImageColor\n# Colors (one for each class)\ncmap = ImageColor.colormap\n#print(\"Number of colors =\", len(cmap))\nCOLOR_LIST = sorted([c for c in cmap.keys()])\n\ndef draw_boxes(image, boxes, classes, thickness=4):\n \"\"\"Draw bounding boxes on the image\"\"\"\n draw = ImageDraw.Draw(image)\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i])\n color = COLOR_LIST[class_id]\n draw.line([(left, top), (left, bot), (right, bot), (right, top), (left, top)], width=thickness, 
fill=color)\n\n#-------------------------------------------------------------------------\ndef convert_enum_traffic_light_color(classes, detected_colour):\n pass\n \n\n#===================================================================================\nclass TLClassifier(object):\n def __init__(self, boIsContextRealCar, boDebugMode, ConfidenceThreshold):\n \n self.graph = tf.Graph()\n self.detection_confidence = ConfidenceThreshold\n self.is_context_real_car = boIsContextRealCar\n self.is_debug_mode = boDebugMode\n \n #TODO load classifier\n if (self.is_context_real_car==False):\n GraphFilePath_SSD = 'light_classification/model_udsim/ssd_inception_v2_inference_graph.pb'\n ##GraphFilePath_SSD = \"models/frozen_ssd_sim_20190114_10K_Steps/frozen_inference_graph.pb\"'\n ##GraphFilePath_SSD = 'light_classification/model_udsim/frozen_inference_graph.pb'\n self.detection_graph = load_graph(GraphFilePath_SSD)\n\n # The input placeholder for the image.\n # `get_tensor_by_name` returns the Tensor with the associated name in the Graph.\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n\n # Each box represents a part of the image where a particular object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n\n # The classification of the object (integer id).\n self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n\n self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n\n self.sess = tf.Session(graph=self.detection_graph)\n else:\n pass\n \n#-----------------------------------------------------------------------------------\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n #TODO implement light color prediction\n \n with self.detection_graph.as_default():\n if(self.is_debug_mode == True):\n ### For Test Only\n from PIL import Image\n image_sample = Image.open(image)\n image_np = np.expand_dims(np.asarray(image_sample, dtype=np.uint8), 0)\n else:\n image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)\n\n # Actual detection.\n time_start = time.time()\n (boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes], \\\n feed_dict={self.image_tensor: image_np})\n time_end = time.time()\n detection_time = (time_end - time_start)##*1000\n \n # Remove unnecessary dimensions\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes)##.astype(np.int32)\n\n print()\n print(\"Detection Time(sec):{0:f} | Score:{1:f} | Class:{2:f}\".format(detection_time, scores[0], classes[0]) )\n ##print(\"Detection Time(sec):\", detection_time)\n ##print(\"Score:\", scores)\n ##print(\"Class\", classes)\n \n if(self.is_debug_mode == True):\n\n # Filter boxes with a confidence score less than `confidence_cutoff`\n boxes, scores, classes = filter_boxes(self.detection_confidence, boxes, scores, classes)\n\n # The current box coordinates are normalized to a range between 0 and 1.\n # This converts the coordinates actual location on the image.\n import 
matplotlib.pyplot as plt\n import matplotlib.image as mpimg\n width, height = image_sample.size\n box_coords = convert_to_image_coordinates(boxes, height, width)\n\n # Each class with be represented by a differently colored box\n draw_boxes(image_sample, box_coords, classes)\n mpimg.imsave(\"./processed_image/processed_\"+image+\".png\", image_sample, format='png')\n ##plt.style.use('ggplot')\n ##plt.figure(figsize=(12, 8))\n ##plt.imshow(image_sample)\n\n classes = classes.astype(np.int32)\n \n detected_colour = 999\n \n if (scores[0] > self.detection_confidence):\n if (classes[0] == 2):\n print('Red')\n if __name__ != '__main__': detected_colour = TrafficLight.RED\n elif (classes[0] == 3):\n print('Yellow')\n if __name__ != '__main__': detected_colour = TrafficLight.YELLOW\n elif (classes[0] == 1):\n print('Green')\n if __name__ != '__main__': detected_colour = TrafficLight.GREEN\n else:\n print('No Traffic Light')\n if __name__ != '__main__': detected_colour = TrafficLight.UNKNOWN\n else:\n print('No Traffic Light')\n if __name__ != '__main__': detected_colour = TrafficLight.UNKNOWN\n \n return detected_colour\n \n#===================================================================================\nif __name__ == '__main__':\n\n light_classifier = TLClassifier(boIsContextRealCar = False, \n boDebugMode = True, \n ConfidenceThreshold = 0.5)\n \n light_classifier.get_classification(\"green1.jpg\")\n light_classifier.get_classification(\"green2.jpg\")\n light_classifier.get_classification(\"red1.jpg\")\n light_classifier.get_classification(\"red2.jpg\")\n light_classifier.get_classification(\"yellow1.jpg\")\n light_classifier.get_classification(\"yellow2.jpg\")","sub_path":"ros_backup/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":8615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212907153","text":"import torch\nimport numpy as np\nfrom random import shuffle\nfrom utils.integral_validation import Sampler, evaluate_integral\n\n\nclass VegasSampler(Sampler):\n def __init__(self, integrator, integrand, train=True, n_survey_steps=10, n_batch=100000):\n \"\"\"\n\n Parameters\n ----------\n integrator\n train\n n_survey_steps\n n_batch\n \"\"\"\n\n self.integrator = integrator\n self.point_iterator = None\n self.actual_n_batch = 0\n self.integrand = integrand\n\n if train:\n self.train_integrator(n_survey_steps, n_batch)\n\n def reset_point_iterator(self):\n self.actual_n_batch = len(list(self.integrator.random()))\n self.point_iterator = self.integrator.random()\n\n def get_point(self):\n \"\"\"Sample a single point from the vegas integrator with point pdfs normalized to 1.\n\n Yields\n -------\n x, px: tuple of float\n point and its pdf\n \"\"\"\n if self.point_iterator is None:\n self.reset_point_iterator()\n try:\n x, wx = next(self.point_iterator)\n except StopIteration:\n self.reset_point_iterator()\n x, wx = next(self.point_iterator)\n # x is originally a view: map it to an array\n # furthermore the C backend of vegas.Integrator.random\n # reuses the same location in memory to store points: we need to copy\n x = np.asarray(x).copy()\n wx = float(np.asarray(wx))*self.actual_n_batch\n return x, 1 / wx\n\n def train_integrator(self, n_survey_steps, n_batch):\n \"\"\"Train the integrator before sampling\n\n Parameters\n ----------\n n_survey_steps: int\n if train is `True`, how many survey steps to use for training\n n_batch: int\n maximum number of function 
evaluations per survey step\n \"\"\"\n self.integrator(self.integrand, nitn=n_survey_steps, neval=n_batch)\n # integrating changes how points are sampled, the iterator should be reset\n self.reset_point_iterator()\n\n def sample(self, f, n_batch=10000, *args, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n f: batch callable\n function mapping numpy arrays to numpy arrays\n n_batch\n args\n kwargs\n\n Returns\n -------\n\n \"\"\"\n xs = []\n pxs = []\n while len(xs) <= n_batch:\n xi, pxi = self.get_point()\n xs.append(xi)\n pxs.append(pxi)\n x = np.array(xs)\n px = torch.tensor(pxs)\n fx = f(x)\n x = torch.tensor(x)\n\n return x, px, fx\n\n\ndef evaluate_integral_vegas(f, integrator, n_batch=10000, train=True, n_survey_steps=10, n_batch_survey=10000):\n \"\"\"Validate a known integral using a VEGAS integrator as a sampler\n\n Parameters\n ----------\n f: utils.integrands.KnownIntegrand\n integrator: zunis.integration.adaptive_survey_integrator.AdaptiveSurveyIntegrator\n n_batch: int\n train: bool\n whether to train the integrator using `integrator.survey`\n n_survey_steps: int\n positional `integrator.survey` argument\n n_batch_survey: int\n\n\n Returns\n -------\n utils.record.EvaluationRecord\n \"\"\"\n sampler = VegasSampler(integrator, f, train=train, n_survey_steps=n_survey_steps, n_batch=n_batch_survey)\n sampler.reset_point_iterator()\n\n return evaluate_integral(f, sampler, n_batch)\n","sub_path":"experiments/benchmarks/utils/vegas_integrals.py","file_name":"vegas_integrals.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556358054","text":"BreakFast = [\"卤肉卷\",\"鸡蛋饼\",\"肉夹馍\",\"小笼包\",\"鸡蛋灌饼\"]\nLunch = [\"天府快餐\",\"石锅拌饭\",\"六元套餐\",\"生煎包\",\"盖浇饭\",\"黄焖鸡\",\"锡纸烧\"]\nDinner = [\"天府快餐\",\"重庆小面\",\"盖浇饭\",\"酸菜鱼\",\"石锅拌饭\",\n \"老潼关肉夹馍\",\"黄焖鸡\",\"锡纸烧\"]\nRich = [\"杨国福麻辣烫\",\"张亮麻辣烫\",\"冒菜\",\"小火锅\",\"小四川\"]\nSnack = [\"鸡排\",\"烤冷面\",\"煎饼\"]\n\n\n\n\ndata = {\n \"早餐\": BreakFast,\n \"中餐\": Lunch,\n \"晚餐\": Dinner,\n \"富贵饮食\": Rich,\n \"小吃\": Snack\n }\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560907451","text":"import csv\r\nimport __main__ as Simulator\r\n\r\ndef load_gps_paths():\r\n with open('gps_path.txt', 'r') as gpsfile:\r\n p = csv.reader(gpsfile, delimiter='\\t')\r\n for row in p:\r\n if len(row) >= 3:\r\n r = [float(row[0]), float(row[1]), float(row[2])] \r\n Simulator.gps_paths.append(r)\r\n Simulator.L.info(\"GPS paths: %d rows\" % len(Simulator.gps_paths))\r\n\r\n","sub_path":"workload/dockers/IoT/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"576331184","text":"import numpy as np\nimport tensorflow as tf\nimport time\nfrom scipy.misc import logsumexp\nfrom network.network import construct_network\nfrom .loss_functions import reconstruction_loss\nfrom .loss_functions import log_prior\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST')\n\nimport pdb\nfrom scipy.optimize import minimize\n\n#q1_star = tf.placeholder(tf.float32, shape=[])\nq1_star1 = tf.placeholder(tf.float32, shape=[])#we need to pass 1.0-q0_star, because of float32 and numerical issues\n\nq0_star = 1.0-1e-6\n\ndef root_qELBO(var, ratio_np, eq_term, num_samples):\n #pdb.set_trace()\n q_min = 
var\n op_node_qELBO_loss_np = (1.0 - q_min) * ratio_np\n\n ratio_qELBO_loss_np = logsumexp(op_node_qELBO_loss_np, axis=0) # will have dimensions=[batch_size]\n\n S_qELBO_loss_np = np.exp(-np.log(float(num_samples)) + ratio_qELBO_loss_np) # we need to account for N_z_IS, when approximating the expectation with a Monte Carlo estimate\n\n qELBO_loss_np = (S_qELBO_loss_np - 1.0)/( 1.0 - q_min)\n mean_qELBO_loss_np = np.mean(qELBO_loss_np) # take the mean over the batch size\n\n return (mean_qELBO_loss_np-eq_term)**2\n\ndef compute_qELBO(q_min, ratio_np, num_samples):\n #pdb.set_trace()\n op_node_qELBO_loss_np = (1.0 - q_min) * ratio_np\n\n ratio_qELBO_loss_np = logsumexp(op_node_qELBO_loss_np, axis=0) # will have dimensions=[batch_size]\n\n S_qELBO_loss_np = np.exp(-np.log(float(num_samples)) + ratio_qELBO_loss_np) # we need to account for N_z_IS, when approximating the expectation with a Monte Carlo estimate\n\n qELBO_loss_np = (S_qELBO_loss_np - 1.0)/( 1.0 - q_min)\n mean_qELBO_loss_np = np.mean(qELBO_loss_np) # take the mean over the batch size\n\n return mean_qELBO_loss_np\n\ndef root_qELBO_no_exp(var, ratio_np1, eq_term):\n #pdb.set_trace()\n q_min = var\n Nq = ratio_np1.shape[0]\n\n F1 = (1.0 - q_min) * ratio_np1 # see my notebook for the notation of F1 and F2;\n # ratio_np1=means 1 sample from the batch; for each sample in the batch, we compute a q_opt and then average over the q\n\n F2 = logsumexp(F1) # will have dimensions=1\n # we multiply here with 100, to increase the size of the operands, to make the optimization process easier, i.e. increase the accuracy of the solution\n\n LHS_term = np.log((1.0 - q_min) * eq_term + 1) + np.log(float(Nq))\n\n return (LHS_term - F2)**2\n\ndef variational_lowerbound(x, encoder, decoder, num_samples, batch_size, \\\n alpha = 1.0, backward_pass = 'full'):\n \"\"\"\n Compute the loss function of VR lowerbound\n \"\"\"\n #logpxz, logqzx, z_list = reconstruction_loss(x, encoder, decoder, num_samples)\n logpxz = 0.0\n logqzx = 0.0\n L = len(encoder.S_layers)\n x_rep = tf.tile(x, [num_samples, 1]) \n input = x_rep\n\n # do encoding\n samples = []\n for l in range(L):\n output, logq = encoder.S_layers[l].encode_and_log_prob(input)\n logqzx = logqzx + logq\n samples.append(output)\n input = output\n\n # do decoding\n samples = list(reversed(samples))\n samples.append(x_rep)\n for l in range(L):\n _, logp = decoder.S_layers[l].encode_and_log_prob(samples[l], eval_output = samples[l+1])\n logpxz = logpxz + logp\n\n logpz = log_prior(output, encoder.S_layers[l].get_prob_type())\n logF = logpz + logpxz - logqzx \n\n ########################################################################################################################\n ############################################################################################\n # # Test the combination of tf.tile and tf.reshape\n # x_tr = tf.get_variable(name='x_tile_reshape',\n # initializer=tf.truncated_normal(shape=[4, 6], mean=15.01, stddev=0.01, dtype=dtype_var))\n # x_rep = tf.tile(x_tr, [7, 1])\n # mvn_tr = tfd.MultivariateNormalDiag(loc=15.01 * tf.ones(shape=(6,), dtype=dtype_var),\n # scale_diag=0.01 * tf.ones(shape=(6,), dtype=dtype_var))\n # log_tr = mvn_tr.log_prob(x_rep)\n # x_back47 = tf.reshape(log_tr, shape=[4, 7])\n # x_back74 = tf.reshape(log_tr, shape=[7, 4])#this reshaping is correct\n # #pdb.set_trace()\n logF_reshaped = tf.reshape(logF,shape=[num_samples,batch_size]) #will have dimensions [num_samples,batch_size]\n\n #q1_star = 1.0 -1e-6\n\n #op_node_qELBO_loss = tf.multiply(1.0 - 
q1_star, logF_reshaped)\n op_node_qELBO_loss = tf.multiply(q1_star1, logF_reshaped)\n\n ratio_qELBO_loss = tf.reduce_logsumexp(op_node_qELBO_loss, axis=0) # will have dimensions=[batch_size]\n\n S_qELBO_loss = tf.exp(-tf.log(float(num_samples)) + ratio_qELBO_loss) # we need to account for N_z_IS, when approximating the expectation with a Monte Carlo estimate\n #qELBO_loss = tf.div(S_qELBO_loss - 1.0, 1.0 - q1_star)\n qELBO_loss = tf.div(S_qELBO_loss - 1.0, q1_star1)\n\n #qELBO_loss = tf.exp(-tf.log(float(num_samples)) + ratio_qELBO_loss - tf.log(1-q1_star))\n mean_qELBO_loss = tf.reduce_mean(qELBO_loss) # take the mean over the batch size\n\n CUBO = 0.5 * (-tf.log(float(num_samples)) + tf.reduce_logsumexp(2.0 * logF_reshaped, axis=0)) # standard CUBO, n=2\n #CUBO = 0.5 * (-tf.log(float(1000)) + tf.reduce_logsumexp(2.0 * logF_reshaped, axis=0)) # standard CUBO, n=2\n\n mean_CUBO = tf.reduce_mean(CUBO) # mean over the batch size\n ########################################################################################################################\n \n if backward_pass == 'max': \n logF = tf.reshape(logF, [num_samples, batch_size]) \n logF = tf.reduce_max(logF, 0)\n lowerbound = tf.reduce_mean(logF)\n elif backward_pass == 'min':\n logF = tf.reshape(logF, [num_samples, batch_size])\n logF = tf.reduce_min(logF, 0)\n lowerbound = tf.reduce_mean(logF)\n elif np.abs(alpha - 1.0) < 10e-3:\n #lowerbound = tf.reduce_mean(logF)\n lowerbound = mean_qELBO_loss\n else:\n logF = tf.reshape(logF, [num_samples, batch_size])\n logF = logF * (1 - alpha) \n logF_max = tf.reduce_max(logF, 0) \n logF = tf.log(tf.clip_by_value(tf.reduce_mean(tf.exp(logF - logF_max), 0), 1e-9, np.inf))\n logF = (logF + logF_max) / (1 - alpha)\n lowerbound = tf.reduce_mean(logF)\n \n #lowerbound = mean_qELBO_loss\n return lowerbound, logF, mean_qELBO_loss, mean_CUBO#, logpz, logpxz, logqzx\n \ndef make_functions_vae(models, input_size, num_samples, batch_size, \\\n alpha = 1.0, backward_pass = 'full'): \n encoder, decoder = models \n \n input = tf.placeholder(tf.float32, [batch_size, input_size])\n\n lowerbound, logF, mean_qELBO_loss, mean_CUBO = variational_lowerbound(input, encoder, decoder, num_samples, batch_size, \\\n alpha, backward_pass)\n \n learning_rate_ph = tf.placeholder(tf.float32, shape = [])\n optimizer = \\\n tf.train.AdamOptimizer(learning_rate=learning_rate_ph, \\\n beta1=0.9, beta2=0.999, epsilon=10e-8 \\\n ).minimize(-lowerbound)\n \n def updateParams(sess, X, learning_rate = 0.0005):\n\n #opt, cost0 = sess.run((optimizer, lowerbound), feed_dict={input: X,\n # q1_star: 1.0 - 1e-6,\n # learning_rate_ph: learning_rate})\n global q0_star\n #print(\"\\n q0_star=\",q0_star)\n cost, logF_np, mqelbol, mcubo = sess.run((lowerbound, logF, mean_qELBO_loss, mean_CUBO),\n feed_dict={input: X,\n q1_star1: 1.0-q0_star,\n learning_rate_ph:learning_rate})\n\n Nq = logF_np.shape[0]\n\n eq_term = cost + 0.5 * (mcubo - cost)#these elements do not depend on q1_star\n\n q0 = 1.0 + 1.0 / eq_term + 1e-10\n var0 = [q0]\n #var_opt3 = minimize(root_qELBO, var0, args=(logF_np, eq_term, num_samples), bounds=[(1.0 + 1.0 / eq_term, None)])\n\n #var_opt3 = minimize(root_qELBO_no_exp, var0, args=(logF_np, eq_term, num_samples), method='L-BFGS-B',\n # bounds=[(1.0 + 1.0 / eq_term, None)], options={'ftol':1e-4,'gtol':1e-4,'eps':1e-6})\n\n # var_opt3 = minimize(root_qELBO_no_exp, var0, args=(logF_np, eq_term), method='L-BFGS-B',\n # bounds=[(1.0 + 1.0 / eq_term, None)], options={'ftol': 1e-9, 'gtol': 1e-9, 'eps': 1e-10})\n # q0_star = 
var_opt3.x[0]\n logF_np_reshaped = np.reshape(logF_np, (num_samples, batch_size))\n\n var_opt3 = minimize(root_qELBO, var0, args=(logF_np_reshaped, eq_term, num_samples), method='L-BFGS-B',\n bounds=[(1.0 + 1.0 / eq_term, 1.1)], options={'ftol': 1e-9, 'gtol': 1e-9, 'eps': 1e-10})\n q0_star = var_opt3.x[0]\n #pdb.set_trace()\n\n #eq_term_comp = compute_qELBO(q0_star, logF_np_reshaped, num_samples)\n\n\n cost_q, mqelbol_q, mcubo_q = sess.run((lowerbound, mean_qELBO_loss, mean_CUBO),\n feed_dict={input: X,\n q1_star1: 1.0-q0_star,\n learning_rate_ph:learning_rate})\n\n opt = sess.run((optimizer), feed_dict={input: X,\n q1_star1: 1.0 - q0_star,\n learning_rate_ph: learning_rate})\n\n # #var_opt3 = minimize(root_qELBO_no_exp, var0, args=(logF_np, eq_term, num_samples), method='L-BFGS-B',\n # # options={'ftol': 1e-4, 'gtol': 1e-4, 'eps': 1e-6})\n #\n # rez0 = root_qELBO_no_exp(q0, logF_np, eq_term)\n # rez_min = root_qELBO_no_exp(var_opt3.x, logF_np, eq_term)\n #\n # #pdb.set_trace()\n # q_min = q0\n # F10 = (1.0 - q_min) * logF_np # see my notebook for the notation of F1 and F2;\n # # # ratio_np1=means 1 sample from the batch; for each sample in the batch, we compute a q_opt and then average over the q\n # #\n # F20 = logsumexp(F10) # will have dimensions=1\n # logS0 = -np.log(float(Nq)) + F20\n # S0 = np.exp(logS0)\n #\n # LHS0 = np.log((1.0 - q_min) * eq_term + 1) + np.log(float(Nq))\n #\n # eq_term0 = (S0-1)/(1-q_min)\n #\n # q_min = var_opt3.x\n # F1 = (1.0 - q_min) * logF_np # see my notebook for the notation of F1 and F2;\n # # # ratio_np1=means 1 sample from the batch; for each sample in the batch, we compute a q_opt and then average over the q\n # #\n # F2 = logsumexp(F1) # will have dimensions=1\n # logS = -np.log(float(Nq)) + F2\n # S = np.exp(logS)\n #\n # LHS2 = np.log((1.0 - q_min) * eq_term + 1) + np.log(float(Nq))\n #\n # eq_term2 = (S - 1) / (1 - q_min)\n #\n # pdb.set_trace()\n #\n # # # we multiply here with 100, to increase the size of the operands, to make the optimization process easier, i.e. 
increase the accuracy of the solution\n # #\n # # LHS_term = np.log((1.0 - q_min) * eq_term + 1) + np.log(float(num_samples))\n # #\n # # return (LHS_term - F2) ** 2\n #\n # #print(\"\\n bound on q=\",1.0 + 1.0 / eq_term)\n # #print(\"\\n q_opt=\",var_opt3.x[0])\n #\n # #pdb.set_trace()\n\n\n # cost_q, mqelbol_q, mcubo_q = sess.run((lowerbound, mean_qELBO_loss, mean_CUBO),\n # feed_dict={input: X,\n # q1_star: var_opt3.x[0],\n # learning_rate_ph:learning_rate})\n\n '''\n print(\"\\n Before q-optimization: mean_CUBO=\", mcubo)\n print(\"\\n Before q-optimization: lowerbound=\", cost)\n print(\"\\n Before q-optimization: mqelbol=\", mqelbol)\n\n print(\"\\n eq_term=\", eq_term)\n print(\"\\n bound on q with eq_term=\", 1.0 + 1.0 / eq_term)\n print(\"\\n q_opt=\", var_opt3.x[0])\n\n print(\"\\n After q-optimization: mean_CUBO=\", mcubo_q)\n print(\"\\n lowerbound_q=\", cost_q)\n print(\"\\n mqelbol_q=\", mqelbol_q)\n\n #compute_mqelbo = compute_qELBO(var_opt3.x[0], logF_np, num_samples)\n #print(\"\\n computed_mqelbol_q=\", compute_mqelbo)\n\n #pdb.set_trace()\n '''\n return cost, logF_np, mqelbol, mcubo_q, cost_q, mqelbol_q\n\n return updateParams, lowerbound, logF, mean_qELBO_loss\n \ndef init_optimizer(models, input_size, batch_size = 100, num_samples = 1, **kwargs):\n \n encoder = models[0]; decoder = models[1]\n # vae\n if 'alpha' not in kwargs:\n alpha = 1.0\n else:\n alpha = kwargs['alpha']\n if 'backward_pass' not in kwargs:\n backward_pass = 'full'\n else:\n backward_pass = kwargs['backward_pass']\n updateParams, lowerbound, logF, mean_qELBO_loss = \\\n make_functions_vae(models, input_size, \\\n num_samples, batch_size, \\\n alpha, backward_pass)\n\n def fit(sess, X, n_iter = 100, learning_rate = 0.0005, verbose = True):\n # first make batches of source data\n [N, dimX] = X.shape \n N_batch = N / batch_size\n if np.mod(N, batch_size) != 0:\n N_batch += 1 \n print(\"training the model for %d iterations with lr=%f\" % \\\n (n_iter, learning_rate))\n\n begin = time.time()\n for iteration in range(1, n_iter + 1):\n iteration_lowerbound = 0\n iteration_mcubo_q = 0\n iteration_mqelbol_q = 0\n #ind_s = np.random.permutation(range(N))\n\n for j in range(0, int(N_batch)):\n # indl = int(j * batch_size)\n # indr = int((j+1) * batch_size)\n # ind = ind_s[indl:min(indr, N)]\n # if indr > N:\n # ind = np.concatenate((ind, ind_s[:(indr-N)]))\n # batch = X[ind]\n batch_label = mnist.train.next_batch(batch_size)\n batch = batch_label[0]\n\n lowerbound_np, logF_np, mqelbol, mcubo_q, cost_q, mqelbol_q = updateParams(sess, batch, learning_rate)\n\n #lowerbound_np should be equal to lb_q\n #iteration_lowerbound += lowerbound_np * batch_size\n iteration_mcubo_q += mcubo_q * batch_size\n iteration_lowerbound += cost_q * batch_size\n iteration_mqelbol_q += mqelbol_q * batch_size\n #pdb.set_trace()\n if verbose:\n end = time.time()\n #print(\"mean_CUBO=\",mcubo_q)\n print(\"Iteration %d, mcubo_q = %.2f, time = %.2fs\"\n % (iteration, iteration_mcubo_q / N, end - begin))\n print(\"Iteration %d, lowerbound_cost_q = %.2f, time = %.2fs\"\n % (iteration, iteration_lowerbound / N, end - begin))\n print(\"Iteration %d, lowerbound_mqelbo_q = %.2f, time = %.2fs\"\n % (iteration, iteration_mqelbol_q / N, end - begin))\n print(\"\\n\")\n\n #print(\"\\n ratio=logF_np=\",logF_np, \"mean_qELBO_loss=\",mqelbol)\n \n '''\n print(\"\\n Before optimization of q: mean_CUBO=\", mcubo)\n print(\"\\n Before optimization of q: lowerbound=\", lowerbound_np)\n print(\"\\n Before optimization of q: mean_qELBO_loss=\", mqelbol)\n 
print(\"\\n After optimization of q: lowerbound_q=\", lb_q)\n print(\"\\n After optimization of q: mean_qELBO_loss_q=\", mqelbol_q)\n #pdb.set_trace()\n ''' \n begin = end\n \n \n def eval_test_ll(sess, X, num_samples):\n #lowerbound_np, logF_np, mean_qELBO_loss_np, mean_CUBO_np = sess.run(variational_lowerbound(X, encoder, decoder, num_samples, X.shape[0], 0.0))\n global q0_star\n #print(\"q0_star=\",q0_star)\n #pdb.set_trace()\n \n cost, logF_np, mqelbol, mcubo = sess.run(variational_lowerbound(X, encoder, decoder, num_samples, X.shape[0], 1.0),\n feed_dict={q1_star1: 1.0-q0_star})\n\n\n Nq = logF_np.shape[0]\n eq_term = cost + 0.5 * (mcubo - cost) # these elements do not depend on q1_star\n q0 = 1.0 + 1.0 / eq_term + 1e-10\n var0 = [q0]\n #var_opt3 = minimize(root_qELBO_no_exp, var0, args=(logF_np, eq_term), method='L-BFGS-B',\n # bounds=[(1.0 + 1.0 / eq_term, None)], options={'ftol': 1e-9, 'gtol': 1e-9, 'eps': 1e-10})\n #q0_star = var_opt3.x[0]\n logF_np_reshaped = np.reshape(logF_np, (num_samples, batch_size))\n\n var_opt3 = minimize(root_qELBO, var0, args=(logF_np_reshaped, eq_term, num_samples), method='L-BFGS-B',\n bounds=[(1.0 + 1.0 / eq_term, 1.1)], options={'ftol': 1e-9, 'gtol': 1e-9, 'eps': 1e-10})\n q0_star = var_opt3.x[0]\n \n lowerbound_np, logF_np, mean_qELBO_loss_np, mean_CUBO_np = sess.run(\n variational_lowerbound(X, encoder, decoder, num_samples, X.shape[0], 0.0),\n feed_dict={q1_star1: 1.0-q0_star})\n #lowerbound_np==log_px_IS, mean_qELBO_loss_np=mqelbo\n\n return lowerbound_np, logF_np, mean_qELBO_loss_np, mean_CUBO_np\n\n def score(sess, X, num_samples = 100):\n \"\"\"\n Computer lower bound on data, following the IWAE paper.\n \"\"\"\n \n begin = time.time()\n print('num. samples for eval:', num_samples)\n \n # compute log_q\n log_px_IS_total = 0\n mcubo_total = 0\n mqelbo_total = 0\n\n num_data_test = X.shape[0]\n if num_data_test % batch_size == 0:\n num_batch = num_data_test / batch_size\n else:\n num_batch = num_data_test / batch_size + 1\n \n for i in range(int(num_batch)):\n # indl = int(i*batch_size)\n # indr = int(min((i+1)*batch_size, num_data_test))\n # minibatch = X[indl:indr]\n batch_label = mnist.test.next_batch(batch_size)\n minibatch = batch_label[0]\n\n lowerbound, logF_np, mean_qELBO_loss_np, mean_CUBO_np = eval_test_ll(sess, minibatch, num_samples)\n #lowerbound_total += lowerbound * (indr - indl)\n log_px_IS_total += lowerbound * batch_size\n mcubo_total += mean_CUBO_np * batch_size\n mqelbo_total += mean_qELBO_loss_np * batch_size\n\n #print(\"\\n Test set: mean_CUBO=\", mean_CUBO_np)\n #print(\"\\n Test set: lowerbound=\", lowerbound)\n #print(\"\\n Test set: mean_qELBO_loss=\", mean_qELBO_loss_np)\n\n\n end = time.time()\n time_test = end - begin\n log_px_IS_total = log_px_IS_total / float(num_data_test)\n mcubo_total = mcubo_total / float(num_data_test)\n mqelbo_total = mqelbo_total / float(num_data_test)\n\n print(\"\\n Test set: mean_CUBO=\", mcubo_total)\n print(\"\\n Test set: log_px_IS=\", log_px_IS_total)\n print(\"\\n Test set: mean_qELBO_loss=\", mqelbo_total)\n\n return log_px_IS_total, time_test\n \n return fit, score \n","sub_path":"models/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":19370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491268524","text":"# -*- coding: utf-8 -*-\n\"\"\"\nprograma que calcula a redução da expectativa de vida para os fumantes\n\n@author: Francisco Janela\n\"\"\"\n\n#calcula a redução em dias\ndef 
reducao_tempo_de_vida(cigarros,anos):\n por_ano=cigarros*365\n perda_minutos=10*por_ano*anos\n perda_dias=perda_minutos/1440\n return perda_dias\n\n#input do usuário sobre seus hábitos\ncigarros_por_dia=input('Quantos cigarros fuma por dia? ')\nanos_fumando=input('Faz quantos anos que fuma? ')\n\n#imprime o valor de dias inteiros\nprint('sua vida foi reduzida com sucesso em {0} dias'.format(reducao_tempo_de_vida(int(cigarros_por_dia),int(anos_fumando))))","sub_path":"backup/user_333/ch22_2020_03_02_23_05_47_372444.py","file_name":"ch22_2020_03_02_23_05_47_372444.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139215103","text":"#!algotrading/bin/python3\n\n'''\nCreates a time series from all 500 stocks\nin the S&P\n\n'''\n\nimport urllib3\nfrom bs4 import BeautifulSoup\nfrom securityList import SecurityList\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef scrape_list(site):\n\n print('Scraping tickers')\n hdr = {'User-Agent': 'Mozilla/5.0'}\n http = urllib3.PoolManager()\n response = http.request('GET',site)\n soup = BeautifulSoup(response.data, 'html.parser')\n\n table = soup.find('table', {'class': 'wikitable sortable'})\n sector_tickers = dict()\n for row in table.findAll('tr'):\n col = row.findAll('td')\n if len(col) > 0:\n sector = str(col[3].string.strip()).lower().replace(' ', '_')\n ticker = str(col[0].string.strip())\n for i in range(len(ticker)):\n if ticker[i] == '.':\n new = ticker[:i]+'_'+ticker[(i+1):]\n ticker = new\n if sector not in sector_tickers:\n sector_tickers[sector] = list()\n sector_tickers[sector].append(ticker)\n return sector_tickers\n\ndef main():\n\n tickers = scrape_list('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies');\n lticks = list()\n for l in tickers.values():\n for tick in l:\n lticks.append(tick)\n sl = SecurityList(lticks)\n start = datetime.datetime(1994,9,29)\n end = datetime.datetime(2017,4,5)\n sl.downloadQuandl(start, end)\n ts = sl.genTimeSeries()\n plt.plot(np.arange(len(ts)),ts)\n plt.show()\n\nmain()\n","sub_path":"test_500.py","file_name":"test_500.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"174268209","text":"import re\n\nfolder = '/Users/snownontrace/Desktop/RNAseq/'\n# datafile = folder + \"genes_Ensembl_WBcel235.gtf\"\ndatafile = folder + \"genes_Ensembl_WBcel235_converted_chromosome.gtf\"\n# datafile = folder + \"genes_ucsc_ce10.gtf\"\n# datafile = folder + \"test_genes.gtf\"\n\n# romanNumeral = re.compile('^[MDCLXVI]+$') #this is non-strict Roman numerals that do not check order\nromanNumeral = re.compile('^(?=[MDCLXVI])M*(C[MD]|D?C{0,3})(X[CL]|L?X{0,3})(I[XV]|V?I{0,3})$') #this is strict Roman numerals that check order\n\nnewGTFfile = open(datafile.split('.')[0]+'_converted_chromosome.gtf','w')\n\nwith open(datafile) as f:\n# chrlist = [r.split()[0] for r in f]\n# seen = set()\n# uniq = []\n# for chrName in chrlist:\n# if chrName not in seen:\n# uniq.append(chrName)\n# seen.add(chrName)\n# print seen #this gives all chromosome names seen in the file\n\n for r in f:\n if romanNumeral.match(r.split()[0]):\n newGTFfile.write('chr'+r)\n else:\n 
newGTFfile.write('chrM'+'\\t'+'\\t'.join(r.split()[1:])+'\\n')\n","sub_path":"RNAseq-scripts-AdinaPaper/convert_chromosome_name.py","file_name":"convert_chromosome_name.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"618611763","text":"#-*- encoding: UTF-8 -*-\nimport cPickle\nimport numpy as np\n\nclass Cifar(object):\n\n\tfile_path = '/home_local/henk_di/datasets/cifar-10-batches-py'\n\n\tfiles = [file_path + '/data_batch_%d' %i for i in range(1,6)]\n\tX = np.zeros((0,3072), dtype=np.uint8)\n\ty = np.zeros(0, dtype=np.uint8)\n\tfor file in files:\n\t\twith open(file) as f:\n\t\t\tcifar = cPickle.load(f)\n\t\t\tX = np.vstack((X, cifar['data']))\n\t\t\ty = np.hstack((y, cifar['labels']))\n\tX = np.ascontiguousarray(X.reshape(-1,3,32,32).transpose((0,2,3,1)))\n\tXTest = None\n\tyTest = None\n\twith open(file_path + '/test_batch') as f:\n\t\tcifar = cPickle.load(f)\n\t\tXTest = np.ascontiguousarray(cifar['data'].reshape(-1,3,32,32).transpose((0,2,3,1)))\n\t\tyTest = np.array(cifar['labels'])","sub_path":"dataset_viewer/v3/Cifar.py","file_name":"Cifar.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447324589","text":"import os, sys, logging, gc, time, math\nimport tensorflow as tf\nimport numpy as np\nfrom numpy import array, argmax\nfrom numpy import linalg as LA\nfrom keras.models import Sequential, Model\nfrom keras.layers import LSTM, CuDNNGRU, Dense, RepeatVector, TimeDistributed, Input, GRU\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.distance import cosine\nsys.path.append('../')\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nclass AttentionWithGRU():\n def __init__(self, questionWordList, storyWordList, answerList, gru_units, model_fit_epochs, hops):\n self.questionWordList = questionWordList\n self.storyWordList = storyWordList\n self.answerList = answerList\n self.gru_units = gru_units\n self.model_fit_epochs = model_fit_epochs\n self.hops = hops\n \n def AttentionWithGRUMain(self):\n \n # QuestionBidirectionalGRU input Vector\n forwardV, backwardV = self.OneHotEncoding(self.questionWordList), self.OneHotEncoding(list(reversed(self.questionWordList)))\n #print(\"forV len:\",len(forwardV))\n # StoryBidirectionalGRU \n storyV = self.BidirectionalStoryGRU(self.storyWordList)\n #print(\"storyVector len:\", len(storyV))\n # QuestionBidirectionalGRU\n questionV = self.BidirectionalGRU(forwardV, backwardV)\n #print(questionV.shape)\n # hops for n iteration\n for h in range(self.hops):\n print(\"Start processing hops summed.\")\n # AttentionValue\n #print(\"storyVector len:\", len(storyV))\n attentionValueV = self.AttentionValue(storyV, questionV)\n #print(\"attentionValueVector length:\",len(attentionValueV))\n # WordLevelAttetion\n storyWordLevelV = self.WordLevelAttention(storyV, attentionValueV)\n #print(storyWordLevelV.shape)\n # hops, VQn and VSn+1 summed to form a new question Vector VQn+1\n if len(questionV)> len(storyWordLevelV):\n summend_len = len(storyWordLevelV)\n else:\n summend_len = len(questionV)\n\n for j in range(summend_len):\n storyWordLevelV[j] += questionV[j]\n # use final attention VS vector as next VQ vector\n forwardV, backwardV = storyWordLevelV, np.flip(storyWordLevelV, axis = 0)\n # QuestionBidirectionalGRU\n questionV = 
self.BidirectionalGRU(forwardV, backwardV)\n print(\"Finished {} hops summed!\".format(h+1))\n\n # guess answer\n print(\"Start calculate answer vector.\")\n highestScoreAnswer = 0\n guessAnswer = 1\n ind = 1\n for a in self.answerList:\n # AnswerBidirectionalGRU input Vector\n ansForwardV, ansBackwardV = self.OneHotEncoding(a), self.OneHotEncoding(list(reversed(a)))\n # AnswerBidirectionalGRU\n answerV = self.BidirectionalGRU(ansForwardV, ansBackwardV)\n # use final attention VS vector as FINAL VQ vector\n # guess answer by calculate cosine value between storyV and answerV\n #tempScoreAnswer = cosine(questionV, answerV)\n tempScoreAnswer = cosine_similarity(questionV.reshape(1,-1), answerV.reshape(1,-1))\n if highestScoreAnswer < tempScoreAnswer:\n highestScoreAnswer = tempScoreAnswer\n guessAnswer = ind\n #print(\"CurrentAnswer score\",tempScoreAnswer)\n #print(\"HighestScoreAnswer score\",highestScoreAnswer)\n ind += 1\n\n print(\"GuessAnswer: \", guessAnswer)\n return guessAnswer\n\n def WordLevelAttention(self, storyVector, attentionValueVector):\n\n storyVector = np.ravel(array(storyVector))\n storyVector = storyVector.tolist()\n wordLevelStoryVector = np.array([(storyVector[i] + storyVector[i+1]) * attentionValueVector[i] for i in range(len(attentionValueVector))])\n \n return wordLevelStoryVector\n\n def AttentionValue(self, storyVector, questionVector):\n # calculate AttentionValue, using cosine similarity between storyVector and questionVector^2\n # transpose question vector length to match up storyVector for calculate cosine similarity\n attentionValue = []\n \n for index in range(len(storyVector)):\n storyVectorElem = storyVector[index].reshape(1,-1)\n questionVector = np.square(questionVector).reshape(1, -1)\n if math.isnan(cosine_similarity(storyVectorElem, questionVector)):\n attentionValue.append(0)\n else:\n attentionValue.append(cosine_similarity(storyVectorElem, questionVector))\n # AttentionValue normalization (actually is softmax in this paper...)\n exps = [np.exp(i) for i in attentionValue]\n sum_of_exps = sum(exps)\n attentionValue_softmax = [j/sum_of_exps for j in exps]\n\n return attentionValue_softmax\n\n def BidirectionalGRU(self, forwardV, backwardV):\n # forward vector hidden state\n f_all_hidden_state, f_final_hidden_state = self.GRU(forwardV)\n # backward vector hidden state\n b_all_hidden_state, b_final_hidden_state = self.GRU(backwardV)\n # concat forward vector and backward vector\n forwardVector, backwardVector = f_final_hidden_state, b_final_hidden_state\n # print(forwardVector.shape)\n # print(backwardVector.shape)\n quesitonVector = np.concatenate((forwardVector,backwardVector), axis=None)\n # print(quesitonVector.shape)\n \n return quesitonVector\n\n def BidirectionalStoryGRU(self, storyWordList):\n # forward vector\n forwardV = self.OneHotEncoding(storyWordList)\n f_all_hidden_state, f_final_hidden_state = self.GRU(forwardV)\n # print(f_all_hidden_state.shape)\n # backward vector\n backwardV = self.OneHotEncoding(list(reversed(storyWordList)))\n b_all_hidden_state, b_final_hidden_state = self.GRU(backwardV)\n # print(b_all_hidden_state.shape)\n # The word vector representation of the t-th word St is constructed \n # by concatenating the hidden layer outputs of forward and backward GRU networks\n storyVector = []\n for index in range(len(f_all_hidden_state[0])):\n storyVector.append(np.concatenate((f_all_hidden_state[0][index],b_all_hidden_state[0][index]), axis=None))\n \n return storyVector\n\n def GRU(self, inputV):\n # define timesteps\n seqlen = 
len(inputV)\n # define model, save GRU all hidden state and final hidden state for question vector representation\n inputs = Input(shape=(seqlen,1))\n temp_all_hidden_state, temp_final_hidden_state = CuDNNGRU(self.gru_units, return_sequences=True, return_state=True)(inputs)\n model = Model(inputs=inputs, outputs=[temp_all_hidden_state, temp_final_hidden_state])\n # define input data\n data = inputV.reshape((1,seqlen,1))\n # train model using encoder method\n model.compile(optimizer='adam', loss='mean_squared_error')\n # train model\n #model.fit(data, data, epochs = self.model_fit_epochs)\n # \n mp = model.predict(data, verbose = 1)\n \n all_hidden_state, final_hidden_state = mp[0], mp[1]\n\n return all_hidden_state, final_hidden_state\n\n def GRUModelEvalute(self, inputV):\n # keras GRU cell\n # reshape input into [samples, timesteps, features]\n train_x = inputV\n print(inputV.shape)\n n_in = len(inputV)\n train_x = train_x.reshape((1, n_in, 1))\n # define model\n model = Sequential()\n model.add(GRU(self.gru_units, activation='relu', input_shape=(n_in,1)))\n model.add(RepeatVector(n_in))\n model.add(GRU(self.gru_units, activation='relu', return_sequences=True, return_state=True))\n model.add(TimeDistributed(Dense(1, activation='relu')))\n model.compile(optimizer='adam', loss='mean_squared_error')\n\n print(model.summary())\n #history = model.fit(train_x, train_x, self.model_fit_epochs)\n print()\n # Plot training & validation loss values\n # plt.plot(history.history['loss'])\n # #plt.plot(history.history['val_loss'])\n # plt.title('Model loss')\n # plt.ylabel('Loss')\n # plt.xlabel('Epoch')\n # plt.legend(['Train', 'Test'], loc='upper left')\n # plt.show()\n print()\n\n def OneHotEncoding(self, WordList):\n # dict transfer to array\n values = array(WordList)\n # integer encode\n label_encoder = LabelEncoder()\n integer_encoded = label_encoder.fit_transform(values)\n # create one-dim oneHotVector, only need to take maximum number building one-hot vector \n # from array then merge all one hot encodeing vector to one-dim vector.\n oneHotV = np.zeros((len(integer_encoded), integer_encoded.max()+1))\n oneHotV[np.arange(len(integer_encoded)), integer_encoded] = 1\n oneHotV = oneHotV.ravel()\n \n return oneHotV\n\n\n\n# Plot training & validation accuracy values\n # plt.plot(history.history['acc'])\n # plt.plot(history.history['val_acc'])\n # plt.title('Model accuracy')\n # plt.ylabel('Accuracy')\n # plt.xlabel('Epoch')\n # plt.legend(['Train', 'Test'], loc='upper left')\n # plt.show()","sub_path":"Developement/AttentionWithGRU/AttentionWithGRU.py","file_name":"AttentionWithGRU.py","file_ext":"py","file_size_in_byte":9455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"425955073","text":"import mock\n\nfrom shub import config\n\n\ndef mock_conf(testcase, target=None, attr=None, conf=None):\n if not conf:\n conf = config.ShubConfig()\n conf.projects.update({\n 'default': 1,\n 'prod': 2,\n 'vagrant': 'vagrant/3',\n })\n conf.endpoints.update({\n 'vagrant': 'https://vagrant_ep/api/scrapyd/',\n })\n conf.apikeys.update({\n 'default': 32 * '1',\n 'vagrant': 32 * '2',\n })\n conf.version = 'version'\n if target:\n if attr:\n patcher = mock.patch.object(target, attr, return_value=conf,\n autospec=True)\n else:\n patcher = mock.patch(target, return_value=conf, autospec=True)\n else:\n patcher = mock.patch('shub.config.load_shub_config', return_value=conf,\n autospec=True)\n patcher.start()\n testcase.addCleanup(patcher.stop)\n return 
conf\n","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"177182402","text":"'''\nCreated on Apr 17, 2018\n\n@author: mac\n'''\nimport csv\nfrom StdSuites.Table_Suite import row\n\ndef createList(fileName):\n backUpList = []\n with open(fileName, 'r') as f:\n openFile = csv.reader(f)\n for row in openFile:\n print(row[0])\n data=row[0]\n backUpList.append(data)\n print(backUpList)\n return(backUpList)\n\ndef main(backUpGoalies,NHLdata):\n checkName = createList(backUpGoalies)\n data = open(NHLdata, 'r')\n output = open(\"NHLDataClean.csv\", 'w')\n writer = csv.writer(output)\n print(checkName)\n \n for row in csv.reader(data):\n name=row[15]\n #type(name)\n for i in range(0,54):\n if checkName[i]==name:\n writer.writerow(row) \n \nmain(\"NHLbackups.csv\", \"NHLgoalieData.csv\")\n \n \n \n ","sub_path":"backUpGoaliesOnly.py","file_name":"backUpGoaliesOnly.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"255839689","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndf = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)\ny = df.iloc[0:100, 4].values\ny = np.where(y == 'Iris-setosa', -1, 1)\nx = df.iloc[0:100, [0,2]].values\nplt.scatter(x[:50, 0], x[:50, 1], color='red', marker='o', label='satosa')\nplt.scatter(x[50:100, 0], x[50:100, 1], color='b', marker='x', label='versicolor')\nplt.xlabel('sepal length[cm]')\nplt.ylabel('petal length[cm]')\nplt.legend(loc='upper left')\nplt.show()\n","sub_path":"2/p28_get_data.py","file_name":"p28_get_data.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"208664247","text":"#!/usr/bin/env python\n\nimport sys\nimport time\nfrom webcam import webcam_snap\nimport RPi.GPIO as GPIO\nfrom blinkstick import blinkstick\n\nbstick = blinkstick.find_first()\n\nif bstick is None:\n sys.exit(\"BlinkStick not found...\")\n\nif len(sys.argv) == 2:\n\tcolor = sys.argv[1]\nelse:\n\tcolor = 'red'\n\nfor _ in range(3):\n bstick.set_color(name=color)\n time.sleep(0.25)\n bstick.turn_off()\n time.sleep(0.25)\n","sub_path":"demo_blinkstick.py","file_name":"demo_blinkstick.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87638264","text":"import uuid\nfrom boto3.dynamodb.conditions import Key, Attr\n\nfrom utilities.GameState import GameState\nfrom utilities.errors import NotFound, Conflict\nfrom DropTokenGame import DropTokenGame\n\n\nclass DropTokenSession(object):\n \"\"\"\n Class that handles interactions and transformations from the database\n \"\"\"\n def __init__(self, db, event):\n self.db = db\n self.event = event\n self.game_data = None\n\n def create_game(self) -> str:\n \"\"\"\n Creates a game session with a unique identifier, and the payload that the user submitted. 
Additional validation\n Could be performed here\n :return: string of game ID\n \"\"\"\n game_id = str(uuid.uuid4()) # Generate random unique identifier\n self.db.put_item(\n Item={\n 'gameId': game_id,\n 'state': GameState.ACTIVE.val(),\n 'players': self.event['body']['players'],\n 'rows': self.event['body']['rows'],\n 'columns': self.event['body']['columns'],\n 'moves': [],\n 'winner': ''\n }\n )\n return game_id\n\n def get_active_games(self) -> []:\n \"\"\"\n Retrieves all active games\n Does a table scan (can be slow, but works fine given the requirements)\n In the future if these needed to scale a lot, would need a continuation token / while loop\n :return: Array of game IDs\n \"\"\"\n # Retrieve results\n response = self.db.scan(\n FilterExpression=Attr('state').eq(GameState.ACTIVE.val())\n )\n # Parse for just game ids\n game_ids = []\n for game in response['Items']:\n game_ids.append(game['gameId'])\n\n return game_ids\n\n def get_game(self) -> dict:\n \"\"\"\n Retrieves the game, and will throw a 404 error if it cannot locate the game ID\n :return: dict of all game data (unparsed)\n \"\"\"\n try:\n response = self.db.query(\n KeyConditionExpression=Key('gameId').eq(self.event['gameId'])\n )\n self.game_data = response['Items'][0]\n return response['Items'][0]\n except Exception as _:\n raise NotFound(\"Game not found.\")\n\n def retrieve_moves(self, start: int = None, until: int = None) -> []:\n \"\"\"\n Retrieves the game moves, with optional query string parameters that can select a subset of moves in the array\n :param start: the starting position of the slice of the moves array\n :param until: the ending position of the slice array\n :return: [] - All moves or subset of moves played\n \"\"\"\n # Takes a slice of the moves, if start and until are blank, it selects the whole array\n moves = self.game_data['moves'][start:until]\n if len(moves) == 0:\n raise NotFound('No moves found.')\n\n for item in moves:\n item['column'] = int(item['column'])\n return moves\n\n def quit_game(self) -> None:\n \"\"\"\n User selects to quit the game, which posts a move without a column and a QUIT 'type'\n :return: Nothing\n \"\"\"\n # Append new move to move array (potentially make 'moves' a model)\n self.game_data['moves'].append({\n 'type': 'QUIT',\n 'player': str(self.event['playerId'])\n })\n # Update game state and moves array\n self.db.update_item(\n Key={'gameId': self.event['gameId']},\n UpdateExpression=\"set moves=:m, #st=:s\",\n ExpressionAttributeValues={\n ':m': self.game_data['moves'],\n ':s': GameState.COMPLETE.val()\n },\n ExpressionAttributeNames={\n \"#st\": \"state\"\n }\n )\n\n def create_move(self):\n \"\"\"\n Creates a move to be saved into the database. 
The move is evaluated on whether or not it is the winning move\n :return: {} - Reference to the location of the move (part of a URL to invoke via the API)\n \"\"\"\n # Retrieve latest move number and player, validate that it is in fact the person's turn\n last_player, num = self.get_latest_move()\n if last_player == self.event['playerId']:\n raise Conflict(\"It is not this player's turn yet.\")\n\n # Append move to game session data\n self.game_data['moves'].append({\n \"type\": \"MOVE\",\n \"player\": self.event['playerId'],\n \"column\": self.event['body']['column']\n })\n\n # Retrieve the resultant state of the move just made\n board_state, winner, state = self.get_win_state()\n\n # If this is the final move, and a winner hasn't been declared, we will mark the game as DONE\n total_possible_moves = int(self.game_data['columns']) * int(self.game_data['rows'])\n if len(winner) == 0 and num + 1 >= total_possible_moves:\n state = GameState.COMPLETE.val()\n\n # Update the database accordingly\n self.db.update_item(\n Key={'gameId': self.event['gameId']},\n UpdateExpression=\"set moves=:m, #st=:s, winner=:w, board_state=:b\",\n ExpressionAttributeValues={\n ':m': self.game_data['moves'],\n ':s': state,\n ':w': winner,\n ':b': board_state\n },\n ExpressionAttributeNames={\n \"#st\": \"state\"\n }\n )\n return {\n 'move': f\"{self.event['gameId']}/moves/{num}\"\n }\n\n def get_win_state(self):\n \"\"\"\n Validates the current state of the board to determine if the board has entered in a win condition\n :return: Tuple - board state, winning player (if any), and game state (i.e. DONE, IN_PROGRESS)\n \"\"\"\n # Get board array, and set to None so game machine can create it\n board_state = self.game_data['board_state'] if 'board_state' in self.game_data else None\n\n # Designate the player as either 0 or 1, based on their position in the array\n current_player_token = 0 if self.game_data['players'][0] == self.event['playerId'] else 1\n\n # Instantiate a model of the game, so we can check for a win state\n dt = DropTokenGame(board_state, int(self.game_data['columns']), int(self.game_data['rows']))\n dt.set_player(current_player_token)\n dt.set_move(self.event['body']['column'])\n\n # Perform check only if there are the required amount of moves to win\n if len(self.game_data['moves']) >= (len(self.game_data['players']) * dt.win_length) - 1:\n win_state = dt.get_win_state()\n else:\n win_state = False\n\n return (dt.board_state, self.event['playerId'], GameState.COMPLETE.val()) if win_state is True \\\n else (dt.board_state, '', GameState.ACTIVE.val())\n\n def get_latest_move(self):\n \"\"\"\n Retrieves the last player to make a move and the total count of moves made\n :return: Tuple - last player ID, current total move count\n \"\"\"\n count = len(self.game_data['moves'])\n # Check if this is the first move being made\n if count > 0:\n last_player = self.game_data['moves'][count-1]['player']\n else:\n last_player = ''\n\n return last_player, count\n","sub_path":"services/drop_token/DropTokenSession.py","file_name":"DropTokenSession.py","file_ext":"py","file_size_in_byte":7342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"458961620","text":"'''\n Visualize some basic statistics of the dataset and try to understand it before building a model to classify it.\n'''\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load some of the data from folders and convert the data into a pandas 
dataframe\n\na=pd.DataFrame()\nb=pd.DataFrame()\nc=pd.DataFrame()\n\nfor num in range(0,43):\n for i in range(1,6):\n\n # letter 'a'\n # Columns 1 to 6 represent the six different features in the dataset.\n file_a=pd.read_csv(\"./unnamed_train_data/student\"+str(num)+\"/\"+\"a\"+\"_\"+str(i)+\".csv\",header=None,delimiter=',',usecols=[1,2,3,4,5,6])\n a=pd.concat([a,file_a])\n\n # letter 'b'\n file_b = pd.read_csv(\"./unnamed_train_data/student\" + str(num) + \"/\" + \"b\" + \"_\" + str(i) + \".csv\", header=None,delimiter=',', usecols=[1, 2, 3, 4, 5, 6])\n b = pd.concat([b, file_b])\n\n # letter 'c'\n file_c = pd.read_csv(\"./unnamed_train_data/student\" + str(num) + \"/\" + \"c\" + \"_\" + str(i) + \".csv\", header=None,delimiter=',', usecols=[1, 2, 3, 4, 5, 6])\n c = pd.concat([c, file_c])\n\n\n\na_mean=a.mean()\nb_mean=b.mean()\nc_mean=c.mean()\n\na_std=a.std()\nb_std=b.std()\nc_std=c.std()\n\n#plotting\n\nx=['ax','ay','az','wx','wy','wz']\nx_pos=[i for i, _ in enumerate(x)]\n\nplt.figure()\nplt.bar(x_pos,a_mean,yerr=a_std)\nplt.xlabel(\"Features\")\nplt.ylabel(\"Mean\")\nplt.title(\"Mean and Standard Deviation for Gesture a\")\nplt.xticks(x_pos,x)\nplt.savefig('a_mean.png')\n\nplt.figure()\nplt.bar(x_pos,b_mean,yerr=b_std)\nplt.xlabel(\"Features\")\nplt.ylabel(\"Mean\")\nplt.title(\"Mean and Standard Deviation for Gesture b\")\nplt.xticks(x_pos,x)\nplt.savefig('b_mean.png')\n\nplt.figure()\nplt.bar(x_pos,c_mean,yerr=c_std)\nplt.xlabel(\"Features\")\nplt.ylabel(\"Mean\")\nplt.title(\"Mean and Standard Deviation for Gesture c\")\nplt.xticks(x_pos,x)\nplt.savefig('c_mean.png')","sub_path":"bin_data.py","file_name":"bin_data.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"492100076","text":"# Copyright 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\n\"\"\"\nHelpers to discover information about platforms as defined by fbcode\n\"\"\"\n\nload(\"@bazel_skylib//lib:paths.bzl\", \"paths\")\nload(\n \"@fbcode_macros//build_defs/config:read_configs.bzl\",\n \"read_boolean\",\n \"read_string\",\n)\nload(\"@fbcode_macros//build_defs/lib:default_platform.bzl\", _get_default_platform = \"get_default_platform\")\nload(\"@fbcode_macros//build_defs:compiler.bzl\", \"compiler\")\nload(\"@fbcode_macros//build_defs:config.bzl\", \"config\")\nload(\"@fbcode_macros//build_defs:platform_overrides.bzl\", \"platform_overrides\")\nload(\"@fbcode_macros//build_defs:third_party_config.bzl\", \"third_party_config\")\nload(\"@fbsource//tools/build_defs:host_arch.bzl\", \"host_arch\")\n\n_all_platforms = third_party_config[\"platforms\"].keys()\n\ndef __get_platforms_for_architecture(arch):\n return sorted([\n name\n for name, details in third_party_config[\"platforms\"].items()\n if details[\"architecture\"] == arch\n ])\n\n_all_platforms_for_current_architecture = __get_platforms_for_architecture(host_arch.HOST_ARCH_STR)\n\ndef _transform_platform_overrides(cell_to_path_to_platforms_mapping):\n \"\"\"\n Takes a mapping of cell/path/platform and validates and transforms it\n\n The original form: {cell: {path: [plat1, plat2]}} is turned into\n {cell: {path: {arch1: plat1, arch2: plat2}}}. 
Platforms are also\n validated to ensure that they are present in the overall configuration\n\n Fails if a platform is invalid or if no platforms are present for\n a given directory\n\n Args:\n cell_to_path_to_platforms_mapping: A mapping of {cell: {path:\n [platform...]}} as described above\n\n Returns:\n A validated mapping of {cell: {path: {arch: platform}}}, with\n architectures omitted if they don't have any platforms\n \"\"\"\n ret = {}\n for cell, paths_to_platforms in cell_to_path_to_platforms_mapping.items():\n ret[cell] = {}\n for path, platforms in paths_to_platforms.items():\n for platform in platforms:\n if platform not in third_party_config[\"platforms\"]:\n fail(\n \"Path %s has invalid platform %s. Must be one of %s\" % (\n path,\n platform,\n \", \".join(sorted(_all_platforms)),\n ),\n )\n platform_arch = \\\n third_party_config[\"platforms\"][platform][\"architecture\"]\n if path not in ret[cell]:\n ret[cell][path] = {}\n ret[cell][path][platform_arch] = platform\n continue\n\n if platform_arch in ret[cell][path]:\n fail(\n \"Path %s has both platform %s and %s for architecture %s\" % (\n path,\n ret[cell][path][platform_arch],\n platform,\n platform_arch,\n ),\n )\n else:\n ret[cell][path][platform_arch] = platform\n return ret\n\n_platform_overrides = _transform_platform_overrides(platform_overrides)\n\ndef _get_platform_overrides():\n \"\"\"\n Gets a validated and modified version of platform_overrides\n\n Returns:\n Overrides in @fbcode_macros//build_defs:platform_overrides.bzl\n transformed by _transform_platform_overrides\n \"\"\"\n return _platform_overrides\n\ndef _get_platform_override():\n \"\"\" Returns the user-specified fbcode platform override \"\"\"\n return native.read_config(\"fbcode\", \"platform\")\n\ndef _get_platform_for_base_path(base_path):\n \"\"\"\n Returns `get_platform_for_cell_path_and_arch()` for the given base_path\n\n Args:\n base_path: The base path within the default repository\n \"\"\"\n return _get_platform_for_cell_path_and_arch(\n config.get_current_repo_name(),\n base_path,\n host_arch.HOST_ARCH_STR,\n )\n\ndef _get_platform_for_current_buildfile():\n \"\"\" Returns `get_platform_for_cell_path_and_arch()` for the build file that calls this method \"\"\"\n return _get_platform_for_cell_path_and_arch(\n config.get_current_repo_name(),\n native.package_name(),\n host_arch.HOST_ARCH_STR,\n )\n\ndef _get_platform_for_cell_path_and_arch(cell, path, arch):\n \"\"\"\n Get the platform for a given cell and path within that cell.\n\n Args:\n cell: The cell name (specified by buckconfig value fbcode.current_repo)\n path: The relative path within the repository. This should not include\n any file names, just the directory\n arch: An architecture string. This is x86_64 or aarch64 right now\n\n Returns:\n The deepest nested subdirectory from\n @fbcode_macros//build_defs:platform_overrides.bzl that matches `path`\n and is valid for the current host architecture. If nothing matches, the\n default platform is returned\n \"\"\"\n\n platform_override = _get_platform_override()\n if platform_override != None:\n return platform_override\n\n per_cell_overrides = _platform_overrides.get(cell)\n if per_cell_overrides != None:\n # Make \"foo\" loop twice. Once for \"foo\", once for \"\". 
foo/bar gets you\n # foo/bar, foo, and \"\"\n count = path.count(\"/\") + 2\n for _ in range(count):\n ret = per_cell_overrides.get(path)\n if ret != None and arch in ret:\n return ret[arch]\n path = paths.dirname(path)\n\n # If we require a platform to be found, fail at this point.\n if read_boolean(\"fbcode\", \"require_platform\", False):\n fail(\n \"Cannot find fbcode platform to use for architecture {}\"\n .format(arch),\n )\n\n return _get_default_platform()\n\ndef _to_buck_platform(platform, compiler):\n \"\"\"\n Convert a given fbcode platform name into the Buck (C++) platform name.\n As the latter is compiler-family-specific, while the former is not, it\n at least takes into account the compiler chosen by the build mode.\n \"\"\"\n\n fmt = read_string(\"fbcode\", \"buck_platform_format\", \"{platform}\")\n return fmt.format(platform = platform, compiler = compiler)\n\ndef _get_buck_platform_for_base_path(base_path):\n \"\"\"\n Return the Buck platform to use for a deployable rule at the given base\n path, running some consistency checks as well.\n \"\"\"\n\n return _to_buck_platform(\n _get_platform_for_base_path(base_path),\n compiler.get_compiler_for_base_path(base_path),\n )\n\ndef _get_buck_platform_for_current_buildfile():\n return _get_buck_platform_for_base_path(native.package_name())\n\ndef _get_fbcode_and_buck_platform_for_current_buildfile():\n \"\"\"\n Returns both the general fbcode platform and the buck platform as a tuple\n\n The fbcode platform is used for things like paths and build info stamping\n The buck platform is used internally in buck to specify which toolchain\n settings to use.\n\n e.g. One might get gcc-5-glibc-2.23, gcc-5-glibc-2.23-clang back.\n gcc-5-glibc-2.23 would be used when finding third-party packages, but\n gcc-5-glibc-2.23-clang would be used in cxx_binary rules to force clang\n compiler and build flags to be used for a binary.\n\n This method just reduces some duplicate work that would be done if both\n get_platform_for_current_buildfile() and get_buck_platform_for_current_buildfile()\n were run.\n \"\"\"\n package = native.package_name()\n fbcode_platform = _get_platform_for_base_path(package)\n buck_platform = _to_buck_platform(fbcode_platform, compiler.get_compiler_for_base_path(package))\n return fbcode_platform, buck_platform\n\ndef _get_platform_architecture(platform):\n \"\"\" Gets the architecture for a specific platform \"\"\"\n if platform == \"default\": # We're using the native platform, bail!\n return host_arch.HOST_ARCH_STR\n return third_party_config[\"platforms\"][platform][\"architecture\"]\n\ndef _get_platforms_for_host_architecture():\n return _all_platforms_for_current_architecture\n\ndef _get_platforms_for_architecture(arch):\n return __get_platforms_for_architecture(arch)\n\ndef _get_all_platforms():\n return _all_platforms\n\ndef _get_buck_python_platform(platform, major_version, flavor = \"\"):\n \"\"\"\n Gets the platform string to pass to buck\n\n Args:\n platform: The fbcode platform\n major_version: The major version of the platform\n flavor: The type of interpreter\n\n Returns:\n A buck-compatible platform string\n \"\"\"\n return \"{flavor}py{major}-{platform}\".format(\n flavor = flavor + \"_\" if flavor else \"\",\n major = major_version,\n platform = platform,\n )\n\ndef _escape(platform):\n \"\"\"Escapes platform characters colliding with RegEx special characters.\n\n This is needed when passing platform name to attributes expecting regular\n expressions, e.g., platform_deps.\n\n Even though regular 
expressions do have much more special characters,\n    fbcode platforms do not use any except \".\". This logic would have to be\n    updated if this assumption is invalidated.\n\n    Args:\n        platform: The fbcode platform\n\n    Returns:\n        A platform name with all RegEx special characters escaped.\n    \"\"\"\n    return platform.replace(\".\", \"\\\\.\")\n\nplatform_utils = struct(\n    escape = _escape,\n    get_all_platforms = _get_all_platforms,\n    get_buck_platform_for_base_path = _get_buck_platform_for_base_path,\n    get_buck_platform_for_current_buildfile = _get_buck_platform_for_current_buildfile,\n    get_default_platform = _get_default_platform,\n    get_fbcode_and_buck_platform_for_current_buildfile = _get_fbcode_and_buck_platform_for_current_buildfile,\n    get_platform_architecture = _get_platform_architecture,\n    get_platform_for_base_path = _get_platform_for_base_path,\n    get_platform_for_cell_path_and_arch = _get_platform_for_cell_path_and_arch,\n    get_platform_for_current_buildfile = _get_platform_for_current_buildfile,\n    get_platform_overrides = _get_platform_overrides,\n    get_platforms_for_architecture = _get_platforms_for_architecture,\n    get_platforms_for_host_architecture = _get_platforms_for_host_architecture,\n    to_buck_platform = _to_buck_platform,\n    get_buck_python_platform = _get_buck_python_platform,\n)\n","sub_path":"infra_macros/fbcode_macros/build_defs/platform_utils.bzl","file_name":"platform_utils.bzl","file_ext":"bzl","file_size_in_byte":10628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"360394704","text":"#===istalismanplugin===\n# -*- coding: utf-8 -*-\n\n\nJOINSF={}\n\ndef leave_nick_stat(groupchat,nick,code,reason):\n    if len(nick)>19:\n        return\n    if groupchat in GROUPCHATS:\n        if not groupchat+nick in JOINSF:\n            JOINSF[groupchat+nick] = {'joins':time.time()}\n        else:\n            JOINSF[groupchat+nick]['joins'] = time.time()\n\n\ndef show_joins(type,source,parameters):\n    if not source[1] in GROUPCHATS:\n        return\n    if not parameters:\n        reply(type,source,u'who?')\n        return\n    else:\n        if source[1]+parameters in JOINSF and GROUPCHATS[source[1]][parameters]['ishere']==0:\n            seen=int(time.time() - JOINSF[source[1]+parameters]['joins'])\n            mem = timeElapsed(seen)\n            reply(type,source,u'last saw user '+parameters+u' '+mem+u' ago')\n            return\n        else:\n            if parameters in GROUPCHATS[source[1]]:\n                if GROUPCHATS[source[1]][parameters]['ishere']==1:\n                    reply(type,source,u'they are still here!')\n                    return\n            else:\n                reply(type,source,u'no such user has ever been here!')\n\n\nregister_leave_handler(leave_nick_stat)\nregister_command_handler(show_joins, 'seen', ['все'], 0, 'shows when a user last visited the chat', 'seen <nick>', ['seen abyba'])\n\n","sub_path":"plugins/seen_plugin.py","file_name":"seen_plugin.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"443886919","text":"import pymysql\ndb = pymysql.connect(\n    \"localhost\",\n    \"root\",\n    \"Zqt_1997\",\n    \"test\",\n    use_unicode=True,\n    charset=\"utf8\")\ncursor = db.cursor()\n\n\ndef load_data():\n    users = {}\n    sql = 'select * from user'\n    cursor.execute(sql)\n    rows = cursor.fetchall()\n    for row in rows:\n        userid = row[0]\n        data = row[1:-3]\n        kind = row[-1]\n        users[userid] = {'data': data, 'kind': kind}\n    return users\n\n\ndef show(users, kind, size):\n    show_list = []\n    for userid in users.keys():\n        user_kind = users[userid]['kind']\n        if user_kind == kind:\n            user_data = users[userid]['data']\n            show_list.append({'userid': 
userid, 'data': user_data})\n for i in range(size):\n userid = show_list[i]['userid']\n data = show_list[i]['data']\n\n\nif __name__ == '__main__':\n users = load_data()\n","sub_path":"show_users.py","file_name":"show_users.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467971095","text":"from flask import Flask\r\nfrom flask import render_template\r\nfrom flask import request\r\nimport sqlite3\r\nfrom flask import json, session\r\nfrom validate_email import validate_email\r\nimport re\r\nimport flask\r\napp = Flask(__name__)\r\n\r\n#Connect to database\r\nDatabase = 'ca_firm.db'\r\n\r\n#On start go to registration page\r\n@app.route('/', methods=['POST', 'GET'])\r\ndef home():\r\n\t\t\treturn render_template(\"ClientRegister.html\")\r\n\r\n\r\n#To insert values to a table in a db\r\ndef insert(table, fields=(), values=()):\r\n # g.db is the database connection\r\n con = sqlite3.connect(Database)\r\n\r\n cur = con.cursor()\r\n print(\"connected\")\r\n query = 'INSERT INTO %s (%s) VALUES (%s)' % (\r\n table,\r\n ', '.join(fields),\r\n ', '.join(['?'] * len(values))\r\n )\r\n cur.execute(query, values)\r\n\r\n id = cur.lastrowid\r\n cur.close()\r\n con.commit()\r\n return id\r\n\r\n#To query a table in db\r\ndef query_db(query, args=(), one=False):\r\n con = sqlite3.connect(Database)\r\n cur = con.execute(query, args)\r\n rv = cur.fetchall()\r\n cur.close()\r\n con.commit()\r\n return (rv[0] if rv else None) if one else rv\r\n\r\n#Sign up code with validation of the fields\r\n@app.route('/signUp',methods=['POST'])\r\ndef signUp():\r\n print(\"here\")\r\n print(request.form)\r\n # read the posted values from the UI\r\n _name = request.form['username']\r\n _fname = request.form['firstname']\r\n _lname = request.form['lastname']\r\n _email = request.form['email']\r\n _password = request.form['password']\r\n _confpassword = request.form['confpassword']\r\n _num = request.form['number']\r\n flag = 0\r\n print(request.form['usertype'])\r\n if(request.form['usertype']==\"0\"):\r\n \ta = request.form['aadhar']\r\n \tp = request.form['pan']\r\n \tprint(a and p)\r\n \tif( a and p ):\r\n \t\tflag = 1\r\n else:\r\n \tif(request.form['employeeID']):\r\n \t\tflag = 1\r\n # validate the received values\r\n\r\n #Check if uname exists:\r\n exists = [0 , 0,0,0,0] #utype , uname, emai, aadhar, pan,\r\n if(request.form['usertype']==\"0\"):\r\n n = query_db('SELECT * FROM client WHERE username = ?',\r\n [_name], one=True)\r\n #exists[0] = int(request.form['usertype'])\r\n if n is not None:\r\n \tprint(\"This username exists\")\r\n \texists[0] = 1\r\n e = query_db(\"SELECT * FROM client WHERE email_id = ?\", [_email], one=True)\r\n if e is not None:\r\n \tprint(\"This email exists\")\r\n \texists[1] = 1\r\n \r\n a = query_db(\"SELECT * FROM client WHERE aadhar_no = ?\", [request.form['aadhar']], one=True)\r\n if a is not None:\r\n \tprint(\"This aadhar exists\")\r\n \texists[2] = 1\r\n \r\n p = query_db(\"SELECT * FROM client WHERE pan_no = ?\", [request.form['pan']], one=True)\r\n if p is not None:\r\n \tprint(\"This pan exists\")\r\n \texists[3] = 1\r\n elif(request.form['usertype']==\"1\"):\r\n print(\"here in check emp\")\r\n n = query_db('SELECT * FROM employee WHERE username = ?',\r\n [_name], one=True)\r\n #exists[0] = int(request.form['usertype'])\r\n if n is not None:\r\n print(\"This username exists\")\r\n exists[0] = 1\r\n e = query_db(\"SELECT * FROM employee WHERE email_id = ?\", [_email], 
one=True)\r\n if e is not None:\r\n print(\"This email exists\")\r\n exists[1] = 1 \r\n print(exists)\r\n else:\r\n n = query_db('SELECT * FROM partner WHERE username = ?',\r\n [_name], one=True)\r\n #exists[0] = int(request.form['usertype'])\r\n if n is not None:\r\n print(\"This username exists\")\r\n exists[0] = 1\r\n e = query_db(\"SELECT * FROM partner WHERE email_id = ?\", [_email], one=True)\r\n if e is not None:\r\n print(\"This email exists\")\r\n exists[1] = 1 \r\n\r\n\r\n\r\n\r\n print(_name, _lname, validate_email(_email) , (_password == _confpassword), flag)\r\n if _name and _lname and _num and _fname and _email and _password and validate_email(_email) and (_password == _confpassword) and flag and not(exists[0]==1 or exists[1]==1 or exists[2] ==1 or exists[3] == 1):\r\n print(\"ALL OK\")\r\n \t#INSERT TO DATABASE\r\n if(request.form['usertype']==\"0\"): \r\n print(\"here in client\")\r\n cols = (\"username\", \"password\", \"first_name\", \"last_name\",\"email_id\", \"company\", \"contact_no\", \"aadhar_no\", \"pan_no\")\r\n vals = (_name, _password , _fname, _lname , _email , int(request.form['clientType']), _num , request.form['aadhar'] , request.form['pan'])\r\n insert(\"client\" , cols, vals)\r\n desc = request.form.getlist('filedesc')\r\n print(desc)\r\n file = request.files.getlist('files[]')\r\n i = 0\r\n print(file)\r\n for f in file:\r\n print(f)\r\n filename = secure_filename(f.filename)\r\n cols = (\"user\" , \"document\", \"description\" , \"filename\")\r\n vals = (_name , sqlite3.Binary(f.read()) , desc[i], filename)\r\n i+=1\r\n insert(\"client_files\" , cols, vals)\r\n print(file)\r\n print(request.form)\r\n print(request.files)\r\n\r\n\r\n\r\n\r\n\r\n\r\n print(\"done\")\r\n\t \t#print(rows1)\r\n elif(request.form['usertype']==\"1\"):\r\n print(\"here in employee\")\r\n cols = (\"username\", \"password\", \"first_name\", \"last_name\",\"email_id\", \"contact_no\", \"employee_id\")\r\n vals = (_name, _password , _fname, _lname , _email , _num , request.form['employeeID'] )\r\n insert(\"employee\" , cols, vals)\r\n print(\"done\")\r\n else:\r\n print(\"here in partner\")\r\n cols = (\"username\", \"password\", \"first_name\", \"last_name\",\"email_id\", \"contact_no\", \"employee_id\")\r\n vals = (_name, _password , _fname, _lname , _email , _num , request.form['employeeID'] )\r\n insert(\"partner\" , cols, vals)\r\n print(\"done\")\r\n\r\n return json.dumps({'html':'All fields good !!','status':0})\r\n elif(exists[0]==1 or exists[1]==1 or exists[2] ==1 or exists[3] == 1):\r\n \treturn json.dumps({'html':'All fields good !!','status':2, \r\n \t\t'username':exists[0],\r\n \t\t'email':exists[2],\r\n \t\t'aadhar':exists[2],\r\n \t\t'pan':exists[3],\r\n \t\t})\r\n else:\r\n return json.dumps({'html':'Enter the required fields','status':1})\r\n\r\n#Login and check type of user to redirect to correct page\r\n@app.route('/logIn',methods=['POST'])\r\ndef logIn():\r\n #print(request.form)\r\n t = request.form['typeofuser']\r\n name = request.form['uname']\r\n password = request.form['pwd']\r\n print(\"In log in\")\r\n print(name , password , t)\r\n if(t==\"0\"):\r\n a = query_db(\"SELECT * FROM client WHERE username = ?\", [name], one=True)\r\n print(\"Values\" , a)\r\n if(a is None):\r\n return json.dumps({'status':0})\r\n else:\r\n if(a[1]==password):\r\n \r\n #render_template(\"ClientHome.html\")\r\n session['username'] = name\r\n return json.dumps({'status':1 , 'type':int(a[5]) })\r\n\r\n else:\r\n return json.dumps({'status':2})\r\n elif(t==\"1\"):\r\n a = 
query_db(\"SELECT * FROM employee WHERE username = ?\", [name], one=True)\r\n print(\"Values\" , a)\r\n if(a is None):\r\n return json.dumps({'status':0})\r\n else:\r\n if(a[1]==password):\r\n \r\n #render_template(\"ClientHome.html\")\r\n session['username'] = name\r\n return json.dumps({'status':1 , 'type':int(t) })\r\n\r\n else:\r\n return json.dumps({'status':2})\r\n else:\r\n a = query_db(\"SELECT * FROM client WHERE partner = ?\", [name], one=True)\r\n print(\"Values\" , a)\r\n if(a is None):\r\n return json.dumps({'status':0})\r\n else:\r\n if(a[1]==password):\r\n \r\n #render_template(\"ClientHome.html\")\r\n session['username'] = name\r\n return json.dumps({'status':1 , 'type':int(t) })\r\n\r\n else:\r\n return json.dumps({'status':2})\r\n\r\n\r\n#To render client page and get the necessary data to display\r\n@app.route('/clientHome')\r\ndef clientHome():\r\n \r\n name = session['username'] \r\n s = query_db(\"SELECT token_no ,current_timestamp, quotation, type_of_service, status_for_client, emp, estimated_time_of_completion FROM service \\\r\n JOIN service_status ON token_no = service_status.token \\\r\n JOIN service_allocation ON token_no = service_allocation.token \\\r\n WHERE user = ?\", [name])\r\n print(s)\r\n #Handle null values in tables\r\n for i in range(len(s)):\r\n x = s[i]\r\n print(x[5])\r\n print(x[6])\r\n if(x[5] is None):\r\n x =list(x)\r\n x[5]=\"Not updated yet\"\r\n s[i] = tuple(x)\r\n if(x[6] is None):\r\n x =list(x)\r\n x[6]=\"Not allocated yet\"\r\n s[i] = tuple(x)\r\n m = query_db(\"SELECT sender , current_timestamp ,message FROM messages \\\r\n WHERE recepient = ? ORDER BY current_timestamp DESC\", [name])\r\n print(m)\r\n files = query_db(\"SELECT token , filename , completed_service_docs.description FROM completed_service_docs \\\r\n JOIN service ON token = token_no \\\r\n WHERE user = ? \", [name])\r\n print(files)\r\n invoice = query_db(\"SELECT token, generated_by , current_timestamp ,filename, invoice_amount FROM completed_service_invoice \\\r\n JOIN service ON token = token_no \\\r\n WHERE user = ? ORDER BY current_timestamp DESC\", [name])\r\n print(invoice)\r\n return render_template(\"ClientHome.html\", username=name, items = s, messages= m, files=files, invoice=invoice)\r\n\r\n\r\n#To submit feedbak of service on click of button\r\n\r\n@app.route('/submitFeedback', methods=['POST'])\r\ndef submitFeedback():\r\n data = request.json\r\n print(\"Submitting feedback\")\r\n print(data)\r\n f = data[\"feedback\"]\r\n print(f)\r\n t = int(data[\"token\"])\r\n print(t)\r\n s = query_db(\"UPDATE service SET feedback = ? 
\\\r\n        WHERE token_no = ?\",[data['feedback'], data['token']])\r\n    print(s)\r\n    s= query_db(\"SELECT feedback FROM service \\\r\n        WHERE token_no = ?\",[ data['token']])\r\n    print(s)\r\n\r\n    return json.dumps({'status':2})\r\n\r\n\r\n#To upload files for a service for a user\r\n\r\n@app.route('/serviceFileUpload', methods=['POST'])\r\ndef serviceFileUpload():\r\n    #data = request.json\r\n    d = request.form['serv_desc']\r\n    t = request.form['serv_token']\r\n    print(d)\r\n    f = request.files.getlist('serv_file')[0]\r\n    filename = f.filename\r\n    print(filename)\r\n    cols = (\"token\" , \"document\", \"description\" , \"filename\")\r\n    vals = (t , sqlite3.Binary(f.read()), d, filename)\r\n    insert(\"service_docs\" , cols, vals)\r\n    print(\"Uploaded\")\r\n    return json.dumps({'status':2})\r\n\r\n#To send messages from one user to another\r\n\r\n@app.route('/sendMessage', methods=['POST'])\r\ndef sendMessage():\r\n    data = request.json\r\n    print(\"Sending message\")\r\n    print(data)\r\n    c = data[\"content\"]\r\n    t = data[\"to\"]\r\n    f = data[\"from\"]\r\n\r\n    z = query_db(\"SELECT * from client where username = ?\", [t])\r\n    print(z)\r\n    if(not(z)):\r\n        z = query_db(\"SELECT * from employee where username = ?\",[t])\r\n\r\n        if(not(z)):\r\n            z = query_db(\"SELECT * from partner where username = ?\", [t])\r\n            if (not(z)):\r\n                print(\"To username not there\")\r\n                return json.dumps({'status':0})\r\n    else:\r\n        print(t,f,c)\r\n    cols = (\"sender\" , \"recepient\", \"message\")\r\n    vals = (f,t,c)\r\n    insert(\"messages\" , cols, vals)\r\n\r\n    return json.dumps({'status':2})\r\n\r\n#To download files and invoice documents\r\n@app.route('/fileDownload', methods=['POST'])\r\ndef fileDownload():\r\n    data = request.json\r\n    print(\"Downloading file...\")\r\n    print(data)\r\n    f = data[\"filename\"]\r\n    f = f.strip()\r\n    t = int(data[\"token\"])\r\n    d = data[\"desc\"]\r\n    file = query_db(\"SELECT document FROM completed_service_docs \\\r\n        WHERE token = ? AND filename LIKE ? AND description LIKE ? \", [t,f,d])\r\n    print(file)\r\n    with open(\"files/\"+str(t)+\"_\"+f, 'wb') as output_file:\r\n        output_file.write(file[0][0])\r\n    return json.dumps({'status':2})
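\r\n# Note (an illustrative addition, not part of the original flow): uploads are\r\n# stored with sqlite3.Binary(f.read()) and come back from sqlite3 as a\r\n# bytes-like object, so writing row[0][0] in binary mode ('wb'), as\r\n# fileDownload does above, reproduces the uploaded file byte for byte.\r\n# A self-contained round trip using an in-memory database:\r\ndef _blob_roundtrip_demo():\r\n    con = sqlite3.connect(':memory:')\r\n    con.execute('CREATE TABLE docs (name TEXT, body BLOB)')\r\n    con.execute('INSERT INTO docs VALUES (?, ?)', ('x.bin', sqlite3.Binary(b'\\x00\\x01data')))\r\n    body = con.execute('SELECT body FROM docs').fetchone()[0]\r\n    assert bytes(body) == b'\\x00\\x01data'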
\r\n\r\n@app.route('/invoiceFileDownload', methods=['POST'])\r\ndef invoiceFileDownload():\r\n    data = request.json\r\n    print(\"Downloading file...\")\r\n    print(data)\r\n    f = data[\"filename\"]\r\n    f = f.strip()\r\n    t = int(data[\"token\"])\r\n    g = data[\"gen\"]\r\n    regex = re.compile(r'[\\n\\r\\t]')\r\n    g = regex.sub(\"\", g)\r\n    a = float(data[\"amt\"])\r\n    print(f,t,g,a)\r\n    file = query_db(\"SELECT invoice_document FROM completed_service_invoice \\\r\n        WHERE token = ? AND filename LIKE ?\", [t,f])\r\n    print(file)\r\n    with open(\"invoice/\"+str(t)+\"_\"+f, 'wb') as output_file:\r\n        output_file.write(file[0][0])\r\n    return json.dumps({'status':2})\r\n\r\n\r\n#Logout\r\n@app.route('/logout')\r\ndef logout():\r\n    # remove the username from the session if it is there\r\n    session.pop('username', None)\r\n\r\n    return render_template(\"ClientRegister.html\" )\r\n\r\n\r\ndef cover_str(cvr):\r\n    cvr = request.files['cover']\r\n    if cvr and allowed_file(cvr.filename):\r\n        filename = secure_filename(cvr.filename)\r\n        cvr = cvr.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n    return cvr\r\n\r\n#To submit a request from client\r\n@app.route('/submitRequest',methods=['POST'])\r\ndef submitRequest():\r\n    print(\"IN SUBMIT\")\r\n\r\n\r\n\r\n    username = session['username']\r\n    description = request.form['description']\r\n    service = request.form['service']\r\n\r\n    cols = (\"user\", \"type_of_service\", \"description\" , \"quotation\" , \"accepted\" , \"allocated\")\r\n    vals = (username , service , description, 0.0 , 0, 0)\r\n    print(username , service, description)\r\n    insert(\"service\" , cols, vals)\r\n    token = query_db(\"SELECT * FROM service WHERE user = ? AND type_of_service LIKE ? AND description LIKE ?\", [username , service, description], one=True)\r\n    token = token[1]\r\n    print(token)\r\n\r\n    cols = (\"token\" , \"completed\", \"verified\", \"remarks\", \"status_for_partner\", \"status_for_client\")\r\n    vals = (token , 0 , 0 , \"\", \"\" , \"Not Accepted\" )\r\n    insert(\"service_status\" , cols, vals)\r\n\r\n    cols = tuple([\"token\"])\r\n    vals = tuple([token])\r\n    insert(\"service_allocation\" , cols, vals)\r\n    #Handling file uploads\r\n    print(\"here in submit\")\r\n    print(service, description)\r\n    desc = request.form.getlist('filedesc')\r\n    print(desc)\r\n    file = request.files.getlist('files[]')\r\n    i = 0\r\n    for f in file:\r\n        print(f)\r\n        filename = secure_filename(f.filename)\r\n        cols = (\"token\" , \"document\", \"description\" , \"filename\")\r\n        vals = (token , sqlite3.Binary(f.read()) , desc[i], filename)\r\n        i+=1\r\n        insert(\"service_docs\" , cols, vals)\r\n    print(file)\r\n    print(request.form)\r\n    print(request.files)\r\n    return json.dumps({\"status\":0, \"token\":token})\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.secret_key = 'super secret key'\r\n    app.run(debug=True, port = 5002)\r\n","sub_path":"client_reg.py","file_name":"client_reg.py","file_ext":"py","file_size_in_byte":15257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"440833310","text":"def quersummen(n):\n    \"\"\"\n    Computes the digit sum of n in every base from 2 up to n\n\n    @param: int n the number, which also bounds the bases to consider\n    @return: None; prints the digit sums for ascending bases as one space-separated line\n    \"\"\"\n\n    X = []\n    rounds = n\n    temp = n\n    for b in range(2,rounds+1):\n        n = temp\n        Koeff = []\n        while n >= 1:\n            i = 0\n            while n/(b**i)>=b:\n                i = i + 1\n\n            k = 0\n            if b**i > n:\n                n = n\n            else:\n                while (k+1)*b**i <= n and k + 1 < b:\n                    k = k + 1\n                Koeff.append(k)\n                n = n%(k*b**i)\n\n        x = 0\n        for k in range(0,len(Koeff)):\n            x = x + Koeff[k]\n        X.append(str(x))\n\n    zahl = str(' '.join(X))\n\n    print(zahl)","sub_path":"quersummen.py","file_name":"quersummen.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
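# A compact cross-check for quersummen above (my sketch, not part of the
# original exercise): the digit sum of n in base b can also be computed with
# divmod instead of the explicit coefficient search.
def digit_sum(n, b):
    s = 0
    while n:
        n, r = divmod(n, b)
        s += r
    return s

# quersummen(5) prints "2 3 2 1": 5 is 101 in base 2 (sum 2), 12 in base 3
# (sum 3), 11 in base 4 (sum 2) and 10 in base 5 (sum 1), which matches
# ' '.join(str(digit_sum(5, b)) for b in range(2, 6))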
+{"seq_id":"86030880","text":"import pygame\n\n\nclass Player(pygame.sprite.Sprite):\n    \"\"\"\n    Main sprite controlled by user\n    \"\"\"\n\n    \"\"\"how many pixels the hero can move with one directional key down\"\"\"\n    MOVE_SPEED = 5\n\n    \"\"\"directions for images\"\"\"\n    DIRECTION_UP = 'up'\n    DIRECTION_RIGHT = 'right'\n    DIRECTION_DOWN = 'down'\n    DIRECTION_LEFT = 'left'\n\n    def __init__(self):\n        \"\"\"\n        constructor\n        \"\"\"\n        super(Player, self).__init__()\n\n        self._imageUp = None\n        self._imageRight = None\n        self._imageDown = None\n        self._imageLeft = None\n\n        self.surface = None\n        self.rect = None\n\n\n    def addSpriteImage(self, image, direction):\n        \"\"\"\n        set image according to a direction\n\n        :param image: Surface\n        :param direction: string\n        :return:\n        \"\"\"\n        if direction == Player.DIRECTION_UP:\n            self._imageUp = image\n        elif direction == Player.DIRECTION_RIGHT:\n            self._imageRight = image\n        elif direction == Player.DIRECTION_DOWN:\n            self._imageDown = image\n        elif direction == Player.DIRECTION_LEFT:\n            self._imageLeft = image\n        else:\n            raise Exception('wrong sprite image direction')\n\n\n    def initRect(self):\n        \"\"\"\n        init current image and rect\n        :return:\n        \"\"\"\n\n        self.surface = self._imageDown\n        self.rect = self.surface.get_rect()\n\n\n    def moveUp(self):\n        \"\"\"\n        move hero to the top\n        \"\"\"\n        self._move(0, -self.MOVE_SPEED)\n\n\n    def moveDown(self):\n        \"\"\"\n        move hero to the bottom\n        \"\"\"\n        self._move(0, self.MOVE_SPEED)\n\n\n    def moveLeft(self):\n        \"\"\"\n        move hero to the left\n        \"\"\"\n        self._move(-self.MOVE_SPEED, 0)\n\n\n    def moveRight(self):\n        \"\"\"\n        move hero to the right\n        \"\"\"\n        self._move(self.MOVE_SPEED, 0)\n\n\n    def _move(self, x, y):\n        \"\"\"\n        move hero on the screen\n\n        :param x: x position\n        :param y: y position\n        \"\"\"\n\n        if x < 0:\n            self.surface = self._imageLeft\n        elif x > 0:\n            self.surface = self._imageRight\n        elif y < 0:\n            self.surface = self._imageUp\n        elif y > 0:\n            self.surface = self._imageDown\n\n        self.rect = self.rect.move(x, y)","sub_path":"src/aemaeth/common/sprite/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"573611436","text":"import logging\n\nfrom google.protobuf.text_format import MessageToString\n\nfrom bonsai.proto.generator_simulator_api_pb2 import ServerToSimulator\nfrom bonsai.proto.generator_simulator_api_pb2 import SimulatorToServer\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DriverState(object):\n    UNREGISTERED = 0\n    REGISTERING = 10\n    REGISTERED = 20\n    ACTIVE = 30\n    FINISHED = 40\n\n\nclass EmptyMessageError(RuntimeError):\n    \"\"\"\n    Thrown when receiving an empty message when it shouldn't have been empty.\n    \"\"\"\n    def __init__(self, message_name):\n        super(EmptyMessageError, self).__init__(\n            'Expected a {} message but received nothing... '\n            '(sigh) absolutely nothing...'.format(message_name)\n        )\n\n\nclass MalformedMessageError(RuntimeError):\n    \"\"\"Thrown when receiving a malformed message (i.e. 
missing a field)\"\"\"\n def __init__(self, missing_field, message):\n super(MalformedMessageError, self).__init__(\n 'Could not locate {} in message {} - got {}'.format(\n missing_field,\n type(message).__name__,\n MessageToString(message, as_one_line=True)\n )\n )\n\n\nclass UnexpectedMessageError(RuntimeError):\n \"\"\"Thrown when an unexpected or unhandled message is received\"\"\"\n def __init__(self, expected, message):\n super(UnexpectedMessageError, self).__init__(\n 'Expected {} but got {}'.format(\n expected,\n MessageToString(message, as_one_line=True)\n )\n )\n\n\nclass Driver(object):\n \"\"\"\n A state-machine-esque entity that handles the logic of coordinating\n messages between a generator or simulator and the BRAIN backend. This will\n be the object that an event loop asynchronous library \"drives\".\n \"\"\"\n\n def __init__(self, **kwargs):\n self._state = DriverState.UNREGISTERED\n self._base_protocol = kwargs.pop('connection')\n\n def next(self, message):\n # type: (ServerToSimulator) -> SimulatorToServer\n \"\"\"\n This is the \"driving\" function, in that given an input that comes from\n a source (likely a websocket), do something with it depending on the\n state of the driver, the produce an output to be sent back to the\n server.\n :param message: Message from the server to be processed. This may be\n None, but how that is handled depends on the state and\n implementation of the derived driver.\n :type message: ServerToSimulator protobuf message.\n :return: A message to be sent back to the server. This may be None,\n indicating that no message needs to be sent back.\n :rtype: SimulatorToServer protobuf message.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def state(self):\n \"\"\"\n Returns the state of the driver.\n :return: State of the driver.\n :rtype: Member of the DriverState enumeration.\n \"\"\"\n return self._state\n\n\nclass SimulatorDriverForTraining(Driver):\n \"\"\"\n Driver used for training with a simulator.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(SimulatorDriverForTraining, self).__init__(**kwargs)\n self._simulator_protocol = kwargs.pop('simulator_connection')\n self._state_funcs = {\n DriverState.UNREGISTERED: self._send_register_message,\n DriverState.REGISTERING: self._handle_registration_acknowledgement,\n DriverState.ACTIVE: self._handle_runtime_message,\n DriverState.FINISHED: self._do_nothing\n }\n self._active_funcs = {\n ServerToSimulator.SET_PROPERTIES:\n self._handle_set_properties_message,\n ServerToSimulator.START: self._handle_start_message,\n ServerToSimulator.STOP: self._handle_stop_message,\n ServerToSimulator.PREDICTION: self._handle_prediction_message,\n ServerToSimulator.RESET: self._handle_reset_message,\n ServerToSimulator.FINISHED: self._handle_finished_message\n }\n\n def _do_nothing(self, _):\n return None\n\n def _send_register_message(self, _):\n \"\"\"\n In the beginning, send a register message.\n :return: A registration message\n :rtype: SimulatorToServer\n \"\"\"\n self._state = DriverState.REGISTERING\n message = SimulatorToServer()\n self._base_protocol.generate_register_message(message)\n return message\n\n def _handle_registration_acknowledgement(self, message):\n \"\"\"\n The server sends back an acknowledgement. 
Process that acknowledgement\n and send back a ready message to the server.\n :param message: An acknowledge register message.\n :type message: ServerToSimulator protobuf class\n :return: A ready message\n :rtype: SimulatorToServer protobuf message\n \"\"\"\n\n if not message:\n raise EmptyMessageError('ServerToSimulator with '\n 'AcknowledgeRegisterData')\n if not message.HasField('acknowledge_register_data'):\n raise MalformedMessageError('acknowledge_register_data',\n message)\n\n self._state = DriverState.ACTIVE\n self._base_protocol.handle_register_acknowledgement(\n message.acknowledge_register_data)\n reply = SimulatorToServer()\n self._simulator_protocol.generate_ready_message(reply)\n\n if reply.message_type != SimulatorToServer.READY:\n raise UnexpectedMessageError('READY SimulatorToServer message',\n reply)\n\n return reply\n\n def _handle_runtime_message(self, message):\n \"\"\"\n Once the simulator has signalled that it is ready to run, the server\n will issue it active messages, like set properties, start, stop, reset,\n or finish. This is a catch-all for all those active messages, which are\n routed to their own handlers.\n :param message: Message containing the active command.\n :type message: ServerToSimulator protobuf message\n :return: Potentially, a message to send back to the server from the\n simulator. If there isn't anything to send back, None is\n returned.\n :rtype: SimulatorToServer protobuf message or None.\n \"\"\"\n if not message:\n raise EmptyMessageError('ServerToSimulator')\n try:\n active_func = self._active_funcs[message.message_type]\n except KeyError:\n error = 'one of {}'.format(str(self._active_funcs.keys()))\n raise UnexpectedMessageError(error, message)\n\n return active_func(message)\n\n def _handle_set_properties_message(self, message):\n \"\"\"\n The server sent a properties message. Process it, and return back a\n \"ready\" message.\n :param message: The set properties message.\n :type message: ServerToSimulator protobuf message.\n :return: A ready message.\n :rtype: SimulatorToServer protobuf message.\n \"\"\"\n if not message.HasField('set_properties_data'):\n raise MalformedMessageError('set_properties_data', message)\n\n self._base_protocol.handle_set_properties_message(\n message.set_properties_data)\n reply = SimulatorToServer()\n self._simulator_protocol.generate_ready_message(reply)\n\n if reply.message_type != SimulatorToServer.READY:\n raise UnexpectedMessageError('READY SimulatorToServer message',\n reply)\n\n return reply\n\n def _handle_start_message(self, _):\n \"\"\"\n The server sent a Start message. Send back a message containing the\n current state of the simulator.\n :return: States from the simulator\n :rtype: SimulatorToServer message\n \"\"\"\n self._simulator_protocol.handle_start_message()\n reply = SimulatorToServer()\n self._simulator_protocol.generate_state_message(reply)\n\n if reply.message_type != SimulatorToServer.STATE:\n raise UnexpectedMessageError('STATE SimulatorToServer message',\n reply)\n\n if len(reply.state_data) == 0:\n raise MalformedMessageError('state_data', reply)\n\n return reply\n\n def _handle_stop_message(self, _):\n \"\"\"\n The server sent a stop message. 
Handle it and send back a ready\n message.\n :return: A ready message\n :rtype: SimulatorToServer message\n \"\"\"\n self._simulator_protocol.handle_stop_message()\n\n reply = SimulatorToServer()\n self._simulator_protocol.generate_ready_message(reply)\n\n if reply.message_type != SimulatorToServer.READY:\n raise UnexpectedMessageError('READY SimulatorToServer message',\n reply)\n\n return reply\n\n def _handle_prediction_message(self, message):\n \"\"\"\n The server sent predictions. Process it and send back the simulator's\n state.\n :param message: The message containing the prediction.\n :type message: ServerToSimulator protobuf message\n :return: The simulator state\n :rtype: SimulatorToServer protobuf message\n \"\"\"\n\n if not message:\n raise EmptyMessageError('ServerToSimulator with PredictionData')\n if len(message.prediction_data) == 0:\n raise MalformedMessageError('prediction_data', message)\n\n reply = SimulatorToServer()\n\n for prediction in message.prediction_data:\n self._simulator_protocol.handle_prediction_message(prediction)\n self._simulator_protocol.advance()\n self._simulator_protocol.generate_state_message(reply)\n\n if reply.message_type != SimulatorToServer.STATE:\n raise UnexpectedMessageError('STATE SimulatorToServer '\n 'message', reply)\n return reply\n\n def _handle_reset_message(self, _):\n \"\"\"\n The server sent a reset. Handle it and return a Ready message.\n :return: A ready message\n :rtype: SimulatorToServer protobuf message\n \"\"\"\n self._simulator_protocol.handle_reset_message()\n reply = SimulatorToServer()\n self._simulator_protocol.generate_ready_message(reply)\n\n if reply.message_type != SimulatorToServer.READY:\n raise UnexpectedMessageError('READY SimulatorToServer message',\n reply)\n\n return reply\n\n def _handle_finished_message(self, _):\n \"\"\"\n When this message is recieved, time to exit.\n :return: None\n \"\"\"\n self._simulator_protocol.handle_finish_message()\n self._state = DriverState.FINISHED\n return None\n\n def next(self, message):\n return self._state_funcs[self._state](message)\n\n\nclass SimulatorDriverForPrediction(Driver):\n \"\"\"\n Driver used for prediction with a simulator.\n\n The prediction flow is different from training, in that it doesn't get\n the START/STOP/RESET/FINISH signals from the server and it needs to send\n its on its own the initial simulator state.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(SimulatorDriverForPrediction, self).__init__(**kwargs)\n self._simulator_protocol = kwargs.pop('simulator_connection')\n self._state_funcs = {\n DriverState.UNREGISTERED: self._send_register_message,\n DriverState.REGISTERING: self._handle_registration_acknowledgement,\n DriverState.ACTIVE: self._handle_prediction_message,\n DriverState.FINISHED: self._do_nothing\n }\n\n def _do_nothing(self, _):\n return None\n\n def _send_register_message(self, _):\n \"\"\"\n In the beginning, send a register message.\n :return: A registration message\n :rtype: SimulatorToServer\n \"\"\"\n self._state = DriverState.REGISTERING\n message = SimulatorToServer()\n\n self._base_protocol.generate_register_message(message)\n if message.message_type != SimulatorToServer.REGISTER:\n raise UnexpectedMessageError('REGISTER SimulatorToServer message',\n message)\n\n return message\n\n def _handle_registration_acknowledgement(self, message):\n \"\"\"\n The server sends back an acknowledgement. 
Process that acknowledgement\n        and send back a ready message to the server.\n        :param message: An acknowledge register message.\n        :type message: ServerToSimulator protobuf class\n        :return: A ready message\n        :rtype: SimulatorToServer protobuf message\n        \"\"\"\n        if message.message_type != ServerToSimulator.ACKNOWLEDGE_REGISTER:\n            error = 'Expected ACKNOWLEDGE_REGISTER but got {}'.format(\n                MessageToString(message))\n            raise RuntimeError(error)\n        if not message.acknowledge_register_data:\n            error = 'Missing data in ACKNOWLEDGE_REGISTER message {}'.format(\n                MessageToString(message))\n            raise RuntimeError(error)\n        self._state = DriverState.ACTIVE\n        self._base_protocol.handle_register_acknowledgement(\n            message.acknowledge_register_data)\n\n        # Difference between training and predicting is here... instead of\n        # sending a READY, send an initial STATE.\n        reply = SimulatorToServer()\n        self._simulator_protocol.generate_state_message(reply)\n        return reply\n\n    def _handle_prediction_message(self, message):\n        \"\"\"\n        The server sent predictions. Process it and send back the simulator's\n        state.\n        :param message: The message containing the prediction.\n        :type message: ServerToSimulator protobuf message\n        :return: The simulator state\n        :rtype: SimulatorToServer protobuf message\n        \"\"\"\n\n        if not message:\n            raise EmptyMessageError('ServerToSimulator with PredictionData')\n        if len(message.prediction_data) == 0:\n            raise MalformedMessageError('prediction_data', message)\n\n        reply = SimulatorToServer()\n\n        for prediction in message.prediction_data:\n            self._simulator_protocol.handle_prediction_message(prediction)\n        self._simulator_protocol.advance()\n        self._simulator_protocol.generate_state_message(reply)\n\n        if reply.message_type != SimulatorToServer.STATE:\n            raise UnexpectedMessageError('STATE SimulatorToServer '\n                                         'message', reply)\n        return reply\n\n    def next(self, message):\n        return self._state_funcs[self._state](message)\n\n\nclass GeneratorDriverForTraining(Driver):\n    # TODO: Implement me!\n    pass\n\n\nclass GeneratorDriverForPrediction(Driver):\n    # TODO: Implement me!\n    pass\n","sub_path":"bonsai/drivers.py","file_name":"drivers.py","file_ext":"py","file_size_in_byte":14942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
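# A minimal sketch (added commentary, not from the bonsai source) of how the
# Driver classes above are meant to be "driven": per the Driver.next()
# docstring, an event loop pumps protobuf messages through next() until the
# driver reports FINISHED. The `transport` object with recv()/send() is an
# assumed stand-in for whatever websocket layer carries the bytes.
def drive(driver, transport):
    out = driver.next(None)  # an UNREGISTERED driver emits its REGISTER message
    while driver.state != DriverState.FINISHED:
        if out is not None:
            transport.send(out.SerializeToString())
        incoming = ServerToSimulator()
        incoming.ParseFromString(transport.recv())
        out = driver.next(incoming)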
+{"seq_id":"435620360","text":"import os\nimport sys\nfrom sklearn.utils import Bunch\n\nimport logging  # import the logging module\nimport time\nimport torch\n\nfrom uer.utils.vocab import Vocab\nfrom uer.utils.tokenizer import BertTokenizer\nfrom uer.utils.optimizers import WarmupLinearSchedule, AdamW\n\nfrom col_spec_yh.store_utils import get_labels_map_from_aida_file_2\n\ndef get_logger(logger_name=\"detail\", dir_name='logs_default', file_name='log_rec_all'):\n    logger = logging.getLogger(logger_name)\n    logger.setLevel(logging.DEBUG)  # master switch for the overall log level\n\n    if not os.path.exists(dir_name) or os.path.isfile(dir_name):\n        os.makedirs(dir_name)\n    rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\n    detail_log_name = os.path.join(dir_name, rq) + '.log'\n\n    # add a file handler\n    fh = logging.FileHandler(detail_log_name, mode='w')\n    fh.setLevel(logging.INFO)  # log-level switch for file output\n    formatter = logging.Formatter(\"%(asctime)s - %(filename)s [line:%(lineno)d] - %(levelname)s: %(message)s\")\n    fh.setFormatter(formatter)\n    logger.addHandler(fh)\n\n    # add a console handler\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.DEBUG)  # DEBUG < INFO (more strict)\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n\n    # another file handler\n    fh_2 = logging.FileHandler(file_name, mode='a')\n    fh_2.setLevel(logging.WARNING)  # log-level switch for this file output\n    formatter = logging.Formatter(\"%(asctime)s - %(filename)s [#l:%(lineno)d] - %(message)s\")\n    fh_2.setFormatter(formatter)\n    logger.addHandler(fh_2)\n    return logger\n\n\ndef get_args_aida_task():\n    args = Bunch()\n    args.seq_len = 64\n    args.row_wise_fill = True\n    args.mask_mode = 'cross-wise'\n    args.additional_ban = 2\n    args.table_object = 'first-column'\n    args.pooling = 'avg-token'\n\n    args.pretrained_model_path = \"./models/bert_model.bin-000\"\n    args.vocab_path = 'models/google_uncased_en_vocab.txt'\n    args.vocab = Vocab()\n    args.vocab.load(args.vocab_path)\n    args.emb_size = 768\n    args.embedding = 'tab'  # before: bert\n    args.encoder = 'bertTab'\n    args.subword_type = 'none'\n    args.tokenizer = 'bert'\n    args.tokenizer = globals()[args.tokenizer.capitalize() + \"Tokenizer\"](args)\n\n    args.feedforward_size = 3072\n    args.hidden_size = 768\n    args.heads_num = 12\n    args.layers_num = 12\n    args.learning_rate = 2e-5\n    args.batch_size = 4\n    args.dropout = 0.1\n\n    # args.target = 'bert'\n    return args\n\ndef get_args_minimal():\n    args = Bunch()\n    args.vocab_path = 'models/google_uncased_en_vocab.txt'\n    args.vocab = Vocab()\n    args.vocab.load(args.vocab_path)\n    args.tokenizer = 'bert'\n    args.tokenizer = globals()[args.tokenizer.capitalize() + \"Tokenizer\"](args)\n\n    # 'pooling': 'avg-cell-seg',  # avg-token, 'avg-cell-seg'\n    args.mask_mode = 'cross-wise'\n    # 'shuffle_rows': True,\n    args.additional_ban = 4  # ?\n    # 'row_wise_fill': False,\n    args.has_high_level_cls = True\n    args.high_level_clses = ['TAB', 'COL', 'CELL', 'NL']\n    return args\n\ndef set_args_2():\n    # options for model\n    args = Bunch()\n    args.mask_mode = 'cross-wise'  # in ['row_wise', 'col_wise', 'cross_wise', 'cross_and_hier_wise']\n    args.additional_ban = 0\n    # args.pooling = 'avg-token'\n    args.pooling = 'avg-cell-seg'\n    args.table_object = 'first-column'\n    args.noise_num = 2\n    args.seq_len = 100\n    args.row_wise_fill = True\n\n    args.pretrained_model_path = \"./models/bert_model.bin-000\"\n    args.vocab_path = 'models/google_uncased_en_vocab.txt'\n    args.vocab = Vocab()\n    args.vocab.load(args.vocab_path)\n    args.emb_size = 768\n    args.embedding = 'tab'  # before: bert\n    args.encoder = 'bertTab'\n    args.subword_type = 'none'\n    args.tokenizer = 'bert'\n\n    args.feedforward_size = 3072\n    args.hidden_size = 768\n    args.heads_num = 12\n    args.layers_num = 12\n    args.learning_rate = 2e-5\n    args.warmup = 0.1\n    args.batch_size = 32\n    args.dropout = 0.1\n    args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    args.train_path = './data/aida/IO/train_samples'\n    args.t2d_path = './data/aida/IO/test_samples_t2d'\n    args.limaye_path = './data/aida/IO/test_samples_limaye'\n    args.wiki_path = './data/aida/IO/test_samples_wikipedia'\n\n\n    # other options\n    args.report_steps = 100\n    args.labels_map = get_labels_map_from_aida_file_2(args.train_path)\n    args.labels_num = len(args.labels_map)\n    args.tokenizer = globals()[args.tokenizer.capitalize() + \"Tokenizer\"](args)\n    return args\n\n\ndef load_or_initialize_parameters(args, model):\n    if args.pretrained_model_path is not None:\n        print(\"[YH INFO] : Loading pretrained parameters from {}.\".format(args.pretrained_model_path))\n        # Initialize with pretrained model.\n        model.load_state_dict(torch.load(args.pretrained_model_path), strict=False)\n        # special for table\n        for i in range(1,5):\n            model.state_dict()['embedding.word_embedding.weight'][i] = model.state_dict()['embedding.word_embedding.weight'][101]
\n    else:\n        # Initialize with normal distribution.\n        for n, p in list(model.named_parameters()):\n            if 'gamma' not in n and 'beta' not in n:\n                p.data.normal_(0, 0.02)\n\n\ndef build_optimizer(args, model):\n    param_optimizer = list(model.named_parameters())\n    no_decay = ['bias', 'gamma', 'beta']\n    optimizer_grouped_parameters = [\n        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},\n        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}\n    ]\n    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, correct_bias=False)\n    #scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.train_steps*args.warmup, t_total=args.train_steps)\n    scheduler = None\n    return optimizer, scheduler\n\n\ndef batch_loader(batch_size, src, tgt, seg):\n    instances_num = src.size()[0]\n    for i in range(instances_num // batch_size):\n        src_batch = src[i * batch_size : (i + 1) * batch_size, :]\n        tgt_batch = tgt[i * batch_size : (i + 1) * batch_size]\n        seg_batch = seg[i * batch_size : (i + 1) * batch_size, :]\n        yield src_batch, tgt_batch, seg_batch\n    if instances_num > instances_num // batch_size * batch_size:\n        src_batch = src[instances_num // batch_size * batch_size :, :]\n        tgt_batch = tgt[instances_num // batch_size * batch_size :]\n        seg_batch = seg[instances_num // batch_size * batch_size :, :]\n        yield src_batch, tgt_batch, seg_batch","sub_path":"UER-spider-temp/demos/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"621754289","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport MySQLdb\n\nfrom bottle import route, run, debug, template, request, static_file, auth_basic\n\n# get the absolute path of the directory containing index.py\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n# path of the directory that holds the template files\nTEMPLATE_PATH = BASE_DIR + \"/views\"\n# path where the generated coffee scripts are placed\nCOFFEE_PATH = os.environ.get(\"COFFEE_PATH\")\n\nconnector = MySQLdb.connect(host=\"localhost\", db=\"BOT\", user=\"root\", charset=\"utf8\")\n#connector = MySQLdb.connect(host=\"localhost\", db=\"BOT\", user=\"root\", passwd=\"summer\", charset=\"utf8\")\n\n# username and password for BASIC auth\nUSERNAME = \"16shinsotsu\"\nPASSWORD = \"16shinsotsu\"\n\n\ndef check(username, password):\n    return username == USERNAME and password == PASSWORD\n\n@route('/botwords/css/<filename>')\ndef css_dir(filename):\n    return static_file(filename, root=BASE_DIR+\"/static/css\")\n\n@route('/botwords/js/<filename>')\ndef js_dir(filename):\n    return static_file(filename, root=BASE_DIR+\"/static/js\")\n\n@route('/botwords/img/<filename>')\ndef img_dir(filename):\n    return static_file(filename, root=BASE_DIR+\"/static/img\")\n\n@route('/botwords/font/<filename>')\ndef font_dir(filename):\n    return static_file(filename, root=BASE_DIR+\"/static/fonts\")\n\n@route('/botwords')\ndef index():\n    return template(TEMPLATE_PATH+'/index')\n\n@route('/botwords', method='POST')\ndef add_word():\n    call = request.forms.get('call')\n    response = request.forms.get('response')\n    flag = make_script(call,response)\n    return flag\n\n@route('/botwords/show')\n@auth_basic(check)\ndef show_db():\n    cursor = connector.cursor()\n    cursor.execute(\"SELECT * FROM BOT.WORDS\")\n    result = cursor.fetchall()\n    cursor.close()\n    return template(TEMPLATE_PATH+'/show', result=json.dumps(result))\n\ndef make_script(call,response):\n    # list of phrases that trigger the bot\n    call_list = split_text(replace_reg(call))\n    if(len(call_list)==0):
\n        return 'error'\n\n    # list of reply phrases\n    response_list = split_text(replace_reg(response))\n    if(len(response_list)==0):\n        return 'error'\n\n    # write to the DB and write out the file\n    write_words(call_list,response_list)\n\n    return ''\n\n# split on '|', drop empty pieces, and return a list\ndef split_text(text):\n    split_list = text.split('|')\n\n    while split_list.count('') > 0:\n        split_list.remove('')\n\n    return split_list;\n\n# escape characters that are special in regular expressions\ndef replace_reg(text):\n    text = text.replace('*', '\\\\*')\n    text = text.replace('+', '\\\\+')\n    text = text.replace('^', '\\\\^')\n    text = text.replace('$', '\\\\$')\n    text = text.replace('.', '\\\\.')\n    text = text.replace('{', '\\\\{')\n    text = text.replace('}', '\\\\}')\n    text = text.replace('[', '\\\\[')\n    text = text.replace(']', '\\\\]')\n    text = text.replace('(', '\\\\(')\n    text = text.replace(')', '\\\\)')\n    return text;\n\n# write to the DB\ndef write_words(call_list,response_list):\n    cursor = connector.cursor()\n    for call in call_list:\n        for response in response_list:\n            cursor.execute('select RESPONSE_WORD from BOT.WORDS where CALL_WORD = \"{0}\" and RESPONSE_WORD = \"{1}\"'.format(call,response))\n            words = cursor.fetchall()\n            # add the pair only if it does not exist yet\n            if(len(words)==0):\n                sql = 'insert into BOT.WORDS (CALL_WORD,RESPONSE_WORD) values(\"{0}\",\"{1}\")'.format(call,response)\n                cursor.execute(sql)\n                # this is where the DB write actually happens\n                connector.commit()\n                # write out the coffee script file\n                words_to_file(call)\n    cursor.close()\n\n# write out to a file\ndef words_to_file(call):\n    # fetch from the DB every reply the bot gives for this trigger phrase\n    cursor = connector.cursor()\n    cursor.execute('select ID,RESPONSE_WORD from BOT.WORDS where CALL_WORD = \"{0}\"'.format(call))\n    words = cursor.fetchall()\n    cursor.close()\n\n    # the file name is the topmost id\n    filename = '{0}.coffee'.format(words[0][0])\n\n    # build the list literal to write\n    response = '['\n    length = len(words)\n    for i in range(length):\n        response = response + '\"{0}\"'.format(words[i][1].encode('utf-8'))\n        if(i<length-1):\n            response = response + ','\n    response = response + ']'\n\n    writer = open(os.path.join(COFFEE_PATH, filename), 'w')\n    writer.write('module.exports = (robot) ->\\n')\n    writer.write('\\trobot.respond /{0}$/i, (msg) ->\\n \\t\\tmsg.send msg.random {1}\\n \\t\\tmsg.finish()\\n'.format(call,response))\n    writer.close()\n\nrun(host='localhost', port=9000)\n","sub_path":"botwords_server.py","file_name":"botwords_server.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
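# Quick illustration of the two helpers above (added commentary, not part of
# the original file): replace_reg() backslash-escapes most regex
# metacharacters -- though not '|' or '?' -- so user input can be embedded in
# the generated hubot robot.respond pattern, and split_text() turns the
# '|'-separated form field into a list of non-empty pieces.
assert replace_reg('(hi)') == '\\(hi\\)'
assert split_text('hello||good morning') == ['hello', 'good morning']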
+{"seq_id":"180335284","text":"class Dict(dict):\n\n    def __getattr__(self, key):\n        try:\n            return self[key]\n        except KeyError:\n            raise AttributeError(r\"'Dict' object has no attribute '%s'\" % key)\n\n\ndef toDict(d):\n    D = Dict()\n    for i, j in d.items():\n        D[i] = toDict(j) if isinstance(j, dict) else j\n    return D\n\n\nconfigs = dict(\n    sql=dict(\n        host='localhost',\n        port=3306,\n        user='root',\n        password='password',\n        db='awesome'\n    ),\n    cookie_key='yt'\n)\n\ncfg = toDict(configs)\n","sub_path":"myweb/www/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"198252520","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport urllib\nimport urllib2\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nICON_DEFAULT = 'icon.png'\nICON_PHONETIC = 'icon_phonetic.png'\nICON_BASIC = 'icon_basic.png'\nICON_WEB = 'icon_web.png'\n\n\nclass Item(object):\n    def __init__(self, title, subtitle, arg=None, valid=True, icon=None, variables=None):\n        self.attr = {'title': title, 'subtitle': subtitle, 'valid': valid}\n        self.icon = icon\n        self.arg = arg\n        self.variables = variables\n\n    def encode(self):\n        variables = {\n            \"alfredworkflow\": {\n                \"arg\": self.arg,\n                \"variables\": self.variables\n            }\n        }\n        self.attr['arg'] = json.dumps(variables)\n        self.attr['icon'] = {\"path\": self.icon}\n        return self.attr\n\n\ndef output(items):\n    items = map(lambda x: x.encode(), items)\n    sys.stdout.write(json.dumps({'items': items}))\n\n\ndef get_web_data(query):\n    query = urllib.quote(str(query))\n    url = 'http://fanyi.youdao.com/openapi.do?keyfrom=%s&key=%s&type=data&doctype=json&version=1.1&q=%s' % (\n        os.environ['KEY_FROM'], os.environ['API_KEY'], query)\n    try:\n        response = urllib2.urlopen(url, timeout=5)\n        if response.code == 200:\n            json_str = response.read()\n            return json.loads(json_str)\n    except Exception as e:\n        sys.stderr.write('error %s' % e)\n    return\n\n\ndef main():\n    # print sys.argv\n    query = sys.argv[1].strip().replace(\"\\\\\", \"\")\n\n    os.environ['word'] = query\n    results = []\n    if not query:\n        return output([Item('有道翻译', '请输入要查询的内容', valid=False)])\n\n    s = get_web_data(query)\n\n    if s and s.get(\"errorCode\") == 0:\n        # translation result\n        translation = ''.join(s[\"translation\"])\n\n        results.append(Item(translation, '翻译结果', translation, icon=ICON_DEFAULT, variables={'word': query, 'desc': translation}))\n\n        if u'basic' in s.keys():\n            # concise dictionary definitions\n            for explain in s[\"basic\"][\"explains\"]:\n                pieces = explain.split('.')\n                if len(pieces) == 2:\n                    arg = pieces[1].strip()\n                else:\n                    arg = explain\n\n                results.append(Item(explain, '简明释意', arg, icon=ICON_BASIC, variables={'word': query}))\n\n        # web translations\n        if u'web' in s.keys():\n            for web in s[\"web\"]:\n                value = ' / '.join(web[\"value\"])\n                results.append(Item(value, '网络翻译: ' + web[\"key\"], value, icon=ICON_WEB, variables={'word': query}))\n\n    else:\n        new_query = '%s 翻译' % query\n        results.append(Item('翻译失败', 'Google搜索', new_query, icon=ICON_BASIC, variables={'word': query, 'cmd': 'search'}))\n\n    output(results)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"youdao/youdao.py","file_name":"youdao.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"124405466","text":"# app.py\n\n# Author : aarontillekeratne\n# Date : 2019-05-08\n\n# This file is part of categorise_transaction.\n\n# categorise_transaction is free software:\n# you can redistribute it and/or modify it under the terms of the GNU General\n# Public License as published by the Free Software Foundation, either version 3\n# of the License, or (at your option) any later version.\n\n# categorise_transaction is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with categorise_transaction. If not, see <http://www.gnu.org/licenses/>.\n\nimport os\nfrom flask import Flask\nfrom src.io.gcs import GoogleCloudStorage\n\napp = Flask(__name__)\n\n\n@app.route('/test')\ndef test_page():\n    gcs = GoogleCloudStorage()\n\n    return f\"Buckets : {gcs.get_buckets()}\"\n\n\n@app.route('/')\ndef index():\n    target = os.environ.get('TARGET', 'World')\n\n    return f'hello {target}'\n\n\nif __name__ == '__main__':\n    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"648862553","text":"import sys\ninput = sys.stdin.readline\n\ndef dammit(b, e):\n    ans = 1\n    while e:\n        if e % 2:\n            ans = (ans * b) % m\n        b = (b * b) % m\n        e /= 2\n    return ans\n\nn, m, k, v = map(int, input().split())\n\nans_xor = 1\nans_or = 1\nans_and = 1\n\ncute = dammit(2, (n - 1)) % m\ncuteness = (dammit(2, n) - 1) % m\n\nfor i in xrange(k):\n    ans_xor = (ans_xor * cute) % m\n    if v >> i&1:\n        ans_or = (ans_or * cuteness) % m\n    else:\n        ans_and = (ans_and * cuteness) % m\n\nprint(ans_xor % m)\nprint(ans_or % m)\nprint(ans_and % m)\n","sub_path":"dmoj/globexcup18s3.py","file_name":"globexcup18s3.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"154197348","text":"#!/usr/bin/env python3\n\nimport sys\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n    if len(sys.argv) >= 2:\n        data = [float(x) for x in sys.argv[1:]]\n        plt.plot(data)\n        plt.show()\n    else:\n        print(\"graph.py NUMBERS\")\n","sub_path":"bin/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"312639296","text":"# taken and cleaned up from:\n# http://stackoverflow.com/questions/6284396/permutations-with-unique-values\n\nclass UniqueElement(object):\n    def __init__(self, value, occurrences):\n        self.value = value\n        self.occurrences = occurrences\n\ndef unique_permutations(l):\n    l_unique = [UniqueElement(e, l.count(e)) for e in set(l)]\n    return unique_permutations_iterator(l_unique, [0]*len(l), len(l)-1)\n\ndef unique_permutations_iterator(l_unique, result_list, depth):\n    if depth < 0:\n        yield tuple(result_list)\n    else:\n        for u in l_unique:\n            if u.occurrences > 0:\n                result_list[depth] = u.value\n                u.occurrences -= 1\n                for g in unique_permutations_iterator(l_unique=l_unique,\n                        result_list=result_list, depth=depth-1):\n                    yield g\n                u.occurrences += 1\n\n","sub_path":"antiquities/unique_permutations.py","file_name":"unique_permutations.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"569758008","text":"def main():\n    from sys import stdin\n    n = int(stdin.readline())\n    word = stdin.readline()\n    prev = word[-2]\n    history = set([word])\n    for i in range(n-1):\n        word = stdin.readline()\n        if word[0] != prev or word in history:\n            print(f'Player {i%2 or 2} lost')\n            break\n        prev = word[-2]\n        history.add(word)\n    else:\n        print('Fair Game')\nmain()\n","sub_path":"shiritori/shiritori2.py","file_name":"shiritori2.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
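# Usage sketch for unique_permutations above (my example, not from the source
# repo): unlike itertools.permutations, duplicate elements do not produce
# duplicate outputs, so a multiset like [1, 1, 2] yields 3!/2! = 3 tuples
# instead of 6.
perms = sorted(unique_permutations([1, 1, 2]))
assert perms == [(1, 1, 2), (1, 2, 1), (2, 1, 1)]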
+{"seq_id":"309442431","text":"# # # # # # # # # # # # # # # # #\n# @author Alexander Novikov\n#\n# Some basic geometry definitions for the state-of-the-art\n# clustering algorithm\n#\n\nimport math\n\nclass point(object):\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n\n    def distanceTo(self, pointOther):\n        dx = (self.x - pointOther.x)\n        dy = (self.y - pointOther.y)\n        return math.sqrt(dx * dx + dy * dy)\n\n    def __str__(self):\n        return \"(%.2f,%.2f)\" % (self.x,self.y)\n\n\nclass cluster(object):\n    def __init__(self):\n        self.points = []\n        self.sumFy = 0\n        self.sumFx = 0\n        self.sumxy = 0\n\n    def centroid(self):\n        #thisCentroid = point(float(self.sumxy)/self.sumFx, float(self.sumxy)/self.sumFy)\n        #return thisCentroid\n        thisCentroid = point(float(self.sumFy)/len(self.points),float(self.sumFx)/len(self.points))\n        return thisCentroid\n\n    def stddev_x(self):\n        xbar = float(self.sumFy) / len(self.points)\n        sigmaX = 0\n        for point in self.points:\n            sigmaX += (point.x - xbar)*(point.x - xbar)\n        return sigmaX\n\n    def stddev_y(self):\n        ybar = float(self.sumFx) / len(self.points)\n        sigmaY = 0\n        for point in self.points:\n            sigmaY += (point.y - ybar)*(point.y - ybar)\n        return sigmaY\n\n    def stddev_xy(self):\n        return max(self.stddev_x(), self.stddev_y())\n\n    def addPoint(self, pt):\n        self.points.append(pt)\n        self.sumFx += pt.y\n        self.sumFy += pt.x\n        self.sumxy += pt.y * pt.x\n\n    def xcentroidDistanceTo(self, othr):\n        thisCentroid = point(float(self.sumxy)/self.sumFx, 0)\n        thatCentroid = point(float(othr.sumxy)/othr.sumFx, 0)\n        return thisCentroid.distanceTo(thatCentroid)\n\n    def centroidDistanceTo(self, othr):\n        c_x = 0.0\n        c_y = 0.0\n        o_x = 0.0\n        o_y = 0.0\n        if (self.sumxy != 0 or self.sumFx != 0):\n            c_x = float(self.sumxy)/self.sumFx\n        if (self.sumxy != 0 or self.sumFy != 0):\n            c_y = float(self.sumxy)/self.sumFy\n        if (othr.sumxy != 0 or othr.sumFx != 0):\n            o_x = float(othr.sumxy)/othr.sumFx\n        if (othr.sumxy != 0 or othr.sumFy != 0):\n            o_y = float(othr.sumxy)/othr.sumFy\n\n        thisCentroid = point(c_x, c_y)\n        thatCentroid = point(o_x, o_y)\n        return thisCentroid.distanceTo(thatCentroid)\n\n    def maxDistanceTo(self, clusterOther):\n        best = 0\n        #TODO: add better optimization here\n        for point1 in self.points:\n            for point2 in clusterOther.points:\n                dist = point1.distanceTo(point2)\n                if dist > best:\n                    best = dist\n        return best\n\n    def minDistanceTo(self, clusterOther):\n        best = float(1<<31)\n        #TODO: add better optimization here\n        for point1 in self.points:\n            for point2 in clusterOther.points:\n                dist = point1.distanceTo(point2)\n                if dist < best:\n                    best = dist\n        return best\n\n    def distanceTo(self, clusterOther):\n        return self.centroidDistanceTo(clusterOther)\n\n    def __str__(self):\n        return \"[\" + \", \".join(map(lambda p: str(p), self.points)) + \"]\"\n\n\n","sub_path":"main/statistics/geom2d.py","file_name":"geom2d.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
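# Small usage sketch for the point/cluster classes above (my illustration,
# not part of the original module). Note the counter-intuitive bookkeeping:
# addPoint() accumulates x coordinates into sumFy and y coordinates into
# sumFx, so centroid() reads them back in that swapped order.
c = cluster()
for p in (point(0, 0), point(2, 0)):
    c.addPoint(p)
mid = c.centroid()
assert (mid.x, mid.y) == (1.0, 0.0)
assert point(0, 0).distanceTo(point(3, 4)) == 5.0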
chmod 755 )\n\"\"\"\nfrom __future__ import print_function\n\nip_addr1 = '192.168.16.1'\nip_addr2 = '10.10.1.1'\nip_addr3 = '172.16.31.17'\n\nprint(ip_addr1, ip_addr2, ip_addr3)\n","sub_path":"learning_python/lesson1/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424573395","text":"import scipy\nfrom scipy.ndimage.filters import gaussian_filter\nfrom scipy.ndimage import map_coordinates\nimport numpy as np\nfrom numpy.random import uniform\nimport torch\n\n\ndef augment_dataset(trainset_labeled, b=100, k=2):\n # Modifies trainset_labeled inline\n X = trainset_labeled.train_data\n Y = trainset_labeled.train_labels\n\n batches = [(X[i:i + b], Y[i:i + b]) for i in xrange(0, len(X), b)]\n\n augmented_data, augmented_labels = [], []\n for i in range(k - 1):\n for img_batch, labels in batches:\n augmented_data.extend(elastic_transform(img_batch, sigma=4, alpha=34))\n\n augmented_labels.extend(labels)\n\n augmented_data = np.array(augmented_data)\n augmented_labels = np.array(augmented_labels)\n\n data = np.concatenate((trainset_labeled.train_data.numpy(), augmented_data))\n labels = np.concatenate((trainset_labeled.train_labels.numpy(), augmented_labels))\n\n # augmented = trainset_labeled.copy()\n\n trainset_labeled.train_data = torch.from_numpy(data)\n trainset_labeled.train_labels = torch.from_numpy(labels)\n\n trainset_labeled.k = data.shape[0]\n\n\n\ndef elastic_transform(img_batch, sigma=5, alpha=34):\n img_batch = img_batch.numpy()\n x_dim = img_batch.shape[1]\n y_dim = img_batch.shape[2]\n pos = np.array([[i, j] for i in range(x_dim) for j in range(y_dim)])\n pos = pos.transpose(1, 0).reshape(2, x_dim, y_dim)\n uniform_random_x = uniform(-1, 1, size=img_batch.shape[1:])\n uniform_random_y = uniform(-1, 1, size=img_batch.shape[1:])\n\n elastic_x = gaussian_filter(alpha * uniform_random_x,\n sigma=sigma, mode='constant')\n elastic_y = gaussian_filter(alpha * uniform_random_y,\n sigma=sigma, mode='constant')\n elastic_distortion_x = pos[0] + elastic_x\n elastic_distortion_y = pos[1] + elastic_y\n elastic = np.array([elastic_distortion_x, elastic_distortion_y])\n\n transformed = []\n batch_size = img_batch.shape[0]\n\n for i in range(batch_size):\n transformed.append(map_coordinates(img_batch[i], elastic, order=0,\n prefilter=False, mode='reflect'))\n return transformed\n","sub_path":"script/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456113137","text":"from django.shortcuts import render, reverse, redirect\nfrom django.views import View\nfrom django.core.mail import send_mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom .models import Category\n\nclass CategorySubscriberView(View):\n model = Category\n template_name = 'category.html'\n context_object_name = 'categorysubscriber'\n\n def get_object (self, **kwargs):\n id = self.kwargs.get('pk')\n return Category.objects.get(pk=id)\n\n def get (self, request, *args, **kwargs):\n return render (request, \"category.html\", {})\n\n def post(self, request, *args, **kwargs):\n id = self.kwargs.get('pk')\n current_user = self.request.user\n current_category = self.request.category_name\n subscription = Category(\n subscribers = current_user.username, category_name = current_category\n )\n subscription.save() \n 
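# augment_dataset() in utils.py above slices batches with xrange, which does
# not exist in Python 3 even though the rest of the file targets it. The same
# batching in Python 3 (b is the batch size, as above):
def make_batches(X, Y, b=100):
    return [(X[i:i + b], Y[i:i + b]) for i in range(0, len(X), b)]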
\nhtml_content = render_to_string(\n 'subscription_created.html',\n {\n 'subscription': subscription,\n }\n)\n\n\nmsg = EmailMultiAlternatives(\n subject = f'{subscription.subscriber.username}',\n body = subscription.category_name,\n from_email = '',\n to =[subscription.subscriber.email]\n)\nmsg.attach_alternative(html_content, \"text/html\")\n\nmsg.send()\n\nCategory.objects.get(pk=id).subscribers.add(current_user)\n\nreturn redirect('news:id')","sub_path":"project/newsletters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403955978","text":"import regex\nfrom reactTypes import *\n\n\nREGEXP = {\n \"Constructor\": regex.compile(r'constructor\\s*(\\(.*\\))\\s*\\{\\s*super\\s*(.*)\\s*;*\\s*(this.state\\s*=\\s*(\\{([^}{]*|(?4))*\\}));*\\s*\\}', regex.MULTILINE),\n \"Variables\": regex.compile(r'(?\\S*)\\s*:\\s*(?(\\{(?:[^}{]*|(?3))*\\})|(?:.*))', regex.MULTILINE)\n}\n\ndef initVars(cComponent: ClassComponent, variables: list) -> list:\n constructor = regex.findall(REGEXP[\"Constructor\"], cComponent.content)\n if (len(constructor)):\n initializedVars = regex.findall(REGEXP[\"Variables\"], constructor[0][3])\n for var in initializedVars:\n variables.append(Variable(\"let\", var[0], var[1]))\n","sub_path":"src/parser/initVars.py","file_name":"initVars.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"527020855","text":"import tkinter as tk\nimport cv2\nimport glob\nimport time\nfrom tkinter import filedialog\nfrom concurrent.futures import ThreadPoolExecutor\n\ndef filebrowser(ext = '', directory = ''):\n return [f for f in glob.glob(f\"{directory}**/*{ext}\", recursive=True)]\n\ndef serial_duplicate(image_dir, image_name):\n original = cv2.imread(image_name)\n start_time = time.time()\n global duplicates\n\n for image_ in image_dir:\n try:\n if image_name.replace('\\\\', '/') != image_.replace('\\\\', '/'):\n image_to_compare = cv2.imread(image_)\n if original.shape == image_to_compare.shape:\n \n difference = cv2.subtract(original, image_to_compare)\n b, g, r = cv2.split(difference)\n \n if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:\n print(f'Duplicates Found: {image_} is Duplicate of {image_name}')\n duplicates.append(image_)\n \n sift = cv2.xfeatures2d.SIFT_create()\n kp_1, desc_1 = sift.detectAndCompute(original, None)\n kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)\n \n index_params = dict(algorithm=0, trees=5)\n search_params = dict()\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n \n matches = flann.knnMatch(desc_1, desc_2, k=2)\n \n good_points = []\n for m, n in matches:\n if m.distance < 0.6*n.distance:\n good_points.append(m)\n \n # Define how similar they are\n number_keypoints = 0\n if len(kp_1) <= len(kp_2):\n number_keypoints = len(kp_1)\n else:\n number_keypoints = len(kp_2)\n except Exception as e:\n pass\n \n \n \n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\ndef parallel_duplicate(image_, image_name):\n original = cv2.imread(image_name) \n try:\n if image_name.replace('\\\\', '/') != image_.replace('\\\\', '/'):\n global duplicates\n image_to_compare = cv2.imread(image_)\n # print(image_ + ' image read successfully!')\n if original.shape == image_to_compare.shape:\n difference = cv2.subtract(original, image_to_compare)\n b, g, r = cv2.split(difference)\n \n if 
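# In views.py above, everything after subscription.save() has lost its
# indentation: html_content, msg, and the final `return redirect(...)` sit at
# module level, which makes the file a SyntaxError. A sketch of how that tail
# presumably nests back inside post() (same calls and names as the original,
# including its subscriber/subscribers inconsistency, which is kept as-is):
    def post(self, request, *args, **kwargs):
        id = self.kwargs.get('pk')
        current_user = self.request.user
        current_category = self.request.category_name
        subscription = Category(
            subscribers=current_user.username, category_name=current_category
        )
        subscription.save()

        html_content = render_to_string(
            'subscription_created.html',
            {'subscription': subscription},
        )
        msg = EmailMultiAlternatives(
            subject=f'{subscription.subscriber.username}',
            body=subscription.category_name,
            from_email='',
            to=[subscription.subscriber.email],
        )
        msg.attach_alternative(html_content, "text/html")
        msg.send()

        Category.objects.get(pk=id).subscribers.add(current_user)
        return redirect('news:id')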
cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:\n print(f'Duplicates Found: {image_name} is Duplicate of {image_}')\n duplicates.append(image_)\n \n sift = cv2.xfeatures2d.SIFT_create()\n kp_1, desc_1 = sift.detectAndCompute(original, None)\n kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)\n \n index_params = dict(algorithm=0, trees=5)\n search_params = dict()\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n \n matches = flann.knnMatch(desc_1, desc_2, k=2)\n \n good_points = []\n for m, n in matches:\n if m.distance < 0.6*n.distance:\n good_points.append(m)\n \n # Define how similar they are\n number_keypoints = 0\n if len(kp_1) <= len(kp_2):\n number_keypoints = len(kp_1)\n else:\n number_keypoints = len(kp_2)\n except Exception as e:\n pass\n\nwindow = tk.Tk()\n\nfolderSelected = tk.Label(master = window, text = \"Folder selected!\")\nfileSelected = tk.Label(master = window, text = 'Image selected!')\nswitch_variable = tk.StringVar(value = 'Serial')\n\ndef askDirectory():\n folderSelected.pack_forget()\n directoryName = tk.filedialog.askdirectory()\n global image_dir\n image_dir = filebrowser(ext = '.jpeg', directory = directoryName)\n image_dir += filebrowser(ext='.jpg', directory = directoryName)\n folderSelected.pack()\n\ndef openImage():\n fileSelected.pack_forget()\n file = tk.filedialog.askopenfilename()\n global image_name\n image_name = file\n \n fileSelected.pack()\n\ndef find_duplicates():\n global count, duplicates\n duplicates = []\n count = 0\n if switch_variable.get() == 'Serial':\n serial_duplicate(image_dir, image_name)\n else:\n start_time = time.time()\n with ThreadPoolExecutor(max_workers = 5) as executor:\n for image_ in image_dir:\n executor.submit(parallel_duplicate, image_, image_name)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n print('Program Executed Completely and ' + str(len(duplicates)) + ' duplicates found!')\n\nfolderSelectFrame = tk.Frame(master = window)\nfolderSelectFrame.pack()\nlabel = tk.Label(master = folderSelectFrame, text = \"Select folder to check in\",\n width = 50)\nlabel.pack(side = tk.LEFT)\nfolderSelectButton = tk.Button(master = folderSelectFrame, text = 'Open file explorer',\n command = askDirectory)\nfolderSelectButton.pack(side = tk.RIGHT)\n\nimageSelectFrame = tk.Frame(master = window)\nimageSelectFrame.pack()\nlabel = tk.Label(master = imageSelectFrame, text = \"Select image to check duplicates of\",\n width = 50)\nlabel.pack(side = tk.LEFT)\nimageSelectButton = tk.Button(master = imageSelectFrame, text = 'Open file explorer',\n command = openImage)\nimageSelectButton.pack(side = tk.RIGHT)\n\n\nserial = tk.Radiobutton(master = window, text = 'Serial',\n variable = switch_variable, indicatoron = False,\n value = 'Serial', width = 10)\nparallel = tk.Radiobutton(master = window, text = 'Parallel',\n variable = switch_variable, indicatoron = False,\n value = 'Parallel', width = 10)\nserial.pack()\nparallel.pack()\nfinal_button = tk.Button(master = window, text = 'Find duplicates',\n command = find_duplicates)\nfinal_button.pack()\n\nwindow.mainloop()","sub_path":"duplicate_image_detection.py","file_name":"duplicate_image_detection.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"157728677","text":"#! 
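# duplicate_image_detection.py above calls cv2.xfeatures2d.SIFT_create(),
# which exists only in older opencv-contrib builds; since OpenCV 4.4.0 SIFT
# lives in the main module as cv2.SIFT_create(). Also, exact duplicates are
# much cheaper to find with a content hash than with a per-pair cv2.subtract;
# a minimal pre-filter sketch:
import hashlib
from collections import defaultdict

def hash_duplicates(paths):
    """Group files whose raw bytes are identical."""
    groups = defaultdict(list)
    for p in paths:
        with open(p, 'rb') as f:
            groups[hashlib.sha256(f.read()).hexdigest()].append(p)
    return [g for g in groups.values() if len(g) > 1]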
/usr/bin/python3\n# -*- coding: utf-8 -*-\n#=================================================================================\n# author: Chancerel Codjovi (aka codrelphi)\n# date: 2019-10-02\n# source: https://www.hackerrank.com/challenges/30-more-exceptions/problem\n#=================================================================================\n\nclass Calculator:\n def power(self, n, p):\n if n < 0 or p < 0:\n raise Exception('n and p should be non-negative')\n else:\n return n**p\n\nmyCalculator=Calculator()\nT=int(input())\nfor i in range(T):\n n,p = map(int, input().split())\n try:\n ans=myCalculator.power(n,p)\n print(ans)\n except Exception as e:\n print(e)\n","sub_path":"day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"185789479","text":"import pytesseract\nfrom selenium import webdriver\nfrom PIL import Image\n\n\ndef get_capture():\n browser = webdriver.Chrome()\n url = 'https://www.baidu.com'\n # Open the target URL\n browser.get(url)\n # Take a screenshot of the page\n browser.save_screenshot('./images/baidu.png')\n # Get the element holding the Baidu logo\n logo = browser.find_element_by_xpath('//div[@id=\"lg\"]/img[1]')\n # Use location to get the image's position\n # left\n left = logo.location['x']\n # right\n right = left + logo.size['width']\n # top\n top = logo.location['y']\n # bottom\n bottom = top + logo.size['height']\n # The position can also be entered by hand, but that requires exact coordinates\n rangle = (806, 382, 913, 415)\n\n # Open the saved page screenshot\n sc = Image.open('./images/baidu.png')\n # Crop the image to extract the logo.\n # The coordinates are all multiplied by 1.25 because Windows 10 scales text to 125% by default,\n # so they must be scaled back here, otherwise the crop will be wrong\n logo = sc.crop((left * 1.25, top * 1.25, right * 1.25, bottom * 1.25))\n # logo = sc.crop(rangle)\n logo.save('./images/logo.png')\n browser.quit()\n\n\nget_capture()\n","sub_path":"9.验证码的识别/2.验证码图片获取.py","file_name":"2.验证码图片获取.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251819068","text":"import re;\nimport math;\nimport random;\nimport socket;\nimport json, urllib.request;\n\nimport time;\nimport datetime;\n\nfrom ircbot.util import *;\n\nclass EchobotParser:\n\t\n\tdef __init__(self, bot):\n\t\tself.bot = bot;\n\t\t\n\t\tself.VALID_PLAYER_CHAT = re.compile(r'\\[(\\d{1,3})\\]\\s*(\\S+)?:(.+)');\n\t\tself.VALID_PLAYER_JOIN = re.compile(r'\\[(\\d+)\\]\\s+\\*\\*\\*\\s+?(.+?) 
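# The 1.25 factor in get_capture() above hardcodes Windows 10's default 125%
# display scaling, so the crop breaks on any other setting. The browser can
# report the actual ratio instead (execute_script is standard Selenium; the
# crop line mirrors the one above):
scale = browser.execute_script("return window.devicePixelRatio")
logo = sc.crop((left * scale, top * scale, right * scale, bottom * scale))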
has joined the game.+ip:(.+?)\\).+id:(.+)\\)');\n\t\tself.VALID_PLAYER_QUIT = re.compile(r'\\[(\\d+)\\]\\s+\\*\\*\\*\\s+?(.+?)\\shas left the game.');\n\t\t\n\t\tself.owner = None;\n\t\tself.ownerid = -1;\n\t\t\n\t\tself.start_time = datetime.datetime.now();\n\t\t\n\tdef parse(self, author, channel, message):\n\t\t\n\t\tself.parse_player_message(channel, message);\n\t\tself.parse_player_join(channel, message);\n\t\tself.parse_player_quit(channel, message);\n\t\t\n\tdef parse_player_quit(self, channel, message):\n\t\tresult = re.search(self.VALID_PLAYER_QUIT, message);\n\t\t\n\t\tif result:\n\t\t\tid = result.groups()[0];\n\t\t\tname = result.groups()[1];\n\t\t\t\n\t\t\tprint(\"%s (ID: %s) left the game.\" % (name, id));\n\t\t\t\n\t\t\tif id == self.ownerid and self.owner != None:\n\t\t\t\tself.send_message(channel, \"*** Owner %s (id: %s) left.\" % (self.owner, self.ownerid));\n\t\t\t\t\n\t\t\t\tself.owner = None;\n\t\t\t\tself.ownerid = -1;\n\t\t\t\n\tdef parse_player_join(self, channel, message):\n\t\tresult = re.search(self.VALID_PLAYER_JOIN, message);\n\t\t\t\n\t\tif result:\n\t\t\tid = result.groups()[0];\n\t\t\tplayer = result.groups()[1];\n\t\t\tip = result.groups()[2];\n\t\t\tgcpi = result.groups()[3];\n\t\t\t\n\t\t\tprint(\"%s (ID: %s) joined (ip: %s, id: %s)\" % (player, id, ip, gcpi));\n\t\t\t\n\t\t\tif gcpi == self.bot.gcpi:\n\t\t\t\tself.owner = player;\n\t\t\t\tself.ownerid = id;\n\t\t\t\t\n\t\t\t\tself.send_message(channel, \"** Found owner %s (id: %s)\" % (self.owner, self.ownerid));\n\t\t\t\n\t\t\t\n\tdef parse_player_message(self, channel, message):\n\t\tresult = re.search(self.VALID_PLAYER_CHAT, message);\n\n\t\tif result:\n\t\t\tid = result.groups()[0];\n\t\t\tplayer = result.groups()[1];\n\t\t\ttext = result.groups()[2];\n\n\t\t\tprint(\"[%s] %s: %s\" % (id, player, text));\n\n\t\t\tif self.ownerid == -1:\n\t\t\t\treturn;\n\t\t\t\t\n\t\t\tif id == self.ownerid:\t\t\t\t\n\t\t\t\tsplitText = [x.rstrip() for x in text.split(' ')];\n\n\t\t\t\tif splitText[1] == '~say':\n\t\t\t\t\tself.send_message(channel, \"!say \" + \" \".join(splitText[2:]));\n\t\t\t\t\n\t\t\t\tif splitText[1] == '~ping':\n\t\t\t\t\tself.send_message(channel, \"!say pong\");\n\t\t\t\t\n\t\t\t\tif splitText[1] == '~exec':\n\t\t\t\t\ttry:\n\t\t\t\t\t\texec(\" \".join(splitText[2:]));\n\t\t\t\t\t\t\n\t\t\t\t\texcept BaseException as e:\n\t\t\t\t\t\tself.send_message(channel, \"!say failed: \" + str(e));\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\tif splitText[1] == '~uptime':\n\t\t\t\t\ttime_delta = datetime.datetime.now() - self.start_time;\n\t\t\t\t\t\n\t\t\t\t\tself.send_message(channel, \"!say Up for \" + pretty_time_delta(time_delta.seconds) + \".\");\n\t\t\t\t\t\n\t\t\t\tif splitText[1] == '~eval':\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.send_message(channel, \"!say \" + str(eval(\" \".join(splitText[2:]))));\n\t\t\t\t\t\t\n\t\t\t\t\texcept BaseException as e:\n\t\t\t\t\t\tself.send_message(channel, \"!say failed: \" + str(e));\n\t\t\t\t\t\t\n\t\t\t\tif splitText[1] == '~quit':\n\t\t\t\t\tself.send_message(channel, \"!say \\o\");\n\t\t\t\t\ttime.sleep(2);\n\t\t\t\t\tself.bot.close();\n\t\t\t\t\texit(1);\n\t\t\n\t\t\tsplitText = [x.rstrip() for x in text.split(' ')];\n\t\t\t\n\t\t\tif splitText[1] == '~player':\n\t\t\t\tname = \" \".join(splitText[2:]);\n\t\t\t\t\n\t\t\t\tif name == '':\n\t\t\t\t\tself.send_message(channel, \"!msgex FFFFFF USAGE: ~player \");\n\t\t\t\t\treturn;\n\t\t\t\t\t\n\t\t\t\turl = \"http://pass.littlewhiteys.co.uk/stats/stats.php?name=\";\n\n\t\t\t\tcontent = urllib.request.urlopen(url + 
name);\n\t\t\t\tjson_data = json.loads(content.read().decode());\n\n\t\t\t\tif \"error\" in json_data:\n\t\t\t\t\tself.send_message(channel, \"!msgex FF0000 ERROR: Player '%s' not found.\" % (name));\n\t\t\t\telse:\n\t\t\t\t\tkills = json_data['kills'];\n\t\t\t\t\tdeaths = json_data['deaths'];\n\t\t\t\t\tname = json_data['name'];\n\t\t\t\t\t\n\t\t\t\t\tif deaths == 0:\n\t\t\t\t\t\tdeaths = 1;\n\t\t\t\t\t\t\n\t\t\t\t\tratio = kills / deaths;\n\t\t\t\t\t\n\t\t\t\t\tself.send_message(channel, \"!msgex EE9911 * Player %s: Kills: %d, Deaths: %d, Ratio: %.2f\" % (name, kills, deaths, ratio));\n\t\t\t\t\n\t\t\t\n\tdef send_message(self, channel, message):\n\t\tself.bot.connection.send_channel(channel, message);\n","sub_path":"ircbot/echobot_parse.py","file_name":"echobot_parse.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103368909","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom funcions import*\nimport sys\n\ndef select():\n sel=0\n while sel != 7:\n\n menux()\n sel=input('Introdueix la opcció desitjada del menu: ')\n if sel==1:\n dades(d)\n \n\n elif sel==2:\n delete(d)\n \n elif sel==3:\n menuy()\n change(d)\n \n elif sel==4:\n save(arxiu, d)\n\n elif sel==5:\n arxiu= raw_input('Introdueix nom del fitxer del User.')\n d=obrir(arxiu)\n\n elif sel==6:\n validar(d)\n \n\n else:\n print\n print\n print\n \n\n\nd={}\nselect()\n\n\n \n","sub_path":"eol91info.py","file_name":"eol91info.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"360578356","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 13 22:08:51 2019\n\n@author: masudulhasanmasudb\n\"\"\"\nimport threading\nimport time\nfrom subprocess import PIPE, Popen\nimport subprocess\nimport sys, traceback\nimport os.path\nfrom subprocess import check_output\n\n## HIGH I/0 = 1 i/o==6000\n## HIGH disk load = 2 disk load=32\n## HIGH disk load = 3 disk load=16\n## HIGH disk load = 4 disk load=8\n## HIGH disk load = 5 disk load=4\n## HIGH memory load = 6 \n## packet drop = 7 probability = .01\n##packet delay = 8 100ms 20ms\nlabel_value = 8\nshould_run = True\npid = 0\nclass myThread(threading.Thread):\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n \n def run(self):\n print(\"\\nStarting \" + self.name)\n run_command(self.name)\n print(\"\\nExiting \" + self.name)\n\n\ndef run_command(index):\n present_time = time.time()\n read_so_far = 0\n with open('temp_files/tmp_file_'+index+'.txt', 'rb') as f: \n while True: \n piece = f.read(1024) \n if not piece: \n break\n\nclass fileTransferThread(threading.Thread):\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n \n def run(self):\n print(\"\\nStarting \" + self.name)\n transfer_file(self.name)\n print(\"\\nExiting \" + self.name)\n\n \ndef transfer_file(i):\n global pid\n output_file = open(\"file_transfer_stat.txt\",\"a+\")\n comm_ss = ['globus-url-copy', '-vb','file:///fsx/files/large_file_'+str(i), 'ftp://172.31.21.192:50505/home/ubuntu/large_file']\n# comm_ss = ['globus-url-copy', '-vb','file:///fsx/files/test_file', 'ftp://172.31.21.192:50505/home/ubuntu/large_file']\n strings = \"\"\n proc = subprocess.Popen(comm_ss, stdout=subprocess.PIPE)\n pid = check_output(['pidof', '-s', 'globus-url-copy'])\n print(check_output(['pidof', '-s','globus-url-copy']))\n output_file.write(\"start time = 
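# The ~player handler above requests `url + name` without escaping, so a
# player name containing spaces or '&' yields a malformed query. A sketch
# using urllib.parse.quote from the standard library against the same
# endpoint:
import json, urllib.parse, urllib.request

def fetch_player_stats(name):
    url = "http://pass.littlewhiteys.co.uk/stats/stats.php?name="
    with urllib.request.urlopen(url + urllib.parse.quote(name), timeout=10) as resp:
        return json.loads(resp.read().decode())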
\"+time.ctime() + \"\\n\")\n start_time = time.time()\n while(True):\n line = str(proc.stdout.readline()).replace(\"\\r\", \"\\n\")\n strings+= line\n if not line.decode(\"utf-8\"):\n break\n strings.replace(\"\\r\", \"\\n\")\n output_file.write(strings+\"\\n\\n\")\n output_file.write(\"end time = \"+time.ctime() + \"\\n\")\n output_file.write(\"total time = \" + str(int(time.time() - start_time))+\"\\n\\n\")\n output_file.flush()\n\ndef set_thread_value(y):\n global current_Thread_number\n current_Thread_number = y\n \n\nclass readbackgroundThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.lock = threading.Lock()\n\n def run(self):\n# global current_Thread_number = 1\n for y in range(1,33):\n jobs = []\n self.lock.acquire()\n try:\n print('Acquired lock')\n set_thread_value(y)\n finally:\n self.lock.release()\n \n for x in range(y):\n thread1 = myThread(str(x))\n thread1.start()\n jobs.append(thread1)\n for t in jobs:\n t.join() \n\ndef process_mds_rpc(mdt_path):\n proc = Popen(['cat', mdt_path+\"/import\"], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n res_parts = res.split(\"\\n\")\n for metric_line in res_parts:\n if \"avg_waittime:\" in metric_line:\n s_index = metric_line.find(\":\")\n e_index = metric_line.find(\"usec\")\n avg_waittime = float(metric_line[s_index+1:e_index].strip())\n# print(avg_waittime)\n \n if \"inflight:\" in metric_line:\n s_index = metric_line.find(\":\")\n inflight = float(metric_line[s_index+1:].strip())\n# print(inflight)\n \n if \"unregistering:\" in metric_line:\n s_index = metric_line.find(\":\")\n unregistering = float(metric_line[s_index+1:].strip())\n# print(unregistering)\n \n if \"timeouts:\" in metric_line:\n s_index = metric_line.find(\":\")\n timeouts = float(metric_line[s_index+1:].strip())\n# print(timeouts)\n return avg_waittime, inflight, unregistering, timeouts\n\ndef collect_system_metrics(pid):\n print(pid)\n proc = Popen(['cat', '/proc/'+str(pid).strip()+'/io'], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n res_parts = res.split(\"\\n\")\n value_list = []\n for line in res_parts:\n if len(line.strip())>0:\n# print(line)\n index= line.rfind(\":\")\n value = int(line[index+1:].strip())\n# print(value)\n value_list.append(value)\n \n proc = Popen(['cat', '/proc/'+str(pid).strip()+'/stat'], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n res_parts = res.split(\" \")\n for line in res_parts:\n if len(line.strip())>0:\n# print(line)\n try:\n value = int(line.strip())\n value_list.append(value)\n except:\n traceback.print_exc()\n# print(value_list)\n proc = Popen(['ps','-p', str(pid).strip(), '-o', '%cpu,%mem'], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n res_parts = res.split(\"\\n\")\n for line in res_parts:\n if len(line.strip())>0:\n if \"%CPU\" not in line:\n parts = line.split(\" \")\n# print(parts)\n for x in parts:\n if len(x.strip())>0:\n value_list.append(float(x))\n return value_list\n\n\ndef collect_stat():\n proc = Popen(['ls', '-l', '/proc/fs/lustre/osc'], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n parts = res.split(\"\\n\")\n for x in range(1, len(parts)):\n ost_name_parts = parts[x].split(\" \")\n if \"fsx-OST0009\" in ost_name_parts[len(ost_name_parts) - 1]:\n OST_name = ost_name_parts[len(ost_name_parts) - 1]\n break\n ost_path = '/proc/fs/lustre/osc/' + OST_name\n mdt_path = \"/proc/fs/lustre/mdc/fsx-MDT0000-mdc-ffff88037f720800\"\n \n dst_ip = \"172.31.21.192\"\n 
comm_ss = ['ss', '-t', '-i', 'state', 'ESTABLISHED', 'dst', dst_ip]\n is_controller_port = True\n \n total_string = \"\"\n start = time.time()\n initial_time = time.time()\n total_rtt_value = 0\n total_pacing_rate = 0\n is_first_time = True\n avg_wait_time = 0\n total_wait_time = 0\n total_cwnd_value = 0\n total_rto_value = 0\n byte_ack = 0\n byte_ack_so_far = 0\n segs_out = 0\n seg_out_so_far = 0\n segs_in = 0\n seg_in_so_far = 0\n retrans = 0\n total_ssthresh_value =0\n total_ost_read = 0\n send = 0\n unacked = 0\n rcv_space = 0\n \n prev_req_wait_time = -1\n prev_active_req = -1\n prev_red_bytes = -1\n prev_ost_read = -1\n prev_ost_connect = -1\n prev_ost_statfs = -1\n prev_ldlm_cancel = -1\n prev_obd_ping = -1\n \n avg_req_number = 0\n total_req_number = 0\n total_read_bytes = 0\n total_ost_read = 0\n total_ost_connect = 0\n total_ost_statfs = 0\n total_ldlm_cancel = 0\n total_obd_ping = 0\n \n total_pending_page = 0\n total_pending_rpc = 0\n total_mss_value = 0\n \n mds_prev_req_wait_time, mds_prev_active_req, prev_mds_getattr, prev_mds_close, prev_mds_readpage, prev_mds_connect,\\\n prev_mds_get_root, prev_mds_statfs, mds_prev_ldlm_cancel, mds_prev_obd_ping = (-1,)*10\n \n mds_total_req_wait_time, mds_total_active_req, total_mds_getattr, total_mds_close, total_mds_readpage, total_mds_connect,\\\n total_mds_get_root, total_mds_statfs, mds_total_ldlm_cancel, mds_total_obd_ping = (0,)*10\n \n prev_md_close, prev_md_getattr, prev_md_intent_lock,\\\n prev_md_read_page, prev_md_revalidate_lock = (-1,)*5\n \n total_md_getattr, total_md_close, total_md_readpage,\\\n total_md_intent_lock, total_md_revalidate_lock = (0,)*5\n \n time_diff = 0\n epoc_time = 0\n has_transfer_started = False\n \n sleep_time = .1\n \n epoc_count = 0\n main_output_string = \"\"\n\n while(1):\n ### NETWORK METRICS ###\n try:\n ss_proc = subprocess.Popen(comm_ss, stdout=subprocess.PIPE)\n line_in_ss = str(ss_proc.stdout.read())\n if line_in_ss.count(\"172.31.21.192\")==2:\n if (is_first_time):\n initial_time = time.time()\n is_first_time = False\n \n parts = line_in_ss.split(\"\\n\")\n \n time_diff+=1\n epoc_time+=1\n \n for x in range(len(parts)):\n if \"172.31.21.192\" in parts[x] and \"50505\" not in parts[x]:\n \n first_parts = parts[x].split(\" \")\n first_list = []\n for item in first_parts:\n if len(item.strip())>0:\n first_list.append(item)\n send_buffer_value = int(first_list[1].strip())\n \n if (is_first_time):\n initial_time = time.time()\n is_first_time = False\n \n metrics_line = parts[x+1].strip(\"\\\\t\").strip()\n metrics_parts = metrics_line.split(\" \")\n # print(metrics_parts)\n for y in range(len(metrics_parts)):\n \n if \"rto\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = float(metrics_parts[y][s_index+1:])\n total_rto_value+=value\n \n if \"rtt\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n e_index = metrics_parts[y].find(\"/\")\n value = float(metrics_parts[y][s_index+1:e_index])\n total_rtt_value+=value\n \n if \"mss\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = float(metrics_parts[y][s_index+1:])\n total_mss_value+=value\n \n if \"cwnd\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = float(metrics_parts[y][s_index+1:])\n total_cwnd_value+=value\n \n if \"ssthresh\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = float(metrics_parts[y][s_index+1:])\n total_ssthresh_value+=value\n \n if \"bytes_acked\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = 
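# The block above extracts each ss field by searching for ":" and slicing; a
# single compiled regex covers all the "key:value" and "key:a/b" tokens in one
# pass (a sketch; fields reported as two tokens, like "send 473Mbps" or
# "pacing_rate 938Mbps", still need the separate handling used above):
import re

SS_FIELD = re.compile(r'(\w+):([0-9.]+)')

def parse_ss_fields(metrics_line):
    """Map field names to floats; for a/b pairs such as rtt:0.4/0.2, keep a."""
    return {key: float(val) for key, val in SS_FIELD.findall(metrics_line)}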
float(metrics_parts[y][s_index+1:]) \n byte_ack+=(value-byte_ack_so_far)\n byte_ack_so_far = value\n \n if \"segs_out\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = float(metrics_parts[y][s_index+1:])\n segs_out+=(value-seg_out_so_far)\n seg_out_so_far = value\n \n if \"segs_in\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = float(metrics_parts[y][s_index+1:])\n segs_in+=(value-seg_in_so_far)\n seg_in_so_far = value\n \n if \"send\" in metrics_parts[y]:\n# match = re.compile(\"[^\\W\\d]\").search(s)\n# index = metrics_parts[y+1].find(\"Mbps\")\n# value = float(metrics_parts[y+1][:index])\n value = metrics_parts[y+1].strip()\n send=value\n \n if \"pacing_rate\" in metrics_parts[y]:\n# index = metrics_parts[y+1].find(\"Mbps\")\n# p_rate = float(metrics_parts[y+1][:index])\n value = metrics_parts[y+1].strip()\n total_pacing_rate=value\n \n if \"unacked\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = float(metrics_parts[y][s_index+1:])\n unacked += value\n \n if \"retrans\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n e_index = metrics_parts[y].find(\"/\")\n value = float(metrics_parts[y][s_index+1:e_index])\n retrans += value\n \n if \"rcv_space\" in metrics_parts[y]:\n s_index = metrics_parts[y].find(\":\")\n value = float(metrics_parts[y][s_index+1:])\n rcv_space += value\n \n ### OST METRICS ###\n \n \n proc = Popen(['cat', ost_path+\"/stats\"], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n res_parts = res.split(\"\\n\")\n for metric_line in res_parts:\n if \"req_waittime\" in metric_line:\n tokens = str(metric_line).split(\" \")\n wait_time = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_req_wait_time == -1):\n prev_req_wait_time = wait_time\n elif(time_diff>1):\n diff = wait_time - prev_req_wait_time\n total_wait_time+=(diff/(1000000))\n prev_req_wait_time = wait_time\n \n if \"req_active\" in metric_line:\n tokens = str(metric_line).split(\" \")\n req_number = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_active_req== -1):\n prev_active_req = req_number\n elif(time_diff>1):\n diff = req_number - prev_active_req\n total_req_number+= diff\n prev_active_req = req_number\n \n if \"read_bytes\" in metric_line:\n tokens = str(metric_line).split(\" \")\n read_bytes = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_red_bytes == -1):\n prev_red_bytes = read_bytes\n elif(time_diff>1):\n diff = read_bytes - prev_red_bytes\n total_read_bytes+= (diff/(1024*1024))\n prev_red_bytes = read_bytes\n \n if \"ost_read\" in metric_line:\n tokens = str(metric_line).split(\" \")\n ost_read = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_ost_read == -1):\n prev_ost_read = ost_read\n elif(time_diff>1):\n diff = ost_read - prev_ost_read\n total_ost_read+= (diff/(1000000))\n prev_ost_read = ost_read\n \n if \"ost_connect\" in metric_line:\n tokens = str(metric_line).split(\" \")\n ost_connect = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_ost_read == -1):\n prev_ost_connect = ost_connect\n elif(time_diff>1):\n diff = ost_connect - prev_ost_connect\n total_ost_connect+= (diff/(1000000))\n prev_ost_connect = ost_connect\n \n if \"ost_statfs\" in metric_line:\n tokens = str(metric_line).split(\" \")\n ost_statfs = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_ost_statfs == -1):\n prev_ost_statfs = ost_statfs\n elif(time_diff>1):\n diff = ost_statfs - prev_ost_statfs\n total_ost_statfs+= (diff/(1000000))\n prev_ost_statfs = 
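# Each Lustre counter above repeats the same bookkeeping: a prev_* sentinel of
# -1, a first-sample branch, a delta, and a running total. A small helper class
# expressing that pattern once (illustrative only; the per-counter unit scaling
# the script applies is left to the caller):
class CounterDelta:
    """Accumulate the growth of a monotonically increasing counter."""
    def __init__(self):
        self.prev = None   # None plays the role of the -1 sentinel above
        self.total = 0.0

    def update(self, value):
        if self.prev is not None:
            self.total += value - self.prev
        self.prev = value

    def reset_window(self):
        self.total = 0.0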
ost_statfs\n \n if \"ldlm_cancel\" in metric_line:\n tokens = str(metric_line).split(\" \")\n ldlm_cancel = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_ldlm_cancel == -1):\n prev_ldlm_cancel = ldlm_cancel\n elif(time_diff>1):\n diff = ldlm_cancel - prev_ldlm_cancel\n total_ldlm_cancel+= (diff/(1000000))\n prev_ldlm_cancel = ldlm_cancel\n \n if \"obd_ping\" in metric_line:\n tokens = str(metric_line).split(\" \")\n obd_ping = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_obd_ping == -1):\n prev_obd_ping = obd_ping\n elif(time_diff>1):\n diff = obd_ping - prev_obd_ping\n total_obd_ping+= (diff/(1000000))\n prev_obd_ping = obd_ping\n \n proc = Popen(['cat', ost_path+\"/rpc_stats\"], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n res_parts = res.split(\"\\n\")\n for metric_line in res_parts:\n if \"pending read pages\" in metric_line:\n index = metric_line.find(\":\")\n value = float(metric_line[index+1:])\n total_pending_page+=value\n \n if \"read RPCs in flight\" in metric_line:\n index = metric_line.find(\":\")\n value = float(metric_line[index+1:])\n total_pending_rpc+=value\n \n ### MDT METRICS ###\n \n \n proc = Popen(['cat', mdt_path+\"/stats\"], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n res_parts = res.split(\"\\n\")\n for metric_line in res_parts:\n if \"req_waittime\" in metric_line:\n tokens = str(metric_line).split(\" \")\n wait_time = float(tokens[len(tokens)-2])\n if (time_diff ==1 and mds_prev_req_wait_time == -1):\n mds_prev_req_wait_time = wait_time\n elif(time_diff>1):\n diff = wait_time - mds_prev_req_wait_time\n mds_total_req_wait_time+=(diff/(1000000))\n mds_prev_req_wait_time = wait_time\n \n if \"req_active\" in metric_line:\n tokens = str(metric_line).split(\" \")\n req_number = float(tokens[len(tokens)-2])\n if (time_diff ==1 and mds_prev_active_req== -1):\n mds_prev_active_req = req_number\n elif(time_diff>1):\n diff = req_number - mds_prev_active_req\n mds_total_active_req+= diff\n mds_prev_active_req = req_number\n \n if \"mds_getattr\" in metric_line:\n tokens = str(metric_line).split(\" \")\n mds_getattr = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_mds_getattr == -1):\n prev_mds_getattr = mds_getattr\n elif(time_diff>1):\n diff = mds_getattr - prev_mds_getattr\n total_mds_getattr+= (diff/(1000000))\n prev_mds_getattr = mds_getattr\n \n if \"mds_close\" in metric_line:\n tokens = str(metric_line).split(\" \")\n mds_close = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_mds_close == -1):\n prev_mds_close = mds_close\n elif(time_diff>1):\n diff = mds_close - prev_mds_close\n total_mds_close+= (diff/(1000000))\n prev_mds_close = mds_close\n \n \n if \"mds_readpage\" in metric_line:\n tokens = str(metric_line).split(\" \")\n mds_readpage = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_mds_readpage == -1):\n prev_mds_readpage = mds_readpage\n elif(time_diff>1):\n diff = mds_readpage - prev_mds_readpage\n total_mds_readpage+= (diff/(1000000))\n prev_mds_readpage = mds_readpage\n \n if \"mds_connect\" in metric_line:\n tokens = str(metric_line).split(\" \")\n mds_connect = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_mds_connect == -1):\n prev_mds_connect = mds_connect\n elif(time_diff>1):\n diff = mds_connect - prev_mds_connect\n total_mds_connect+= (diff/(1000000))\n prev_mds_connect = mds_connect\n \n if \"mds_get_root\" in metric_line:\n tokens = str(metric_line).split(\" \")\n mds_get_root = float(tokens[len(tokens)-2])\n if (time_diff ==1 
and prev_mds_get_root == -1):\n prev_mds_get_root = mds_get_root\n elif(time_diff>1):\n diff = mds_get_root - prev_mds_get_root\n total_mds_get_root+= (diff/(1000000))\n prev_mds_get_root = mds_get_root\n \n if \"mds_statfs\" in metric_line:\n tokens = str(metric_line).split(\" \")\n mds_statfs = float(tokens[len(tokens)-2])\n if (time_diff ==1 and prev_mds_statfs == -1):\n prev_mds_statfs = mds_statfs\n elif(time_diff>1):\n diff = mds_statfs - prev_mds_statfs\n total_mds_statfs+= (diff/(1000000))\n prev_mds_statfs = mds_statfs\n \n if \"ldlm_cancel\" in metric_line:\n tokens = str(metric_line).split(\" \")\n ldlm_cancel = float(tokens[len(tokens)-2])\n if (time_diff ==1 and mds_prev_ldlm_cancel == -1):\n mds_prev_ldlm_cancel = ldlm_cancel\n elif(time_diff>1):\n diff = ldlm_cancel - mds_prev_ldlm_cancel\n mds_total_ldlm_cancel+= (diff/(1000000))\n mds_prev_ldlm_cancel = ldlm_cancel\n \n if \"obd_ping\" in metric_line:\n tokens = str(metric_line).split(\" \")\n obd_ping = float(tokens[len(tokens)-2])\n if (time_diff ==1 and mds_prev_obd_ping == -1):\n mds_prev_obd_ping = obd_ping\n elif(time_diff>1):\n diff = obd_ping - mds_prev_obd_ping\n mds_total_obd_ping+= (diff/(1000000))\n mds_prev_obd_ping = obd_ping\n \n proc = Popen(['cat', mdt_path+\"/md_stats\"], universal_newlines=True, stdout=PIPE)\n res = proc.communicate()[0]\n res_parts = res.split(\"\\n\")\n for metric_line in res_parts:\n if \"close\" in metric_line:\n tokens = str(metric_line).split(\" \")\n close = float(tokens[len(tokens)-3])\n if (time_diff == 1 and prev_md_close == -1):\n prev_md_close = close\n elif(time_diff>1):\n diff = close - prev_md_close\n total_md_close +=diff\n prev_md_close = close\n \n if \"getattr\" in metric_line:\n tokens = str(metric_line).split(\" \")\n getattr_ = float(tokens[len(tokens)-3])\n if (time_diff == 1 and prev_md_getattr == -1):\n prev_md_getattr = getattr_\n elif(time_diff>1):\n diff = getattr_ - prev_md_getattr\n total_md_getattr +=diff\n prev_md_getattr = getattr_\n \n if \"intent_lock\" in metric_line:\n tokens = str(metric_line).split(\" \")\n intent_lock = float(tokens[len(tokens)-3])\n if (time_diff == 1 and prev_md_intent_lock == -1):\n prev_md_intent_lock = intent_lock\n elif(time_diff>1):\n diff = intent_lock - prev_md_intent_lock\n total_md_intent_lock +=diff\n prev_md_intent_lock = intent_lock\n \n if \"read_page\" in metric_line:\n tokens = str(metric_line).split(\" \")\n read_page = float(tokens[len(tokens)-3])\n if (time_diff == 1 and prev_md_read_page == -1):\n prev_md_read_page = read_page\n elif(time_diff>1):\n diff = read_page - prev_md_read_page\n total_md_readpage +=diff\n prev_md_read_page = read_page\n \n if \"revalidate_lock\" in metric_line:\n tokens = str(metric_line).split(\" \")\n revalidate_lock = float(tokens[len(tokens)-3])\n if (time_diff == 1 and prev_md_revalidate_lock == -1):\n prev_md_revalidate_lock = revalidate_lock\n elif(time_diff>1):\n diff = revalidate_lock - prev_md_revalidate_lock\n total_md_revalidate_lock +=diff\n prev_md_revalidate_lock = revalidate_lock\n \n \n \n if(time_diff>=(.1/sleep_time)):\n \n avg_rto_value = total_rto_value/time_diff\n avg_rtt_value = total_rtt_value/time_diff\n avg_mss_value = total_mss_value/time_diff\n avg_cwnd_value = total_cwnd_value/time_diff\n avg_ssthresh_value = total_ssthresh_value/time_diff\n avg_byte_ack = byte_ack/(1024*1024)\n avg_seg_out = segs_out\n avg_seg_in = segs_in\n# avg_send_value = send/time_diff\n avg_send_value = send\n# p_avg_value = total_pacing_rate/time_diff\n p_avg_value = 
total_pacing_rate\n avg_unacked_value = unacked\n avg_retrans = retrans/time_diff\n avg_rcv_space = rcv_space/time_diff\n avg_pending_page = total_pending_page/time_diff\n avg_rpc = total_pending_rpc/time_diff\n \n # print(\"rtt = \"+str(avg_rtt_value)+\" pacing_rate = \"+str(p_avg_value) + \" cwnd = \"+str(avg_cwnd_value)+\" rto = \"+str(avg_rto_value))\n # print(\"byte ack = \"+str(avg_byte_ack)+\" seg out = \"+str(avg_seg_out) + \" Retrans = \"+str(retrans))\n # print(\"mss = \"+str(avg_mss_value)+\" ssthresh = \"+str(avg_ssthresh_value) + \" seg in = \"+str(avg_seg_in))\n # print(\"send = \"+str(avg_send_value)+\" unacked = \"+str(avg_unacked_value) + \" rcv space = \"+str(avg_rcv_space))\n # \n # \n # print(\"wait time: \"+str(total_wait_time)+\" req number \"+str(total_req_number) + \" read bytes \"+str(total_read_bytes))\n # print(\"ost read: \"+str(total_ost_read)+\" ost_connect \"+str(total_ost_connect) + \" ost_statfs \"+str(total_ost_statfs))\n # print(\"ldlm_cancel: \"+str(total_ldlm_cancel)+\" obd_ping \"+str(total_obd_ping))\n # \n # print(\"mds wait time: \"+str(mds_total_req_wait_time)+\" mds req number \"+str(mds_total_active_req) + \" mds_getattr \"+str(total_mds_getattr))\n # print(\"mds close: \"+str(total_mds_close)+\" mds read page \"+str(total_mds_readpage) + \" mds connect \"+str(total_mds_connect))\n # print(\"mds get root: \"+str(total_mds_get_root)+\" mds statfs \"+str(total_mds_statfs)+\" mds ldlm cancel \"+str(mds_total_ldlm_cancel))\n # print(\"mds obd ping: \"+str(mds_total_obd_ping))\n # \n # \n # print(\"md close: \"+str(total_md_close)+\" md read page \"+str(total_md_readpage) + \" mds getattr \"+str(total_md_getattr))\n # print(\"md intent_lock: \"+str(total_md_intent_lock)+\" md revalidate_lock \"+str(total_md_revalidate_lock))\n # \n # print(\"pending page \"+str(avg_pending_page)+\" RPC \"+str(avg_rpc)+\"\\n\\n\")\n # \n mdt_rpc_avg_waittime, rpc_inflight, rpc_unregistering, rpc_timeouts = process_mds_rpc(mdt_path)\n # print(\"rpc avg waitime: \"+str(mdt_rpc_avg_waittime)+\" rpc_inflight \"+str(rpc_inflight) + \" rpc_unregistering \"+str(rpc_unregistering))\n # print(\"rpc_timeouts: \"+str(rpc_timeouts))\n \n system_value_list = collect_system_metrics(pid)\n # print(system_value_list)\n \n \n \n \n \n output_string = str(avg_rtt_value)+\",\"+str(p_avg_value) + \",\"+str(avg_cwnd_value)+\",\"+str(avg_rto_value)+\",\"+\\\n str(avg_byte_ack)+\",\"+str(avg_seg_out) + \",\"+str(retrans)+\",\"+\\\n str(avg_mss_value)+\",\"+str(avg_ssthresh_value) + \",\"+str(avg_seg_in)+\",\"+\\\n str(avg_send_value)+\",\"+str(avg_unacked_value) + \",\"+str(avg_rcv_space)+\",\"+\\\n str(total_wait_time)+\",\"+str(total_req_number) + \",\"+str(total_read_bytes)+\",\"+\\\n str(total_ost_read)+\",\"+str(total_ost_connect) + \",\"+str(total_ost_statfs)+\",\"+\\\n str(total_ldlm_cancel)+\",\"+str(total_obd_ping) +\",\"+\\\n str(mds_total_req_wait_time)+\",\"+str(mds_total_active_req) + \",\"+str(total_mds_getattr)+\",\"+\\\n str(total_mds_close)+\",\"+str(total_mds_readpage) + \",\"+str(total_mds_connect)+\",\"+\\\n str(total_mds_get_root)+\",\"+str(total_mds_statfs) + \",\"+str(mds_total_ldlm_cancel)+\",\"+\\\n str(mds_total_obd_ping)+\",\"+\\\n str(total_md_close)+\",\"+str(total_md_readpage) + \",\"+str(total_md_getattr)+\",\"+\\\n str(total_md_intent_lock)+\",\"+str(total_md_revalidate_lock)+\",\"+\\\n str(avg_pending_page)+\",\"+str(avg_rpc)+\",\"+\\\n str(mdt_rpc_avg_waittime)+\",\"+str(rpc_inflight) + \",\"+str(rpc_unregistering)+\",\"+\\\n 
str(rpc_timeouts)+\",\"+str(send_buffer_value)\n \n global label_value\n for item in system_value_list:\n output_string+=\",\"+str(item)\n output_string+=\",\"+str(label_value)+\"\\n\"\n main_output_string+=output_string\n \n epoc_count+=1\n if(epoc_count==100):\n print(\"tarnsfering file....\")\n epoc_count = 0\n write_thread =fileWriteThread(main_output_string, label_value)\n write_thread.start()\n main_output_string=\"\"\n \n \n time_diff = 0\n \n total_rto_value, total_rtt_value, total_mss_value, total_cwnd_value, total_ssthresh_value, byte_ack, segs_out, segs_in,\\\n send, total_pacing_rate, unacked, retrans, rcv_space = (0,)*13\n \n total_wait_time, total_ost_read, total_req_number, total_read_bytes, total_ost_connect, total_ost_statfs, total_ldlm_cancel,\\\n total_obd_ping = (0,)*8\n \n mds_total_req_wait_time, mds_total_active_req, total_mds_getattr, total_mds_close, total_mds_readpage, total_mds_connect,\\\n total_mds_get_root, total_mds_statfs, mds_total_ldlm_cancel, mds_total_obd_ping = (0,)*10\n \n total_pending_rpc = 0\n total_pending_page = 0\n except:\n traceback.print_exc()\n time.sleep(sleep_time)\n\nclass fileWriteThread(threading.Thread):\n def __init__(self, metric_string, label_value):\n threading.Thread.__init__(self)\n self.metric_string = metric_string\n self.label_value = label_value\n\n def run(self):\n \n output_string = \"avg_rtt_value, p_avg_value ,avg_cwnd_value,avg_rto_value,\"+\\\n \"avg_byte_ack,avg_seg_out ,retrans,\"+\\\n \"avg_mss_value,avg_ssthresh_value ,avg_seg_in,\"+\\\n \"avg_send_value,avg_unacked_value ,avg_rcv_space,\"+\\\n \"total_wait_time,total_req_number ,total_read_bytes,\"+\\\n \"total_ost_read,total_ost_connect ,total_ost_statfs,\"+\\\n \"total_ldlm_cancel,total_obd_ping,\"+\\\n \"mds_total_req_wait_time,mds_total_active_req ,total_mds_getattr,\"+\\\n \"total_mds_close,total_mds_readpage ,total_mds_connect,\"+\\\n \"total_mds_get_root,total_mds_statfs ,mds_total_ldlm_cancel,\"+\\\n \"mds_total_obd_ping,\"+\\\n \"total_md_close,total_md_readpage ,total_md_getattr,\"+\\\n \"total_md_intent_lock,total_md_revalidate_lock,\"+\\\n \"avg_pending_page,avg_rpc,\"+\\\n \"mdt_rpc_avg_waittime,rpc_inflight ,rpc_unregistering,\"+\\\n \"rpc_timeouts,send_buffer_value,label_value\\n\"\n \n output_file = open(\"dataset_\"+str(self.label_value)+\".csv\",\"a+\")\n output_file.write(str(self.metric_string))\n output_file.flush()\n output_file.close()\n\n# if os.path.isfile(\"dataset.csv\"):\n# output_file = open(\"dataset.csv\",\"a+\")\n# output_file.write(str(self.metric_string))\n# output_file.flush()\n# output_file.close()\n# else:\n# output_file = open(\"dataset.csv\",\"a+\")\n# output_file.write(str(output_string))\n# output_file.write(str(self.metric_string))\n# output_file.flush()\n# output_file.close()\n \n\n\nclass statThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n collect_stat()\n \n\nclass wholeStatThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n comm_ss = ['python','manager_program_for_file_properties.py']\n proc = subprocess.Popen(comm_ss, stdout=subprocess.PIPE)\n proc.communicate()\n\n\nclass AnomalyThread(threading.Thread):\n def __init__(self, anomaly_type):\n threading.Thread.__init__(self)\n self.type = anomaly_type\n\n def run(self):\n if(int(self.type)==1):\n# stress --cpu 8 -i 1000 -d 1000 -m 15 --timeout 200s\n comm_ss = ['stress' '-i' '5000']\n proc = subprocess.Popen(comm_ss, stdout=subprocess.PIPE)\n 
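# In AnomalyThread.run() above, ['stress' '-i' '5000'] has no commas, so
# Python's implicit string concatenation collapses it into the single argument
# 'stress-i5000' and Popen raises FileNotFoundError (the commented-out killall
# invocation further down has the same problem). The intended lists are
# presumably:
comm_ss = ['stress', '-i', '5000']
# and later:
# comm_ss = ['killall', 'stress']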
proc.communicate()\n\n#test_thread =testThread()\n#test_thread.start()\n\n\n\nstat_thread = statThread()\nstat_thread.start()\n\n#whole_stat_thread = wholeStatThread()\n#whole_stat_thread.start()\n\n#anomaly_thread = AnomalyThread(1)\n#anomaly_thread.start()\n\nfor x in range(50):\n file_transfer_thread = fileTransferThread(str(x%10))\n file_transfer_thread.start()\n file_transfer_thread.join()\n\n#comm_ss = ['killall' 'stress']\n#proc = subprocess.Popen(comm_ss, stdout=subprocess.PIPE)\n#proc.communicate()\n\n#\n#while(1):\n# background_thread = readbackgroundThread()\n# background_thread.start()\n# background_thread.join()\n#print(\"background thread finished!\")\n#should_run = False\n#sys.exit(0)","sub_path":"create_dataset_final.py","file_name":"create_dataset_final.py","file_ext":"py","file_size_in_byte":37419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652127859","text":"#!/usr/bin/python3\r\n\r\nimport requests as req\r\nfrom bs4 import BeautifulSoup as bs\r\nimport os,os.path\r\nimport re\r\n\r\n\r\ndef format_output():\r\n print ('\\n'*2)\r\n print ('%' * 100)\r\n\r\n\r\nlink='http://www.pythonforbeginners.com'\r\n\r\n# REQUESTS ALONG WITH TEXT WILL BE USED TO DOWNLOAD THE CONTENTS OF THE WEBSITE\r\n# MAKE SURE YOU USE TEXT METHOD AS YOU NEED THE WHOLE HMTL CONTENTS OF THE FILE\r\nhtml_data=req.get(url=link,stream=True).text\r\n\r\nf=open('html_data','w')\r\nf.write(html_data)\r\n\r\n\r\nprint ('METHOD #1 TO READ THE WEBSITE:=',html_data)\r\nformat_output()\r\n\r\n# TO MAKE THE HTML DATA MORE READABLE...\r\nsoup=bs(html_data,\"lxml\")\r\n\r\nformat_output()\r\nprint ('METHOD #2 TO READ THE WEBSITE:=',soup.prettify())\r\n\r\n\r\nformat_output()\r\n# PRINTING OUT ALL THE HEADLINES OF THE URL\r\nprint (soup.head.title.text)\r\n\r\nformat_output()\r\n# PRINTING OUT ALL THE HEADLINES OF THE URL\r\nhentry =soup.find('li',class_='hentry')\r\n\r\nprint (hentry.prettify())\r\n\r\nformat_output()\r\nheadline1=hentry.h2.a.get(\"title\")\r\nheadline2=hentry.h2.a.text\r\nif headline1==headline2:\r\n print ('PASS:-',headline1,':::',headline2)\r\n\r\nformat_output()\r\ncontents=hentry.find('div',class_='post-bodycopy cf')\r\nprint (contents.text)\r\n\r\n\r\nformat_output()\r\nulcontents=soup.find('ul',class_='nav nav-list')\r\nprint (ulcontents.prettify())\r\n\r\n\r\nformat_output()\r\n# THIS WILL PRINT ALL THE HEADLINES AND ITS SUMMARY\r\nfor hentry in ulcontents.find_all('li',class_='hentry'):\r\n headline=hentry.h2.a.text\r\n print ('HEADLINE:-',headline)\r\n for divdata in hentry.find_all('div', class_='post-bodycopy cf'):\r\n summary=divdata.text\r\n print ('SUMMARY:-',summary)\r\n print ('\\n' *2)\r\n \r\n\r\nformat_output()\r\n# THIS WILL PRINT ALL THE META FROM THE HTML_PAGE\r\nfor i in soup.find_all('meta'):\r\n print(i)\r\n\r\n\r\nformat_output()\r\n# CONTENTS PRINTS CHILDREN OF THE PARENT TAG. 1ST IDENTIFY THE PARENT TAG\r\n# CONTENTS IS A LIST. 
LOOP OVER THE LIST TO PRINT THE CHILDREN OF A PARENT TAG\r\nprint ('TYPE of soup.body.ul.contents IS LIST:-',len(soup.body.ul.contents))\r\nfor i in range(0,len(soup.body.ul.contents)):\r\n print (soup.body.ul.contents[i])\r\n\r\n\r\nformat_output()\r\nfor child in soup.body.ul.children:\r\n print ('child:-',child)\r\n \r\nformat_output()\r\nfor child in soup.body.ul.descendants:\r\n print (child)\r\n\r\n\r\nformat_output()\r\n# THIS WILL PRINT ALL THE TEXTS FROM THE HTML PAGE\r\n# THIS WILL INCLUDE THE CONTENTS OF JAVASCRIPT CODE\r\nprint (soup.get_text())\r\n\r\n# THE BELOW CODE IS SAME AS soup.get_text()\r\nformat_output()\r\nfor i in soup.stripped_strings:\r\n print (repr(i))\r\n\r\n\r\n# THIS WILL PRINT ALL THE TEXTS FROM THE HTML PAGE\r\n# THIS WILL INCLUDE THE CONTENTS OF JAVASCRIPT CODE\r\nformat_output()\r\nfor m in soup.find_all('meta'):\r\n print (m,':::',m.attrs)\r\n\r\n\r\n","sub_path":"webscraping/alternate_bs4_sample_script.py","file_name":"alternate_bs4_sample_script.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124066968","text":"\"\"\"This module contains simple helper functions \"\"\"\nfrom __future__ import print_function\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport os\nimport h5py\n\ndef tensor2im(input_image, label=None, imtype=np.float32):\n \"\"\"Converts a Tensor array into a numpy array.\n \n Parameters:\n input_image (tensor) -- the input tensor array.\n imtype (type) -- the desired type of the converted numpy array\n \"\"\"\n if not isinstance(input_image, np.ndarray):\n if isinstance(input_image, torch.Tensor):\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy() # Convert it into a numpy array\n del image_tensor\n \n if label in ['fake_A', 'real_A', 'rec_A']:\n image_numpy = destack(image_numpy)\n if image_numpy.shape[0] == 1: # grayscale to RGB\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n elif image_numpy.shape[0] == 3: # a rgb image\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling\n elif image_numpy.shape[0] == 31: # maybe something else, for example a hyperspectral image\n image_numpy = denormalize(image_numpy)\n image_numpy = np.transpose(image_numpy, (1, 2, 0))\n \n else:\n image_numpy = input_image\n return image_numpy.astype(imtype)\n\n\ndef diagnose_network(net, name='network'):\n \"\"\"Calculate and print the mean of average absolute(gradients)\n\n Parameters:\n net (torch network) -- Torch network\n name (str) -- the name of the network\n \"\"\"\n mean = 0.0\n count = 0\n for param in net.parameters():\n if param.grad is not None:\n mean += torch.mean(torch.abs(param.grad.data))\n count += 1\n if count > 0:\n mean = mean / count\n print(name)\n print(mean)\n\n\ndef save_image(image_numpy, image_path, aspect_ratio=1.0):\n \"\"\"Save a numpy image to the disk\n\n Parameters:\n image_numpy (numpy array) -- input numpy array\n image_path (str) -- the path of the image\n \"\"\"\n\n image_pil = Image.fromarray(image_numpy)\n h, w, _ = image_numpy.shape\n\n if aspect_ratio > 1.0:\n image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)\n if aspect_ratio < 1.0:\n image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)\n image_pil.save(image_path)\n\n\ndef print_numpy(x, val=True, shp=False):\n \"\"\"Print the mean, min, max, median, std, and size of a numpy array\n\n 
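# The headline/summary loop in the scraping script above chains find_all()
# calls; BeautifulSoup's CSS selector API expresses the same walk more
# compactly (a sketch against the same class names used above):
for entry in soup.select('ul.nav.nav-list li.hentry'):
    headline = entry.select_one('h2 a').text
    summary = entry.select_one('div.post-bodycopy.cf').text
    print('HEADLINE:-', headline)
    print('SUMMARY:-', summary)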
Parameters:\n val (bool) -- if print the values of the numpy array\n shp (bool) -- if print the shape of the numpy array\n \"\"\"\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))\n\n\ndef mkdirs(paths):\n \"\"\"create empty directories if they don't exist\n\n Parameters:\n paths (str list) -- a list of directory paths\n \"\"\"\n if isinstance(paths, list) and not isinstance(paths, str):\n for path in paths:\n mkdir(path)\n else:\n mkdir(paths)\n\n\ndef mkdir(path):\n \"\"\"create a single empty directory if it didn't exist\n\n Parameters:\n path (str) -- a single directory path\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef normalize(data, max_=4096, denormalize=False):\n \"\"\"\n Using the ICVL BGU dataset, the max and min values were computed. \n Normalizing to 0-1\n \"\"\"\n HSI_MAX = max_\n HSI_MIN = 0\n\n NEW_MAX = 1\n NEW_MIN = -1\n \n scaled = (data - HSI_MIN) * (NEW_MAX - NEW_MIN)/(HSI_MAX - HSI_MIN) + NEW_MIN\n return scaled.astype(np.float32)\n\ndef denormalize(data, max_=4096):\n \"\"\"\n Using the ICVL BGU dataset, the max and min values were computed. \n Normalizing to 0-1\n \"\"\"\n HSI_MAX = max_\n HSI_MIN = 0\n\n NEW_MAX = 1\n NEW_MIN = -1\n scaled = (data - NEW_MIN) * (HSI_MAX - HSI_MIN)/(NEW_MAX - NEW_MIN) + HSI_MIN \n return scaled.astype(np.float32)\n\ndef destack(data):\n img = denormalize(data, max_=1)\n #print(np.shape(img))\n _R = np.mean(img[:11], axis=0)\n _G = np.mean(img[11:21], axis=0)\n _B = np.mean(img[21:], axis=0)\n \n hsi_img = np.array((_R, _G, _B))\n #print(np.shape(hsi_img))\n return hsi_img\n\ndef calc_mean_std(feat, eps=1e-5):\n return np.mean(feat), np.std(feat)\n\n\ndef adaptive_instance_normalization(content, style, channels=31):\n normalized_feat = []\n for i in range(channels):\n style_mean, style_std = calc_mean_std(style[i])\n content_mean, content_std = calc_mean_std(content[i])\n feat = (content[i] - content_mean) / content_std\n normalized_feat.append(feat * style_std + style_mean)\n normalized_feat = np.array(normalized_feat)\n #print(np.shape(normalized_feat))\n return normalized_feat","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"386160861","text":"'''\r\n@author: Parth Ahir\r\n'''\r\n\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom time import sleep\r\nimport datetime\r\nimport pandas as pd\r\n\r\n\r\ndriver = None\r\nLink = \"https://web.whatsapp.com/\"\r\nwait = None\r\n\r\ndef whatsapp_login():\r\n global wait, driver, Link\r\n chrome_options = Options()\r\n chrome_options.add_argument('--user-data-dir=./User_Data')\r\n driver = webdriver.Chrome(options=chrome_options)\r\n wait = WebDriverWait(driver, 2)\r\n print(\"SCAN YOUR QR CODE FOR WHATSAPP WEB IF DISPLAYED\")\r\n driver.get(Link)\r\n driver.maximize_window()\r\n print(\"QR CODE SCANNED\")\r\n \r\ndef send_message(name,msg):\r\n user_group_xpath = '//span[@title = \"{}\"]'.format(name)\r\n flag = True\r\n sleep(15)\r\n while(flag):\r\n try:\r\n user = wait.until(EC.presence_of_element_located((By.XPATH, 
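# normalize() and denormalize() above are inverse linear maps between
# [0, max_] and [-1, 1], so a round trip should give the input back up to
# float32 rounding; a quick property check (random data, illustrative only):
import numpy as np

x = np.random.uniform(0, 4096, size=(32, 32)).astype(np.float32)
assert np.allclose(denormalize(normalize(x)), x, atol=1e-2)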
user_group_xpath)))\r\n user.click()\r\n flag = False\r\n except Exception:\r\n driver.execute_script(\"document.getElementById('pane-side').scrollBy({top: window.innerHeight, behavior: 'smooth'})\")\r\n flag = True\r\n\r\n msg_box = driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')\r\n msg_box.send_keys(msg)\r\n driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[3]/button').click()\r\n print(\"Message send successfully to {}.\".format(name))\r\n\r\nif __name__ == \"__main__\":\r\n \r\n today = datetime.datetime.now().strftime(\"%x\")\r\n file = pd.read_csv(r\"E:\\Parth\\eclipse-workspace\\Birthday Wisher\\birthday_list.csv\")\r\n # Let us login and Scan\r\n print(\"Now, Web Page Open\")\r\n whatsapp_login()\r\n for i, j in file.iterrows(): \r\n if(j[1] == today):\r\n name = j[0]\r\n msg = \"Happy Birthday {}!!!\".format(j[0])\r\n \r\n send_message(name,msg)\r\n\r\n sleep(10)\r\n driver.close()\r\n driver.quit()","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526262712","text":"import numpy as np\nfrom bc import *\n\ndef advect(q,v,dx,dt):\n flux = np.zeros_like(v)\n ipos = np.where(v>=0.)[0]\n ineg = np.where(v<0.)[0]\n flux[ipos] = q[ipos]*v[ipos]\n flux[ineg] = q[ineg+1]*v[ineg]\n qnew = q.copy()\n qnew[1:-1] -= dt * ( flux[1:] - flux[:-1] ) / dx\n return qnew\n\ndef hydro_iso_classic_one_timestep(q,cs,dx,dt):\n rho = q[0,:]\n u = q[1,:]/rho\n uint = 0.5 * ( u[1:] + u[:-1] )\n q[0,:] = advect(q[0,:],uint,dx,dt)\n q[1,:] = advect(q[1,:],uint,dx,dt)\n p = q[0,:] * cs**2\n q[1,1:-1] += - dt * ( p[2:] - p[:-2] ) / ( 2*dx )\n return q\n","sub_path":"Exercises/Sheet10/codes_hydro/hydro_iso_classic.py","file_name":"hydro_iso_classic.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105259977","text":"#!/usr/bin/python3\n\"\"\"9-states.py\"\"\"\n\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.state import State\nfrom models.amenity import Amenity\nfrom models.place import Place\n\n\ndef main():\n \"\"\"run web app\"\"\"\n app = Flask(__name__)\n app.url_map.strict_slashes = False\n\n @app.teardown_appcontext\n def teardown_db(error):\n storage.close()\n\n @app.route('/hbnb')\n def state_list():\n states = storage.all(State)\n amenities = storage.all(Amenity)\n places = storage.all(Place)\n return render_template('100-hbnb.html',\n states=states,\n amenities=amenities,\n places=places)\n\n app.run(host='0.0.0.0', port='5000')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"web_flask/100-hbnb.py","file_name":"100-hbnb.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196307640","text":"import argparse\nfrom time import sleep as sl\nimport pandas as pd\n\ndef parseargs():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # parser.add_argument(\"-d\", \"--database_name\", required=True,\n # help=\"sql database to search (eg. 
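# advect() in hydro_iso_classic.py above is a first-order upwind scheme: the
# flux at each interface takes q from the upwind cell, and stability requires
# the CFL condition dt <= dx / max|v|. A minimal driver advecting a box
# profile (grid sizes here are illustrative; v lives on the len(q)-1
# interfaces):
import numpy as np

dx, dt = 0.01, 0.005                 # dt*|v|/dx = 0.5, safely below 1
q = np.zeros(100)
q[40:60] = 1.0                       # cell-centred quantity
v = np.ones(99)                      # interface velocities
for _ in range(50):
    q = advect(q, v, dx, dt)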
vcseventh)\")\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = parseargs()\n\n STs = pd.read_excel('/Users/liamcheneyy/Desktop/2020-12-08-species_selecting_heriCC_Levels/STs_within_ODC_1014.xlsx', sheet_name='Relational_information')\n\n frequencies = pd.read_csv('/Users/liamcheneyy/Desktop/2020-12-08-species_selecting_heriCC_Levels/STs_within_ODC.txt', sep='\\t', header=0)\n\n\n #create dict of ODC cluster and STs within that ODC cluster\n ODC_ST_dict = {}\n for index,row in STs.iterrows():\n\n ODC_clus = row[1]\n gene_st = row[2]\n if ODC_clus not in ODC_ST_dict.keys():\n ODC_ST_dict[ODC_clus] = [gene_st]\n else:\n ODC_ST_dict[ODC_clus].append(gene_st)\n\n #create dict of size for each 7 gene ST\n ST_size_dict = {}\n for index, row in frequencies.iterrows():\n ST = row[1]\n size = row[2]\n ST_size_dict[ST] = size\n\n ##outputs frequencies\n for key, value in ODC_ST_dict.items():\n out_ls = []\n single_count = 0\n for i in value:\n i = (str(i))\n size = ST_size_dict[i]\n\n if size == 1:\n single_count += 1\n else:\n out_ls.append(size)\n\n out_ls = sorted(out_ls, reverse=True)\n if single_count > 0:\n out_ls.append(single_count)\n\n #filter out small ODC levels\n if sum(out_ls) > 50:\n print(key, *out_ls, sep='\\t')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Staph/2021-01-11-get_ST_frequencies.py","file_name":"2021-01-11-get_ST_frequencies.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515967427","text":"import gym\nimport numpy as np\nfrom scipy.misc import imresize, toimage\n\nclass GymEnvironment():\n\n # For use with Open AI Gym Environment\n def __init__(self, env_id, screen_width, screen_height):\n self.env_id = env_id\n self.gym = gym.make(env_id)\n self.obs = None\n self.terminal = None\n\n self.screen_width = screen_width\n self.screen_height = screen_height\n\n # Define actions for games (gym-0.9.4 and ALE 0.5.1)\n if env_id == \"Pong-v0\":\n self.action_space = [1, 2, 3] # [NONE, UP, DOWN]\n elif env_id == \"Breakout-v0\":\n self.action_space = [1, 2, 3] # [FIRE, RIGHT, LEFT]\n else:\n self.action_space = [i for i in range(0, self.gym.action_space.n)] # 9 discrete actions are available\n\n def numActions(self):\n assert isinstance(self.gym.action_space, gym.spaces.Discrete)\n return len(self.action_space)\n\n def restart(self):\n self.obs = self.gym.reset()\n self.terminal = False\n\n def act(self, action):\n self.obs, reward, self.terminal, _ = self.gym.step(self.action_space[action])\n return reward, self.terminal\n\n def isTerminal(self):\n assert self.terminal is not None\n return self.terminal\n\n def render(self):\n self.gym.render()\n\n def getScreen(self):\n assert self.obs is not None\n\n new_bg_color = 0\n black_white = False\n\n if self.env_id == \"MsPacman-v0\":\n height_range = (0, 172)\n bg = (210, 164, 74) # character\n\n elif self.env_id == \"Pong-v0\":\n height_range = (35, 193)\n bg = (144, 72, 17) # bg\n black_white = True\n\n elif self.env_id == \"Breakout-v0\":\n height_range = (20, 198)\n bg = (142, 142, 142) # walls\n\n elif self.env_id == \"SpaceInvaders-v0\":\n height_range = (20, 198)\n bg = (50, 132, 50) # character\n new_bg_color = 255 # turn to white\n\n else:\n height_range = (0, 210)\n bg = (0, 0, 0)\n\n return pipeline(self.obs, (self.screen_height, self.screen_width),\n height_range, bg, new_bg_color, black_white)\n\n\ndef pipeline(image, new_HW, height_range, bg, new_bg_color=0, black_white=False):\n 
\"\"\"Returns a preprocessed image\n\n (1) Crop image (top and bottom)\n (2) Remove background & grayscale\n (3) Reszie to smaller image\n\n Args:\n image (3-D array): (H, W, C)\n new_HW (tuple): New image size (height, width)\n height_range (tuple): Height range (H_begin, H_end) else cropped\n bg (tuple): Background RGB Color (R, G, B)\n\n Returns:\n image (3-D array): (H, W, 1)\n \"\"\"\n #toimage(image).show()\n image = crop_image(image, height_range)\n #toimage(image).show()\n image = resize_image(image, new_HW)\n #toimage(image).show()\n image = kill_background_grayscale(image, bg, new_bg_color, black_white)\n #toimage(image).show()\n image = np.expand_dims(image, axis=2)\n return image\n\ndef crop_image(image, height_range):\n \"\"\"Crops top and bottom\n\n Args:\n image (3-D array): Numpy image (H, W, C)\n height_range (tuple): Height range between (min_height, max_height)\n will be kept\n\n Returns:\n image (3-D array): Numpy image (max_H - min_H, W, C)\n \"\"\"\n h_beg, h_end = height_range\n return image[h_beg:h_end, ...]\n\ndef resize_image(image, new_HW):\n \"\"\"Returns a resized image\n\n Args:\n image (3-D array): Numpy array (H, W, C)\n new_HW (tuple): Target size (height, width)\n\n Returns:\n image (3-D array): Resized image (height, width, C)\n \"\"\"\n return imresize(image, new_HW, interp=\"nearest\")\n\ndef kill_background_grayscale(image, bg, new_bg_color=0, black_white=False):\n \"\"\"Make the background 0\n\n Args:\n image (3-D array): Numpy array (H, W, C)\n bg (tuple): RGB code of background (R, G, B)\n\n Returns:\n image (2-D array): Binarized image of shape (H, W)\n The background is new_color\n \"\"\"\n H, W, _ = image.shape\n\n R = image[..., 0]\n G = image[..., 1]\n B = image[..., 2]\n cond = (R == bg[0]) & (G == bg[1]) & (B == bg[2])\n\n if black_white:\n image = np.zeros((H, W))\n image[~cond] = 1\n else:\n image = image.mean(axis=2)\n image[cond] = new_bg_color\n image = (image - 128) / 128 # normalize from -1. 
to 1.\n\n return image\n","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"541138146","text":"import sublime\nimport sublime_plugin\nimport subprocess\nimport re\nfrom shutil import which\n\niscc = ''\nexists = False\npopup_enabled = True\npopup_style = ''\n\n\nclass InnoSetup():\n err_lines = {}\n\n\ndef refresh_popup_style(bg_color, text_color):\n global popup_style\n popup_style = (\n ''\n )\n\n\ndef load_settings():\n def get_iscc():\n global iscc, exists\n iscc = settings.get('iscc')\n if which(iscc) is None:\n exists = False\n sublime.error_message('ISCC not found!')\n else:\n exists = True\n def get_popup_enabled():\n global popup_enabled\n popup_enabled = settings.get('popup_enabled')\n def get_popup_style():\n popup_text = settings.get('popup_foreground')\n popup_bg = settings.get('popup_background')\n refresh_popup_style(popup_bg, popup_text)\n settings = sublime.load_settings('Inno Setup.sublime-settings')\n get_iscc()\n get_popup_enabled()\n get_popup_style()\n settings.add_on_change('iscc', get_iscc)\n settings.add_on_change('popup_enabled', get_popup_enabled)\n settings.add_on_change('popup_foreground', get_popup_style)\n settings.add_on_change('popup_background', get_popup_style)\n\n\ndef plugin_loaded():\n load_settings()\n\n\nclass LintAction(sublime_plugin.EventListener):\n def on_load_async(self, view):\n self.on_modified_async(view)\n def on_modified_async(self, view):\n if 'Inno' in view.settings().get('syntax'):\n lint(view)\n self.on_selection_modified_async(view)\n def on_selection_modified_async(self, view):\n if 'Inno' not in view.settings().get('syntax'):\n return\n if not popup_enabled:\n return\n file = re.sub('\\\\\\\\', '/', view.file_name())\n if file not in InnoSetup.err_lines:\n lint(view)\n sel_line = view.rowcol(view.sel()[0].a)[0]\n if sel_line in InnoSetup.err_lines[file]:\n html = '' + InnoSetup.err_lines[file][sel_line] + ''\n view.show_popup(popup_style + html, max_width=500)\n\n\ndef lint(view):\n if not exists:\n return\n file = re.sub('\\\\\\\\', '/', view.file_name())\n InnoSetup.err_lines[file] = {}\n cmd = [iscc, '/q', '/do', '/O-', file]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, universal_newlines=True)\n out, err = p.communicate()\n if not err:\n return\n line_matcher = re.compile(r'Error on line (\\d+).*')\n for msg in err.split('\\n'):\n if msg == '' or 'Compile aborted' in msg:\n break\n err_line = int(line_matcher.split(msg)[1]) - 1\n InnoSetup.err_lines[file][err_line] = msg\n highlight(view)\n\n\ndef highlight(view):\n error_regions = []\n file = re.sub('\\\\\\\\', '/', view.file_name())\n for item in InnoSetup.err_lines[file]:\n line_region = view.line(view.text_point(item, 0))\n error_regions.append(line_region)\n view.set_status('Inno', 'error in lines:' + str([str(view.rowcol(r.a)[0]+1) for r in error_regions]))\n view.add_regions('inno_error', error_regions, 'entity.name.type.class.error.inno', 'dot', sublime.DRAW_NO_FILL)\n\n\nclass GotoDefinition(sublime_plugin.TextCommand):\n def run(self, edit):\n view = self.view\n symbolname, scope = get_word_and_scope_under_cursor(view)\n if 'support.function.pascal' in scope:\n d = view.window().lookup_symbol_in_open_files(symbolname)[-1]\n abs_path, row, col = d[0], d[2][0], d[2][1]\n goto_location(abs_path, row, col)\n def is_enabled(self):\n return 'Inno' in 
self.view.settings().get('syntax')\n\n\nclass FindUsages(sublime_plugin.TextCommand):\n def run(self, edit):\n view = self.view\n current_loc = view.rowcol(view.sel()[0].a)\n sym, scope = get_word_and_scope_under_cursor(view)\n matches = []\n regions = []\n for v in view.window().views():\n if 'Inno' not in v.settings().get('syntax'):\n continue\n file = re.split('(?:.*)\\\\\\\\(.*)', v.file_name())[1]\n regs = [get_symbol_context(v, r) + ':' + str(v.rowcol(r.a)[0] + 1) + ' in file: ' + file for r in v.find_all(r'\\b' + sym + r'\\b')]\n matches += [(v.file_name(), v.rowcol(r.a)) for r in v.find_all(r'\\b' + sym + r'\\b')]\n regions += regs\n def on_sel(ind):\n if ind == - 1:\n goto_location(view.file_name(), current_loc[0], current_loc[1])\n f, rc = matches[ind]\n goto_location(f, rc[0]+1, rc[1]+1)\n view.window().show_quick_panel(regions, on_sel, sublime.MONOSPACE_FONT, 0, on_sel)\n def is_enabled(self):\n return 'Inno' in self.view.settings().get('syntax')\n\n\ndef find_symbol(view, symbolname):\n for v in view.window().views():\n if 'Inno' not in v.settings().get('syntax'):\n continue\n for sym in v.symbols():\n if symbolname == sym[1]:\n row, col = v.rowcol(sym[0].a)\n return v.file_name(), row+1, col+1\n row, col = view.rowcol(view.sel()[0].a)\n return view.file_name(), row+1, col+1\n\n\ndef get_symbol_context(view, region):\n cont = ''\n for s in view.symbols():\n if s[0].a > region.a:\n break\n cont = s[1]\n return cont\n\n\ndef get_word_and_scope_under_cursor(view):\n sel_region = view.sel()[0]\n scope = view.scope_name(sel_region.a)\n sym = view.substr(view.word(sel_region.a))\n return sym, scope\n\n\ndef goto_location(filename, row, col):\n window = sublime.active_window()\n window.open_file('%s:%s:%s' % (filename, row, col), sublime.ENCODED_POSITION)","sub_path":"sublime-innosetup.py","file_name":"sublime-innosetup.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91771403","text":"\nclass Solution:\n def isPrefixOfWord(self, sentence: str, searchWord: str) -> int:\n for idx, w in enumerate(sentence.split(' ')):\n if w.startswith(searchWord):\n return idx + 1\n else:\n return -1\n \n\nsentence = \"i love eating burger\"\nsearchWord = \"burg\"\nres = Solution().isPrefixOfWord(sentence, searchWord)\nprint(res)","sub_path":"string/1455_Check_If_a_Word_Occurs_As_a_Prefix_of_Any_Word_in_a_Sentence/1455_Check_If_a_Word_Occurs_As_a_Prefix_of_Any_Word_in_a_Sentence.py","file_name":"1455_Check_If_a_Word_Occurs_As_a_Prefix_of_Any_Word_in_a_Sentence.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591912158","text":"# -*- coding:utf-8 -*-\r\nimport logging\r\nimport subprocess\r\nfrom time import gmtime, strftime\r\nimport datetime\r\nfrom apscheduler.schedulers.blocking import BlockingScheduler\r\n\r\ndef clear():\r\n command = \"python clear.py\"\r\n subprocess.call(command.split())\r\n\r\ndef once():\r\n command = \"python once.py\"\r\n subprocess.call(command.split())\r\n\r\ndef main():\r\n command = \"python main.py\"\r\n subprocess.call(command.split())\r\n\r\nif __name__ == \"__main__\":\r\n logging.basicConfig(level=logging.INFO)\r\n sched = BlockingScheduler()\r\n sched.add_job(clear, 'cron', hour=\"0\", minute=\"30\")\r\n sched.add_job(once, 'cron', hour=\"5\", minute=\"20\")\r\n sched.add_job(main, 'cron', hour=\"8-23\", minute=\"59\")\r\n sched.start()\r\n 
\r\n","sub_path":"FlyHigh_Personal_Project_ParkSeHwan/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295537822","text":"from django.contrib.auth.models import User\nfrom django.core.validators import RegexValidator\nfrom django.db import models\nfrom address import models as adress_model\nimport os\nfrom conversation.models import Conversation as Conversation\n\n\ndef get_image_path(instance, filename):\n return os.path.join('img/', instance.__class__.__name__, str(instance.id), filename)\n\n\nclass AppUser(models.Model):\n birthdate = models.DateTimeField(null=True, blank=True)\n address = adress_model.AddressField(null=True, blank=True)\n image = models.ImageField(upload_to=get_image_path, blank=True, null=True)\n gender = models.TextField(null=True, blank=True)\n user = models.OneToOneField(User)\n\n def __unicode__(self):\n return u\"%s %s\" % (self.user.first_name, self.user.last_name)\n\n def __str__(self):\n return \"%s %s\" % (self.user.first_name, self.user.last_name)\n\n\nclass Business(models.Model):\n name = models.TextField(null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n category = models.TextField(null=True, blank=True)\n email = models.EmailField(null=True, blank=True)\n address = adress_model.AddressField(null=True, blank=True)\n image = models.ImageField(upload_to=get_image_path, blank=True, null=True)\n managers = models.ManyToManyField(AppUser)\n\n def __unicode__(self):\n return u\"%s\" % self.name\n\n def __str__(self):\n return \"%s\" % self.name\n\n\nclass Phone(models.Model):\n business = models.ForeignKey(Business, related_name='phones')\n description = models.TextField(max_length='20')\n number = models.CharField(max_length=10, unique=True, validators=[RegexValidator(regex='^\\d{10}$', message='Length has to be 10', code='Invalid number')])\n\n\nclass Catalog(models.Model):\n name = models.TextField(null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n created_at = models.DateTimeField(null=True, blank=True)\n changed_at = models.DateTimeField(null=True, blank=True)\n image = models.ImageField(null=True, blank=True)\n business = models.ForeignKey(Business, related_name='business_catalogs')\n\n def __unicode__(self):\n return u\"%s\" % self.name\n\n def __str__(self):\n return \"%s\" % self.name\n\n\nclass Item(models.Model):\n name = models.TextField(null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n price = models.FloatField(null=True, blank=True)\n catalog = models.ForeignKey(Catalog, related_name='catalog_items')\n created_at = models.DateTimeField(null=True, blank=True)\n changed_at = models.DateTimeField(null=True, blank=True)\n quantity = models.IntegerField(null=True, blank=True)\n purchased_times = models.IntegerField(null=True, blank=True)\n\n def __unicode__(self):\n return u\"%s\" % self.name\n\n def __str__(self):\n return \"%s\" % self.name\n\n\nclass Service(models.Model):\n name = models.TextField(null=True, blank=True)\n description = models.TextField(null=True, blank=True)\n price = models.FloatField(null=True, blank=True)\n catalog = models.ForeignKey(Catalog, 'catalog_services')\n created_at = models.DateTimeField(null=True, blank=True)\n changed_at = models.DateTimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n purchased_times = models.IntegerField(null=True, blank=True)\n\n def 
__unicode__(self):\n return u\"%s\" % self.name\n\n def __str__(self):\n return \"%s\" % self.name\n\n\nclass Order(models.Model):\n user = models.ForeignKey(AppUser, related_name='user_orders')\n business = models.ForeignKey(Business, related_name='business_orders')\n type = models.TextField(null=True, blank=True) # takeaway / delivery\n picking_time = models.DateField(null=True, blank=True)\n address = adress_model.AddressField(null=True, blank=True)\n total = models.FloatField(null=True, blank=True)\n created_at = models.DateTimeField(null=True, blank=True)\n\n def __unicode__(self):\n return u\"Order %s\" % self.id\n\n def __str__(self):\n return \"Order %s\" % self.id\n\n\nclass ItemOrder(models.Model):\n order = models.ForeignKey(Order, related_name='item_orders')\n item = models.ForeignKey(Item, related_name='order_items')\n quantity = models.IntegerField(null=True, blank=True)\n\n\n# business time\n\nWEEKDAYS = [\n (1, \"Monday\"),\n (2, \"Tuesday\"),\n (3, \"Wednesday\"),\n (4, \"Thursday\"),\n (5, \"Friday\"),\n (6, \"Saturday\"),\n (7, \"Sunday\"),\n]\n\n\nclass OpeningHours(models.Model):\n business = models.ForeignKey(Business, related_name='opening_hours')\n weekday = models.IntegerField(choices=WEEKDAYS)\n from_hour = models.TimeField(null=True, blank=True)\n to_hour = models.TimeField(null=True, blank=True)\n\n class Meta:\n ordering = ('weekday', 'from_hour')\n unique_together = ('weekday', 'from_hour', 'to_hour')\n\n def __unicode__(self):\n return u'%s: %s - %s' % (self.get_weekday_display(),\n self.from_hour, self.to_hour)\n\n def __str__(self):\n return '%s: %s - %s' % (self.get_weekday_display(),\n self.from_hour, self.to_hour)\n\n\n# end business time\n\n\nclass AppConversation(models.Model):\n appuser = models.ForeignKey(AppUser)\n business = models.ForeignKey(Business)\n conversation = models.OneToOneField(\n Conversation,\n verbose_name='Conversation',\n related_name='appconversation',\n null=True,\n blank=True\n )\n\n\nclass Appointment(models.Model):\n service = models.ForeignKey(Service, related_name='service_appointments')\n starting_time = models.TimeField(null=True, blank=True)\n user = models.ForeignKey(AppUser, related_name='user_appointments')\n conversation = models.ForeignKey(Conversation, related_name='conversation_appointments')\n\n\nclass Dispute(models.Model):\n service = models.ForeignKey(Service, related_name='service_disputes')\n type = models.TextField(null=True, blank=True) # item / service / appointment / order\n item = models.ForeignKey(Item, related_name='item_disputes')\n Appointment = models.ForeignKey(Appointment)\n order = models.ForeignKey(Order, related_name='order_disputes')\n topic = models.TextField(null=True, blank=True)\n conversation = models.ForeignKey(Conversation, related_name='conversation_disputes')\n\n\nclass BasePost(models.Model):\n text = models.TextField(null=True, blank=True)\n business = models.ForeignKey(Business, related_name='business_base_posts')\n is_important = models.BooleanField(default=False)\n audience = models.TextField(null=True, blank=True) # all / members\n created_at = models.DateTimeField(null=True, blank=True)\n changed_at = models.DateTimeField(null=True, blank=True)\n\n\nclass Discount(models.Model):\n amount_type = models.TextField(null=True, blank=True) # percents / price\n amount = models.FloatField(null=True, blank=True)\n condition_type = models.TextField(null=True, blank=True) # percents / price\n item = models.ForeignKey(Item, 
related_name='item_discounts')\n service = models.ForeignKey(Service, related_name='service_discounts')\n order = models.ForeignKey(Order, related_name='order_discounts')\n reward_type = models.TextField(null=True, blank=True)\n item_reward = models.ForeignKey(Item, related_name='item_reward_discounts')\n discount_reward = models.FloatField(null=True, blank=True)\n\n\nclass BaseBenefit(models.Model):\n title = models.TextField(null=True, blank=True)\n text = models.TextField(null=True, blank=True)\n business = models.ForeignKey(Business, related_name='business_base_benefits')\n starting_date = models.DateTimeField(null=True, blank=True)\n ending_date = models.DateTimeField(null=True, blank=True)\n\n\nclass TicketBenefit(models.Model):\n benefit = models.ForeignKey(BaseBenefit, related_name='ticket_base_benefits')\n amount = models.IntegerField(null=True, blank=True) # required punchings\n\n\nclass FriendBenefit(models.Model):\n benefit = models.ForeignKey(BaseBenefit, related_name='friend_base_benefits')\n amount = models.IntegerField(null=True, blank=True) # required friends\n\n\nclass DiscountBenefit(models.Model):\n benefit = models.ForeignKey(BaseBenefit, related_name='discount_base_benefits')\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(AppUser, related_name='user_comments')\n text = models.TextField(null=True, blank=True)\n post = models.ForeignKey(BasePost, related_name='post_comments')\n created_at = models.DateTimeField(blank=True, auto_now_add=True)\n\n\nclass PostLike(models.Model):\n class Meta:\n unique_together = (('user', 'post'),)\n user = models.ForeignKey(AppUser, related_name='user_post_likes')\n post = models.ForeignKey(BasePost, related_name='base_post_likes')\n\n\nclass BusinessLike(models.Model):\n class Meta:\n unique_together = (('user', 'business'),)\n\n user = models.ForeignKey(AppUser, related_name='user_business_likes')\n business = models.ForeignKey(Business, related_name='business_likes')\n\n\nclass UserFriendBenefit(models.Model):\n user = models.ForeignKey(AppUser, related_name='user_friend_benefits')\n friend = models.ForeignKey(AppUser)\n benefit = models.ForeignKey(FriendBenefit, related_name='friend_base_benefits')\n\n\nclass UserTicketBenefit(models.Model):\n user = models.ForeignKey(AppUser, related_name='user_ticket_benefits')\n counter = models.IntegerField(null=True, blank=True)\n benefit = models.ForeignKey(TicketBenefit, related_name='ticket_base_benefits')\n","sub_path":"BaseApp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"374493519","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the 4n6time MySQL output class.\"\"\"\n\nimport unittest\n\nfrom plaso.containers import events\nfrom plaso import formatters # pylint: disable=unused-import\nfrom plaso.lib import eventdata\nfrom plaso.lib import timelib\nfrom plaso.output import mysql_4n6time\n\nfrom tests.output import fake_mysqldb\nfrom tests.output import test_lib\n\nif mysql_4n6time.MySQLdb is None:\n mysql_4n6time.MySQLdb = fake_mysqldb\n\n\nclass MySQL4n6TimeTestEvent(events.EventObject):\n \"\"\"Test event.\"\"\"\n\n DATA_TYPE = u'syslog:line'\n\n def __init__(self, event_timestamp):\n \"\"\"Initializes an event.\"\"\"\n super(MySQL4n6TimeTestEvent, self).__init__()\n self.display_name = u'log/syslog.1'\n self.filename = u'log/syslog.1'\n self.hostname = u'ubuntu'\n self.my_number = 123\n self.some_additional_foo = True\n 
self.text = (\n u'Reporter PID: 8442 (pam_unix(cron:session): session '\n u'closed for user root)')\n self.timestamp_desc = eventdata.EventTimestamp.WRITTEN_TIME\n self.timestamp = event_timestamp\n\n\nclass MySQL4n6TimeOutputModuleTest(test_lib.OutputModuleTestCase):\n \"\"\"Tests for the 4n6time MySQL output class.\"\"\"\n\n # pylint: disable=protected-access\n\n def testGetTags(self):\n \"\"\"Tests the _GetTags function.\"\"\"\n fake_cursor = fake_mysqldb.FakeMySQLdbCursor()\n fake_cursor.expected_query = u'SELECT DISTINCT tag FROM log2timeline'\n fake_cursor.query_results = [(u'one',), (u'two,three',), (u'four',)]\n\n output_mediator = self._CreateOutputMediator()\n output_module = mysql_4n6time.MySQL4n6TimeOutputModule(output_mediator)\n output_module._cursor = fake_cursor\n\n expected_tags = [u'one', u'two', u'three', u'four']\n tags = output_module._GetTags()\n self.assertEqual(tags, expected_tags)\n\n def testGetUniqueValues(self):\n \"\"\"Tests the _GetUniqueValues function.\"\"\"\n fake_cursor = fake_mysqldb.FakeMySQLdbCursor()\n fake_cursor.expected_query = (\n u'SELECT source, COUNT(source) FROM log2timeline GROUP BY source')\n fake_cursor.query_results = [(u'one', 1), (u'two', 2), (u'three', 3)]\n\n output_mediator = self._CreateOutputMediator()\n output_module = mysql_4n6time.MySQL4n6TimeOutputModule(output_mediator)\n output_module._cursor = fake_cursor\n\n expected_unique_values = {u'one': 1, u'two': 2, u'three': 3}\n unique_values = output_module._GetUniqueValues(u'source')\n self.assertEqual(unique_values, expected_unique_values)\n\n # TODO: add test for Open and Close\n\n def testGetSanitizedEventValues(self):\n \"\"\"Tests the GetSanitizedEventValues function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = mysql_4n6time.MySQL4n6TimeOutputModule(output_mediator)\n\n expected_dict = {\n u'type': u'Content Modification Time',\n u'host': u'ubuntu',\n u'filename': u'log/syslog.1',\n u'source': u'LOG',\n u'description': u'[',\n u'datetime': u'2012-06-27 18:17:01',\n u'inreport': u'',\n u'source_name': u'-',\n u'extra': (\n u'my_number: 123 some_additional_foo: True text: '\n u'Reporter PID: 8442 (pam_unix(cron:session): '\n u'session closed for user root) '\n ),\n u'tag': u'',\n u'timezone': u'UTC',\n u'inode': u'-',\n u'reportnotes': u'',\n u'sourcetype': u'Log File',\n u'event_identifier': u'-',\n u'format': u'-',\n u'URL': u'-',\n u'record_number': 0,\n u'MACB': u'M...',\n u'computer_name': u'-',\n u'offset': 0,\n u'evidence': u'-',\n u'user_sid': u'-',\n u'notes': u'-',\n u'vss_store_number': -1,\n u'user': u'-'\n }\n\n timestamp = timelib.Timestamp.CopyFromString(\n u'2012-06-27 18:17:01+00:00')\n event = MySQL4n6TimeTestEvent(timestamp)\n event_dict = output_module._GetSanitizedEventValues(event)\n\n self.assertIsInstance(event_dict, dict)\n self.assertDictContainsSubset(expected_dict, event_dict)\n\n def testSetCredentials(self):\n \"\"\"Tests the SetCredentials function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = mysql_4n6time.MySQL4n6TimeOutputModule(output_mediator)\n\n output_module.SetCredentials(password=u'password', username=u'username')\n self.assertEqual(output_module._password, u'password')\n self.assertEqual(output_module._user, u'username')\n\n def testSetDatabaseName(self):\n \"\"\"Tests the SetDatabaseName function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = mysql_4n6time.MySQL4n6TimeOutputModule(output_mediator)\n\n output_module.SetDatabaseName(u'database')\n 
self.assertEqual(output_module._dbname, u'database')\n\n def testSetServerInformation(self):\n \"\"\"Tests the SetServerInformation function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = mysql_4n6time.MySQL4n6TimeOutputModule(output_mediator)\n\n output_module.SetServerInformation(u'127.0.0.1', 3306)\n self.assertEqual(output_module._host, u'127.0.0.1')\n self.assertEqual(output_module._port, 3306)\n\n def testWriteEventBody(self):\n \"\"\"Tests the WriteEventBody function.\"\"\"\n fake_cursor = fake_mysqldb.FakeMySQLdbCursor()\n fake_cursor.expected_query = (\n mysql_4n6time.MySQL4n6TimeOutputModule._INSERT_QUERY)\n\n fake_cursor.expected_query_args = {\n u'computer_name': u'-',\n u'datetime': u'2012-06-27 18:17:01',\n u'description': u'[',\n u'event_identifier': u'-',\n u'event_type': u'-',\n u'evidence': u'-',\n u'extra': (\n u'my_number: 123 '\n u'some_additional_foo: True '\n u'text: Reporter PID: 8442 (pam_unix(cron:session): '\n u'session closed for user root) '),\n u'filename': u'log/syslog.1',\n u'format': u'-',\n u'host': u'ubuntu',\n u'inode': u'-',\n u'inreport': u'',\n u'MACB': u'M...',\n u'notes': u'-',\n u'offset': 0,\n u'record_number': 0,\n u'reportnotes': u'',\n u'source_name': u'-',\n u'sourcetype': u'Log File',\n u'source': u'LOG',\n u'tag': u'',\n u'timezone': u'UTC',\n u'type': u'Content Modification Time',\n u'URL': u'-',\n u'user_sid': u'-',\n u'user': u'-',\n u'vss_store_number': -1}\n\n output_mediator = self._CreateOutputMediator()\n output_module = mysql_4n6time.MySQL4n6TimeOutputModule(output_mediator)\n output_module._count = 0\n output_module._cursor = fake_cursor\n\n timestamp = timelib.Timestamp.CopyFromString(\n u'2012-06-27 18:17:01+00:00')\n event = MySQL4n6TimeTestEvent(timestamp)\n output_module.WriteEventBody(event)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/output/mysql_4n6time.py","file_name":"mysql_4n6time.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"40347601","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torchvision\nfrom tqdm import tqdm\nimport warnings\nwarnings.simplefilter(\"ignore\", UserWarning)\n\ntorch.manual_seed(100)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass AN(nn.Module):\n def __init__(self, num_classes):\n super(AN, self).__init__()\n self.fc1 = nn.Linear(in_features=28 * 28, out_features=128)\n self.fc2 = nn.Linear(in_features=128, out_features=num_classes)\n self.flat = nn.Flatten()\n\n def forward(self, x):\n x = self.flat(x)\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\nclass CNN(nn.Module):\n def __init__(self, num_classes):\n super(CNN, self).__init__()\n self.architecture = [(1, 128), \"M\", (128, 64), \"M\", (64, 32), \"M\", (32, 16)]\n self.conv_layer = self._crate_conv_layer()\n self.flat = nn.Flatten()\n self.fc = nn.Linear(in_features=8*3*3, out_features=num_classes)\n\n def forward(self, x):\n x = self.conv_layer(x)\n x = self.flat(x)\n x = self.fc(x)\n return x\n\n def _crate_conv_layer(self):\n layers = list()\n for x in self.architecture:\n if type(x) == tuple:\n layers += [\n nn.Conv2d(in_channels=x[0], out_channels=x[1], kernel_size=(3, 3), padding=(1, 1)),\n nn.BatchNorm2d(num_features=x[1]),\n nn.LeakyReLU(negative_slope=0.1)\n ]\n elif type(x) == str:\n layers += [nn.MaxPool2d(kernel_size=(2, 2))]\n\n return nn.Sequential(*layers)\n\n\ndef train(model, 
dataloader, epochs, loss_fn, optimizer):\n model.train()\n num_correct =0\n num_size = 0\n for epoch in range(epochs):\n loop = tqdm(enumerate(dataloader), total=len(dataloader))\n for batch_dix, (X, label) in loop:\n X, label = X.to(device), label.to(device)\n\n preds = model(X)\n loss = loss_fn(preds, label)\n\n num_correct += (preds.max(1)[1] == label).sum()\n num_size += preds.shape[0]\n accuracy = round(float(num_correct / num_size * 100), 1)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loop.set_description(f\"Epoch: {epoch + 1} / {epochs}\")\n loop.set_postfix(loss=loss.item(), accuracy=f\"{accuracy}%\")\n\n\ndef test(model, dataloader):\n model.eval()\n num_correct = 0\n num_size = 0\n loop = tqdm(enumerate(dataloader), total=len(dataloader))\n with torch.no_grad():\n for batch_dix, (X, label) in loop:\n X, label = X.to(device), label.to(device)\n\n preds = model(X)\n\n num_correct += (preds.max(1)[1] == label).sum()\n num_size += preds.shape[0]\n accuracy = round(float(num_correct / num_size * 100), 1)\n\n loop.set_postfix(accuracy=f\"{accuracy}%\")\n\n\nnum_classes = 10\nbatch_size = 32\nlearning_rate = 3e-4\nepochs = 3\n\ntrain_dataset = torchvision.datasets.MNIST(root=\"data\", train=True, transform=torchvision.transforms.ToTensor(), download=True)\ntrain_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\ntest_dataset = torchvision.datasets.MNIST(root=\"data\", train=False, transform=torchvision.transforms.ToTensor(), download=True)\ntest_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n\n# model = AN(num_classes=10).to(device)\n# loss_fn = nn.CrossEntropyLoss()\n# optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n#\n# train(model, train_dataloader, epochs, loss_fn, optimizer)\n# test(model, test_dataloader)\n\nmodel = CNN(num_classes=10).to(device)\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\ntrain(model, train_dataloader, epochs, loss_fn, optimizer)\ntest(model, test_dataloader)\n","sub_path":"test20.py","file_name":"test20.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"434546166","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0021_auto_20150307_1515'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='export1c',\n options={'verbose_name': '\\u0412\\u044b\\u0433\\u0440\\u0443\\u0437\\u043a\\u0430 \\u043e\\u0441\\u0442\\u0430\\u0442\\u043a\\u043e\\u0432 \\u0432 1\\u0421', 'verbose_name_plural': '\\u0412\\u044b\\u0433\\u0440\\u0443\\u0437\\u043a\\u0438'},\n ),\n migrations.AddField(\n model_name='export1c',\n name='title',\n field=models.CharField(max_length=128, null=True, verbose_name=b'\\xd0\\x9d\\xd0\\xb0\\xd0\\xb7\\xd0\\xb2\\xd0\\xb0\\xd0\\xbd\\xd0\\xb8\\xd0\\xb5', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='export1c',\n name='type',\n field=models.IntegerField(default=0, verbose_name=b'\\xd0\\xa2\\xd0\\xb8\\xd0\\xbf \\xd0\\xb2\\xd1\\x8b\\xd0\\xb3\\xd1\\x80\\xd1\\x83\\xd0\\xb7\\xd0\\xba\\xd0\\xb8', choices=[(0, b'Goods'), (1, b'Orders')]),\n preserve_default=True,\n ),\n 
]\n","sub_path":"shop/migrations/0022_auto_20150312_0948.py","file_name":"0022_auto_20150312_0948.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197126192","text":"#!/usr/bin/env python\n# wujian@2018\n\nimport os\nimport glob\nimport random\nimport warnings\nimport librosa as audio_lib\nimport numpy as np\nimport iobase as io\n\nfrom utils import stft, parse_scps, get_logger\n\nlogger = get_logger(__name__)\n\n\nclass Reader(object):\n \"\"\"\n Base class, to be implemented\n \"\"\"\n\n def __init__(self, scp_path, addr_processor=lambda x: x):\n if not os.path.exists(scp_path):\n raise FileNotFoundError(\"Could not find file {}\".format(scp_path))\n self.index_dict = parse_scps(scp_path, addr_processor=addr_processor)\n self.index_keys = [key for key in self.index_dict.keys()]\n\n def _load(self, key):\n raise NotImplementedError\n\n # number of utterance\n def __len__(self):\n return len(self.index_dict)\n\n # avoid key error\n def __contains__(self, key):\n return key in self.index_dict\n\n # sequential index\n def __iter__(self):\n for key in self.index_keys:\n yield key, self._load(key)\n\n # random index, support str/int as index\n def __getitem__(self, index):\n if type(index) == int:\n num_utts = len(self.index_keys)\n if index >= num_utts or index < 0:\n raise KeyError(\"Interger index out of range, {} vs {}\".format(index, num_utts))\n key = self.index_keys[index]\n return self._load(key)\n elif type(index) is str:\n if index not in self.index_dict:\n raise KeyError(\"Missing utterance {}!\".format(index))\n return self._load(index)\n else:\n raise IndexError(\"Unsupported index type: {}\".format(type(index)))\n\n\nclass WaveReader(Reader):\n def __init__(self, scp_path, sample_rate=None):\n super(WaveReader, self).__init__(scp_path)\n self.sample_rate = sample_rate\n\n def _load(self, key):\n wav_addr = self.index_dict[key]\n samps, _ = audio_lib.load(wav_addr, sr=self.sample_rate)\n return samps\n\n\nclass SpectrogramReader(Reader):\n \"\"\"\n Wrapper for short-time fourier transform of wave scripts\n \"\"\"\n\n def __init__(self, wave_scp, **kwargs):\n super(SpectrogramReader, self).__init__(wave_scp)\n if \"return_samps\" in kwargs and kwargs[\"return_samps\"]:\n warnings.warn(\"Argument --return_samps is True here, ignore it\")\n kwargs[\"return_samps\"] = False\n self.stft_kwargs = kwargs\n\n # stft, single or multi-channal\n def _load(self, key):\n flist = glob.glob(self.index_dict[key])\n if not len(flist):\n raise RuntimeError(\n \"Could not find file matches template \\'{}\\'\".format(\n self.index_dict[key]))\n if len(flist) == 1:\n return stft(flist[0], **self.stft_kwargs)\n else:\n return np.array(\n [stft(f, **self.stft_kwargs) for f in sorted(flist)])\n\n\nclass ArchieveReader(Reader):\n \"\"\"\n Reader for kaldi's scripts(for BaseFloat matrix)\n \"\"\"\n\n def __init__(self, ark_scp):\n def addr_processor(addr):\n addr_token = addr.split(\":\")\n if len(addr_token) == 1:\n raise ValueError(\"Unsupported scripts address format\")\n path, offset = \":\".join(addr_token[0:-1]), int(addr_token[-1])\n return (path, offset)\n\n super(ArchieveReader, self).__init__(\n ark_scp, addr_processor=addr_processor)\n\n def _load(self, key):\n path, offset = self.index_dict[key]\n with open(path, 'rb') as f:\n f.seek(offset)\n io.expect_binary(f)\n ark = io.read_general_mat(f)\n return ark\n\n\nclass ArchieveWriter(object):\n \"\"\"\n Writer for kaldi's scripts && archieve(for 
BaseFloat matrix)\n \"\"\"\n\n def __init__(self, ark_path, scp_path=None):\n self.scp_path = scp_path\n self.ark_path = ark_path\n\n def __enter__(self):\n self.scp_file = None if self.scp_path is None else open(\n self.scp_path, \"w\")\n self.ark_file = open(self.ark_path, \"wb\")\n return self\n\n def __exit__(self, type, value, trace):\n if self.scp_file:\n self.scp_file.close()\n self.ark_file.close()\n\n def write(self, key, matrix):\n io.write_token(self.ark_file, key)\n offset = self.ark_file.tell()\n # binary symbol\n io.write_binary_symbol(self.ark_file)\n io.write_common_mat(self.ark_file, matrix)\n if self.scp_file:\n self.scp_file.write(\"{}\\t{}:{:d}\\n\".format(\n key, os.path.abspath(self.ark_path), offset))\n\n\ndef test_archieve_writer(ark, scp):\n with ArchieveWriter(ark, scp) as writer:\n for i in range(10):\n mat = np.random.rand(100, 20)\n writer.write(\"mat-{:d}\".format(i), mat)\n print(\"TEST *test_archieve_writer* DONE!\")\n\n\ndef test_archieve_reader(egs):\n ark_reader = ArchieveReader(egs)\n for key, mat in ark_reader:\n print(\"{}: {}\".format(key, mat.shape))\n print(\"TEST *test_archieve_reader* DONE!\")\n\n\nif __name__ == \"__main__\":\n test_archieve_writer(\"egs.ark\", \"egs.scp\")\n test_archieve_reader(\"egs.scp\")\n\n","sub_path":"scripts/sptk/data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"445092059","text":"__author__ = 'asiaptichka'\nimport os, re, pickle, string\n\n\n# define patterns matching all digits, or all Latin letters and digits\npatternint = '[0-9]+'\npatternengint = '[a-z, A-Z, 0-9]+'\n\n# empty lists for the collected data; they are reset on every run. If the pickle file already exists, the load function can be used instead\nglacierlst = []\ncountrylst = []\narealst = []\n\n# flag for the next action (keep writing to the pickle, or print the result)\nanswer = 1\n\nwhile answer == 1:\n\n glacier = str(input('enter the glacier name in Russian ')) # read the glacier name in Russian.\n\n while re.match(patternengint, glacier): # if other characters are present, the program asks to try again\n\n print('in Russian and without digits! ')\n glacier = str(input('enter the glacier name in Russian '))\n else:\n glacierlst = glacierlst + [glacier] # if the input is valid, the list grows by one entry\n\n# the same for the country where the glacier is located\n\n country = str(input('enter the glacier country in Russian '))\n\n while re.match(patternengint, country):\n print('in Russian and without digits! ')\n country = str(input('enter the glacier country in Russian '))\n else:\n countrylst = countrylst + [country]\n\n# the same for the glacier area\n\n area = str(input('enter the glacier area '))\n\n while re.match(patternint, area) == False:\n print('digits only! ')\n area = str(input('enter the glacier area '))\n else:\n arealst = arealst + [area] # check the next action: enter a new glacier, or print the result?\n\n\n answer = int(input ('enter a new glacier (1) or print the result (2) ?' 
))\n# output the result, selecting by glacier name\n\nf = open('data.pickle', 'wb')\n\n# dump the lists collected above into the opened pickle file\npickle.dump(glacierlst, f)\npickle.dump(countrylst, f)\npickle.dump(arealst, f)\n\nf.close()\n\n# open the pickle file for reading\nf = open('data.pickle', 'rb')\n\n# load the previously stored objects\nglacierlst = pickle.load(f)\ncountrylst = pickle.load(f)\narealst = pickle.load(f)\n\nf.close()\n\n# build a new list: combine each glacier, its country and its area into a single line\nn = len(countrylst)\nfinallst = []\nfor i in range (n):\n\n# keep only glaciers with a specific name\n\n a = glacierlst[i]\n if 'кудль' in a:\n finallst.append(glacierlst[i] + ' ' + countrylst[i] + ' ' + arealst[i])\n else:\n pass\n\nprint(finallst)","sub_path":"HW3-4.py","file_name":"HW3-4.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"318612291","text":"import json\nimport gzip\nimport os\n\ndef parse(path):\n g = open(path, 'r')\n for l in g:\n yield json.dumps(eval(l))\n\nfor path, subdirs, files in os.walk('/Users/lixiaoying/Desktop/datas'):\n\tnewFiles = []\n\tfor num in range(len(files)):\n\t\tif(files[num].endswith(\".json\")):\n\t\t\tnewFiles.append(files[num])\n\tprint(newFiles)\n\tfor filename in newFiles:\n\t\tprint(filename)\n\t\tcounter = 0\n\t\tfileNum = 0\n\t\tfor l in parse(path+\"/\"+filename):\n\t\t\tif(counter >= 10000 or fileNum == 0):\n\t\t\t\tif (fileNum != 0):\n\t\t\t\t\tf.write(\"]}\")\n\t\t\t\tf = open('/Users/lixiaoying/Desktop/datas/' + filename.replace(\".json\",\"_output_\") +str(fileNum) + \".json\", 'w')\n\t\t\t\tf.write(\"{\\\"Reviews\\\": [\")\n\t\t\t\tcounter = 0\n\t\t\t\tfileNum += 1\n\t\t\tf.write(l + ',\\n')\n\t\t\tcounter += 1\n\t\tf.write(\"]}\")","sub_path":"preprc.py","file_name":"preprc.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"285511993","text":"from django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom .models import *\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse, Http404\nfrom django.conf import settings\nimport os\n# Create your views here.\n\nclass Home(TemplateView):\n template_name = 'home.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['about'] = About.objects.first()\n context['file'] = FilesAdmin.objects.all()\n context['services'] = Service.objects.all()\n context['works'] = RecentWork.objects.all()\n context['client'] = Client.objects.all()\n return context\n\ndef contact(request):\n if request.method == \"POST\":\n massage_name = request.POST['massage_name'] \n massage_email = request.POST['email']\n massage = request.POST['massage']\n # send mail (subject, message, from_email, recipient_list)\n send_mail(\n massage_name,\n massage,\n massage_email,\n ['arizonatymothy@gmail.com','timzonatimothy@gamil.com']\n )\n return render(request, 'contact.html', {'massage_name':massage_name,'massage_email':massage_email,'massage':massage})\n\n else:\n return render(request, 'contact.html', {})\n\ndef download(request,path):\n file_path=os.path.join(settings.MEDIA_ROOT,path)\n if os.path.exists(file_path):\n with open(file_path, 'rb')as fh:\n response=HttpResponse(fh.read(),content_type='application/adminupload')\n response['Content-Disposition']='inline;filename='+os.path.basename(file_path)\n return response\n \n raise 
Http404","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"545393783","text":"import argparse\nfrom pathlib import Path\nimport os\nimport shutil\n\n\nparser = argparse.ArgumentParser(description='Prepare a shippable package containing a release of Neuroph Framework')\nparser.add_argument('dir', help='name of directory where release files will be generated')\n\nargs = parser.parse_args()\n\n# Check if provided path is valid\nrelease_folder = Path(args.dir)\nif release_folder.is_file():\n print(\"File already exists with the same name\")\n exit()\nif release_folder.is_dir() and any(release_folder.iterdir()) :\n print(\"Directory specified already exists and is not empty\")\n exit()\n\nrelease_folder.mkdir(exist_ok=True)\n\nos.system('/bin/bash -c \"mvn release:clean release:prepare\"')\nos.system('/bin/bash -c \"mvn release:perform\"')\nos.system('/bin/bash -c \"mvn clean package\"')\nos.system('/bin/bash -c \"mvn javadoc:jar\"')\nos.system('/bin/bash -c \"mvn javadoc:aggregate\"')\nos.system('/bin/bash -c \"mvn dependency:copy-dependencies\"')\n\nsrcdir = \"./Core/target\"\nfor basename in os.listdir(srcdir):\n if basename.endswith('.jar'):\n pathname = os.path.join(srcdir, basename)\n if os.path.isfile(pathname):\n shutil.copy2(pathname, release_folder)\n\nsrcdir = \"./Contrib/target\"\nfor basename in os.listdir(srcdir):\n if basename.endswith('.jar'):\n pathname = os.path.join(srcdir, basename)\n if os.path.isfile(pathname):\n shutil.copy2(pathname, release_folder)\n\nsrcdir = \"./ImageRec/target\"\nfor basename in os.listdir(srcdir):\n if basename.endswith('.jar'):\n pathname = os.path.join(srcdir, basename)\n if os.path.isfile(pathname):\n shutil.copy2(pathname, release_folder)\n\nsrcdir = \"./OCR/target\"\nfor basename in os.listdir(srcdir):\n if basename.endswith('.jar'):\n pathname = os.path.join(srcdir, basename)\n if os.path.isfile(pathname):\n shutil.copy2(pathname, release_folder)\n\nsrcdir = \"./target/site/apidocs\"\nshutil.copytree(srcdir, os.path.join(release_folder, \"apidocs\"), False, None)\nshutil.make_archive(os.path.join(release_folder, \"apidocs\"), 'zip', os.path.join(release_folder, \"apidocs\"))\n","sub_path":"neuroph/prepare_release.py","file_name":"prepare_release.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25228605","text":"# coding = UTF-8\n# Author: Christopher Lee\n# Date : '15-4-10'\n# Des : ...\n\n'''\n该Pythond代码用于处理一个csv的表格文件,主要是计算表格中的两个列的值,添加到新的列中\n这是第一次投入实战,虽然有点慢,但最后还是成功了!值得纪念哈!\n'''\n\nimport math\n\ndef caculate(value1, value2):\n '''计算公式:20 * log10(sqrt(a^2 + b^2))'''\n value1 = float(value1)\n value2 = float(value2)\n return 20 * math.log10(math.sqrt(value1 * value1 + value2 * value2))\n\ntry:\n with open('files/1111.csv', 'r') as source_data, open('files/result.csv', 'w') as result_write:\n # 存放结果的列表\n result = []\n index = 0\n for line in source_data:\n '''从源数据中读出所有的数据,并进行计算,然后把计算结果添加到结果列表中'''\n if not line.startswith('\"'):\n data_list = line.split(',')\n # 此处需要注意的是,把每行的空格或者换行字符清空,下同\n result.append(line.rstrip() + ',' + str(caculate(data_list[1], data_list[2])))\n else:\n result.append(line.rstrip())\n print('行数统计:', len(result))\n\n '''重新写入到新的文件中'''\n for line in result:\n index += 1\n print(line, file=result_write)\n print('Line No.', index, 'writes to file successfully!')\n print('All done.', 
index, 'lines in total!')\nexcept IOError as err:\n print(str(err))","sub_path":"5 推导数据:处理数据/代码练习/exercise_02.py","file_name":"exercise_02.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404798776","text":"import os\nos.chdir(\"/home/hoangphuc/DenseFusion\")\nimport torch\nimport torchfcn\nimport numpy as np\nimport copy\nfrom PIL import Image\nfrom torch.autograd import Variable\nimport pyrealsense2 as rs\nimport cv2\nfrom pytorchfcn.utils import fcn_deploy\nfrom utils import posenet_deploy\nfrom lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix\nimport numpy.ma as ma\nfrom lib.network import PoseNet, PoseRefineNet\nfrom scipy.spatial.distance import euclidean\n\n\n\n#eps = 0.000001\n\ncam_cx = 317.718 \ncam_cy = 241.809 \ncam_fx = 615.601 \ncam_fy = 615.896 \ncam_mat = np.matrix([[cam_fx, 0, cam_cx],[0,cam_fy, cam_cy],[0, 0, 1]])\n#distort = [[-0.0475398, 0.399247, 0.0152188, -0.00181894, -0.567924]]\ndistort = [[0.0, 0.0, 0.0, 0.0, 0.0]]\ndistort = np.array(distort)\n\nnum_objects = 7\nobjlist = [1, 2, 3, 4, 5, 6, 7]\nnum_points = 1000\niteration = 4\nbs = 1\n\nxmap = np.array([[j for i in range(640)] for j in range(480)])\nymap = np.array([[i for i in range(640)] for j in range(480)])\ncam_scale = 1.0\n\n# idx from 0 to 6\n#idx = 0\n\n# object model points\nmodel_points_list = []\nfor idx in range(0, 7): \n model_points = posenet_deploy.get_model_points(idx)\n model_points = model_points[0].cpu().detach().numpy()\n model_points_list.append(model_points)\n\nmodel = \"/home/hoangphuc/DenseFusion/trained_models/blisters/pose_model_7_0.010882323171319623_new.pth\"\nrefine_model = \"/home/hoangphuc/DenseFusion/trained_models/blisters/pose_refine_model_69_0.0032242619757605452_new.pth\"\nestimator = PoseNet(num_points = num_points, num_obj = num_objects)\nestimator.cuda()\nrefiner = PoseRefineNet(num_points = num_points, num_obj = num_objects)\nrefiner.cuda()\nestimator.load_state_dict(torch.load(model))\nrefiner.load_state_dict(torch.load(refine_model))\nestimator.eval()\nrefiner.eval()\n\n\n\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\n# Start streaming\npipeline.start(config)\n\ntry:\n #video_writer = cv2.VideoWriter(\"densefusion_realtime.avi\", cv2.VideoWriter_fourcc('M','J','P','G'), 6, (640,480))\n while True:\n frames = pipeline.wait_for_frames()\n rgb = frames.get_color_frame()\n rgb = np.asanyarray(rgb.get_data())\n frame = rgb\n \n align = rs.align(rs.stream.color)\n aligned_frames = align.process(frames)\n \n semantic, label, label_img = fcn_deploy.fcn_forward(frame)\n cv2.imshow('Mask', label_img)\n cv2.waitKey(1)\n \n \n bboxes = fcn_deploy.bbox_from_mask(label)\n if len(bboxes) > 0:\n depth_frames = aligned_frames.get_depth_frame()\n \n video_profile = depth_frames.profile.as_video_stream_profile()\n intr = video_profile.get_intrinsics()\n depth = np.asanyarray(depth_frames.get_data())\n \n for bbox in bboxes:\n bbox = list(bbox)\n \n class_id = fcn_deploy.get_class_of_bbox(semantic, bbox)\n idx = class_id - 1\n model_points = model_points_list[idx]\n \n mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))\n mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))\n mask = mask_label * mask_depth\n \n rmin, rmax, cmin, cmax = posenet_deploy.get_bbox(bbox)\n \n # choose\n choose = 
mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]\n if len(choose) == 0:\n choose = torch.LongTensor([0])\n if len(choose) > num_points:\n c_mask = np.zeros(len(choose), dtype=int)\n c_mask[:num_points] = 1\n np.random.shuffle(c_mask)\n choose = choose[c_mask.nonzero()]\n else:\n choose = np.pad(choose, (0, num_points - len(choose)), 'wrap')\n \n depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)\n xmap_masked = xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)\n ymap_masked = ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)\n choose = np.array([choose])\n \n # point cloud\n pt2 = depth_masked / cam_scale\n pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx\n pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy\n cloud = np.concatenate((pt0, pt1, pt2), axis=1)\n cloud = cloud / 1000.0 \n \n # cropped img\n #img_masked = rgb[:, :, :3]\n img_masked = rgb[:, :, ::-1] # bgr to rgb\n img_masked = np.transpose(img_masked, (2, 0, 1))\n img_masked = img_masked[:, rmin:rmax, cmin:cmax]\n \n # Variables\n cloud = torch.from_numpy(cloud.astype(np.float32)).unsqueeze(0)\n choose = torch.LongTensor(choose.astype(np.int32)).unsqueeze(0)\n img_masked = torch.from_numpy(img_masked.astype(np.float32)).unsqueeze(0)\n index = torch.LongTensor([idx]).unsqueeze(0) # Specify which object\n \n cloud = Variable(cloud).cuda()\n choose = Variable(choose).cuda()\n img_masked = Variable(img_masked).cuda()\n index = Variable(index).cuda()\n \n # Deploy\n with torch.no_grad():\n pred_r, pred_t, pred_c, emb = estimator(img_masked, cloud, choose, index)\n pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, num_points, 1)\n pred_c = pred_c.view(bs, num_points)\n how_max, which_max = torch.max(pred_c, 1)\n pred_t = pred_t.view(bs * num_points, 1, 3)\n points = cloud.view(bs * num_points, 1, 3)\n \n my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()\n my_t = (points.view(bs * num_points, 1, 3) + pred_t)[which_max[0]].view(-1).cpu().data.numpy()\n my_pred = np.append(my_r, my_t)\n \n # Refinement\n for ite in range(0, iteration):\n T = Variable(torch.from_numpy(my_t.astype(np.float32))).cuda().view(1, 3).repeat(num_points, 1).contiguous().view(1, num_points, 3)\n my_mat = quaternion_matrix(my_r)\n R = Variable(torch.from_numpy(my_mat[:3, :3].astype(np.float32))).cuda().view(1, 3, 3)\n my_mat[0:3, 3] = my_t\n \n new_cloud = torch.bmm((cloud - T), R).contiguous()\n pred_r, pred_t = refiner(new_cloud, emb, index)\n pred_r = pred_r.view(1, 1, -1)\n pred_r = pred_r / (torch.norm(pred_r, dim=2).view(1, 1, 1))\n my_r_2 = pred_r.view(-1).cpu().data.numpy()\n my_t_2 = pred_t.view(-1).cpu().data.numpy()\n my_mat_2 = quaternion_matrix(my_r_2)\n \n my_mat_2[0:3, 3] = my_t_2\n my_mat_final = np.dot(my_mat, my_mat_2)\n my_r_final = copy.deepcopy(my_mat_final)\n my_r_final[0:3, 3] = 0\n my_r_final = quaternion_from_matrix(my_r_final, True)\n my_t_final = np.array([my_mat_final[0][3], my_mat_final[1][3], my_mat_final[2][3]])\n \n my_pred = np.append(my_r_final, my_t_final)\n my_r = my_r_final\n my_t = my_t_final\n \n print(\"pred_c_max = \" + str(how_max[0])) \n print(\"translation vector = \" + str(my_t))\n print(\"rotation vector = \" + str(my_r))\n \n my_r_matrix = quaternion_matrix(my_r)[:3, :3]\n\n #projected_points, _ = cv2.projectPoints(model_points, my_r_matrix, my_t, cam_mat, distort)\n #projected_points = projected_points.reshape((500,2))\n \n \n new_frame = posenet_deploy.get_3d_bbox(rgb, model_points, my_r_matrix, my_t)\n new_frame = 
posenet_deploy.draw_axes(new_frame, my_r_matrix, my_t)\n \n #pcd = posenet_deploy.drawpointcloud(rgb, projected_points)\n #cv2.imshow(\"Point cloud\", pcd)\n #cv2.waitKey(1)\n \n #video_writer.write(new_frame)\n cv2.imshow('Result', new_frame)\n cv2.waitKey(1)\n \n else:\n # Show images\n #video_writer.write(rgb)\n cv2.imshow('Result', rgb)\n cv2.waitKey(1)\n \n \n \nfinally:\n # Stop streaming\n pipeline.stop()\n\n","sub_path":"tools/size_estimation.py","file_name":"size_estimation.py","file_ext":"py","file_size_in_byte":8986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30270530","text":"import numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\n\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\n\r\nimageSize = 28\r\nlabelSize = 10\r\nhiddenSize = 1024\r\n\r\nmnist = input_data.read_data_sets(\"resources/MNIST_Data/\", one_hot=False)\r\n\r\ndef format(dataset):\r\n features = dataset.images\r\n labels = dataset.labels.astype(np.int32)\r\n return features, labels\r\n\r\nfeatureColumns = [tf.contrib.layers.real_valued_column(\"\", dimension=imageSize * imageSize)]\r\nclassifier = tf.contrib.learn.DNNClassifier(\r\n feature_columns=featureColumns,\r\n hidden_units=[hiddenSize], #more layers can be added here\r\n n_classes=labelSize,\r\n optimizer=tf.train.AdamOptimizer()) #not gradient descent this time\r\n\r\n#Fit model\r\nfeatures, labels = format(mnist.train)\r\nclassifier.fit(x=features, y=labels, batch_size=100, steps=1000) \r\n#one line for all our previous code w/ more complex neural network!\r\n\r\n#Test accuracy (use the held-out test split, not the training data)\r\nfeatures, labels = format(mnist.test)\r\ntestAccuracy = classifier.evaluate(x=features, y=labels, batch_size=100, steps=1000)['accuracy']\r\nprint(\"Test accuracy: %g %%\"%(testAccuracy * 100))\r\n\r\n#Evaluate model\r\nfeatures = mnist.validation.images[:10]\r\nlabels = mnist.validation.labels[:10].astype(np.int32)\r\npredictions = classifier.predict(x=features)\r\nprint(\"Predicted labels from validation set: %s\"%list(predictions))\r\nprint(\"Underlying values: %s\"%list(labels))","sub_path":"3-estimators/end/estimators.py","file_name":"estimators.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"453139875","text":"import sys, os\nimport arcpy\n# ONLY USE IN GEOPROCESSING SERVICE\n\n# Service path\nsys.path.insert(0, r'D:\\\\aplicaciones\\\\geoproceso\\\\consultatematica')\npath = os.path.dirname(os.path.dirname(__file__))\nsys.path.append(path)\narcpy.AddMessage(path)\n\nimport main\n\nif __name__ == \"__main__\":\n jsonClip = arcpy.GetParameterAsText(0) # standalone JSON used to clip the feature\n queryEntity = arcpy.GetParameterAsText(1) # code of the feature to clip\n queryClip = arcpy.GetParameterAsText(2) # specific query for the feature to clip\n poo = main.ConsultaTematica(jsonClip, queryEntity, queryClip)\n poo.main()","sub_path":"service/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196346880","text":"import boto3\n\n# Since we don't really want to involve the file system we can do it all in memory\n# Using StringIO, we can deal with files that are totally in memory and never put on the file system\nimport StringIO\n\n# Python module to handle unzipping the files\nimport zipfile\n\nimport mimetypes\n\n# Create a StringIO 
object to hold the zip file in memory\nportfolio_zip = StringIO.StringIO()\n\ns3 = boto3.resource('s3') # creates an S3 resource stored in s3 variable\n\n# Get a reference to build files bucket in S3\nbuild_bucket = s3.Bucket('portfoliobuild.rhart.info')\n\n# Get a reference to a portfolio bucket in S3\nportfolio_bucket = s3.Bucket('portfolio.rhart.info')\n\n# By using download_fileobj, we will save portfoliobuild.zip in memory and portfolio_zip will be our reference\nbuild_bucket.download_fileobj('portfoliobuild.zip', portfolio_zip)\n\nwith zipfile.ZipFile(portfolio_zip) as myzip:\n for name in myzip.namelist():\n # open each file in the zip\n obj = myzip.open(name)\n\n # upload each file by passing the file object and the name of the file\n # We are also using mimetypes module to guess the type of file being uploaded. This is necessary\n # because when using boto3, S3 doesn't try to guess the file type like it does when manually uploading files.\n portfolio_bucket.upload_fileobj(obj, name, ExtraArgs={'ContentType': mimetypes.guess_type(name)[0]})\n\n # Must make each file public or else it won't be accessible in S3\n portfolio_bucket.Object(name).Acl().put(ACL='public-read')\n","sub_path":"upload-portfolio-lambda.py","file_name":"upload-portfolio-lambda.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236434429","text":"import time\r\nimport pyupbit\r\nimport datetime\r\nimport requests\r\n\r\naccess = \"mRqsuThtvvMM1zjixsM8oTRi3Z4AglepTlXkheje\"\r\nsecret = \"gTy5pvLJfNVsK2fpDMHIQIzYtifpKEhbczLmJmyT\"\r\nmyToken = \"xoxb-1992218678086-1998997054034-smkHJ0NLCOqLj69z5TbmHeFW\"\r\n\r\ndef post_message(token, channel, text):\r\n \"\"\"Send a Slack message\"\"\"\r\n response = requests.post(\"https://slack.com/api/chat.postMessage\",\r\n headers={\"Authorization\": \"Bearer \"+token},\r\n data={\"channel\": channel,\"text\": text}\r\n )\r\n\r\ndef get_target_price(ticker, k):\r\n \"\"\"Get the buy target price from a volatility-breakout strategy\"\"\"\r\n df = pyupbit.get_ohlcv(ticker, interval=\"day\", count=2)\r\n target_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k\r\n return target_price\r\n\r\ndef get_start_time(ticker):\r\n \"\"\"Get the start time of the current daily candle\"\"\"\r\n df = pyupbit.get_ohlcv(ticker, interval=\"day\", count=1)\r\n start_time = df.index[0]\r\n return start_time\r\n\r\ndef get_yesterday_price(ticker):\r\n \"\"\"Get yesterday's closing price\"\"\"\r\n df = pyupbit.get_ohlcv(ticker, interval=\"day\", count=2)\r\n yesterday_price = df.iloc[0]['close']\r\n return yesterday_price\r\n\r\ndef get_balance(coin):\r\n \"\"\"Get the balance of a coin\"\"\"\r\n balances = upbit.get_balances()\r\n for b in balances:\r\n if b['currency'] == coin:\r\n if b['balance'] is not None:\r\n return float(b['balance'])\r\n else:\r\n return 0\r\n\r\ndef get_current_price(ticker):\r\n \"\"\"Get the current (ask) price\"\"\"\r\n return pyupbit.get_orderbook(tickers=ticker)[0][\"orderbook_units\"][0][\"ask_price\"]\r\n\r\ndef get_avgprice(coin):\r\n \"\"\"Get the average buy price\"\"\"\r\n avgprice = upbit.get_balances()\r\n for b in avgprice:\r\n if b['currency'] == coin:\r\n if b['avg_buy_price'] is not None:\r\n return float(b['avg_buy_price'])\r\n else:\r\n return 0\r\n\r\n# Log in\r\nupbit = pyupbit.Upbit(access, secret)\r\nprint(\"Autotradestart\")\r\n# Send startup messages to Slack\r\npost_message(myToken,\"#amm\", \"Autotradestart: 0916_v1\")\r\npost_message(myToken,\"#amm\", \" 비트코인-BTC // 이더리움-ETH // 비캐ABC-XEC\")\r\ntime.sleep(0.3)\r\npost_message(myToken,\"#amm\", \" 라이트-LTC // 이클-ETC // 
폴카닷-DOT\")\r\ntime.sleep(0.3)\r\npost_message(myToken,\"#amm\", \" 체인링크-LINK // 에이다-ADA // 리플-XRP\")\r\ntime.sleep(0.3)\r\n\r\nwhile True:\r\n try:\r\n now = datetime.datetime.now()\r\n start_time = get_start_time(\"KRW-BTC\")\r\n end_time = start_time + datetime.timedelta(days=1)\r\n print(\"timenow\")\r\n time.sleep(1)\r\n \r\n # From 09:00 until 08:56 the next day\r\n if start_time + datetime.timedelta(seconds=3600) < now < end_time - datetime.timedelta(seconds=200):\r\n # Coin selection & k values\r\n krw = get_balance(\"KRW\")\r\n\r\n target_price_btc = get_target_price(\"KRW-BTC\", 0.2)\r\n target_price_eth = get_target_price(\"KRW-ETH\", 0.3)\r\n target_price_xec = get_target_price(\"KRW-XEC\", 0.1)\r\n print (\"targetprice1\")\r\n time.sleep(0.5)\r\n target_price_ltc = get_target_price(\"KRW-LTC\", 0.4)\r\n target_price_etc = get_target_price(\"KRW-ETC\", 0.5)\r\n target_price_dot = get_target_price(\"KRW-DOT\", 0.4)\r\n print (\"targetprice2\")\r\n time.sleep(0.5)\r\n target_price_link = get_target_price(\"KRW-LINK\", 0.3)\r\n target_price_ada = get_target_price(\"KRW-ADA\", 0.4)\r\n target_price_xrp = get_target_price(\"KRW-XRP\", 0.7)\r\n print (\"targetprice3\")\r\n time.sleep(0.5)\r\n\r\n btc = get_balance(\"BTC\")\r\n btc_avg = get_avgprice(\"BTC\")\r\n eth = get_balance(\"ETH\")\r\n eth_avg = get_avgprice(\"ETH\")\r\n xec = get_balance(\"XEC\")\r\n xec_avg = get_avgprice(\"XEC\")\r\n print (\"balance1\")\r\n time.sleep(0.5)\r\n ltc = get_balance(\"LTC\")\r\n ltc_avg = get_avgprice(\"LTC\")\r\n etc = get_balance(\"ETC\")\r\n etc_avg = get_avgprice(\"ETC\")\r\n dot = get_balance(\"DOT\")\r\n dot_avg = get_avgprice(\"DOT\")\r\n print (\"balance2\")\r\n time.sleep(0.5)\r\n link = get_balance(\"LINK\")\r\n link_avg = get_avgprice(\"LINK\")\r\n ada = get_balance(\"ADA\")\r\n ada_avg = get_avgprice(\"ADA\")\r\n xrp = get_balance(\"XRP\")\r\n xrp_avg = get_avgprice(\"XRP\")\r\n print (\"balance3\")\r\n time.sleep(0.5)\r\n\r\n current_price_btc = get_current_price(\"KRW-BTC\")\r\n yesterday_price_btc = get_yesterday_price(\"KRW-BTC\")\r\n current_price_eth = get_current_price(\"KRW-ETH\")\r\n yesterday_price_eth = get_yesterday_price(\"KRW-ETH\")\r\n current_price_xec = get_current_price(\"KRW-XEC\")\r\n yesterday_price_xec = get_yesterday_price(\"KRW-XEC\")\r\n print (\"price1\")\r\n time.sleep(0.5)\r\n current_price_ltc = get_current_price(\"KRW-LTC\")\r\n yesterday_price_ltc = get_yesterday_price(\"KRW-LTC\")\r\n current_price_etc = get_current_price(\"KRW-ETC\")\r\n yesterday_price_etc = get_yesterday_price(\"KRW-ETC\")\r\n current_price_dot = get_current_price(\"KRW-DOT\")\r\n yesterday_price_dot = get_yesterday_price(\"KRW-DOT\")\r\n print (\"price2\")\r\n time.sleep(0.5)\r\n current_price_link = get_current_price(\"KRW-LINK\")\r\n yesterday_price_link = get_yesterday_price(\"KRW-LINK\")\r\n current_price_ada = get_current_price(\"KRW-ADA\")\r\n yesterday_price_ada = get_yesterday_price(\"KRW-ADA\")\r\n current_price_xrp = get_current_price(\"KRW-XRP\")\r\n yesterday_price_xrp = get_yesterday_price(\"KRW-XRP\")\r\n print (\"price3\")\r\n time.sleep(0.5)\r\n\r\n # Buy 200,000 KRW when the target price is broken, current holdings of the coin are worth under 10,000, and the price is up less than 10% from yesterday's close\r\n \r\n print (\"buy 1-1\")\r\n if target_price_btc < current_price_btc and 10000 > btc*current_price_btc:\r\n if krw > 200500 and yesterday_price_btc*1.1 > current_price_btc:\r\n buy_result = upbit.buy_market_order(\"KRW-BTC\", 200000)\r\n post_message(myToken,\"#amm\", \"BTC buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"buy 1-2\")\r\n if target_price_eth 
< current_price_eth and 10000 > eth*current_price_eth:\r\n if krw > 200500 and yesterday_price_eth*1.1 > current_price_eth:\r\n buy_result = upbit.buy_market_order(\"KRW-ETH\", 200000)\r\n post_message(myToken,\"#amm\", \"ETH buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"buy 1-3\")\r\n if target_price_xec < current_price_xec and 10000 > xec*current_price_xec:\r\n if krw > 200500 and yesterday_price_xec*1.1 > current_price_xec:\r\n buy_result = upbit.buy_market_order(\"KRW-XEC\", 200000)\r\n post_message(myToken,\"#amm\", \"XEC buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"buy 2-1\")\r\n if target_price_ltc < current_price_ltc and 10000 > ltc*current_price_ltc:\r\n if krw > 200500 and yesterday_price_ltc*1.1 > current_price_ltc:\r\n buy_result = upbit.buy_market_order(\"KRW-LTC\", 200000)\r\n post_message(myToken,\"#amm\", \"LTC buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"buy 2-2\")\r\n if target_price_etc < current_price_etc and 10000 > etc*current_price_etc:\r\n if krw > 200500 and yesterday_price_etc*1.1 > current_price_etc:\r\n buy_result = upbit.buy_market_order(\"KRW-ETC\", 200000)\r\n post_message(myToken,\"#amm\", \"ETC buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"buy 2-3\")\r\n if target_price_dot < current_price_dot and 10000 > dot*current_price_dot:\r\n if krw > 200500 and yesterday_price_dot*1.1 > current_price_dot:\r\n buy_result = upbit.buy_market_order(\"KRW-DOT\", 200000)\r\n post_message(myToken,\"#amm\", \"DOT buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"buy 3-1\")\r\n if target_price_link < current_price_link and 10000 > link*current_price_link:\r\n if krw > 200500 and yesterday_price_link*1.1 > current_price_link:\r\n buy_result = upbit.buy_market_order(\"KRW-LINK\", 200000)\r\n post_message(myToken,\"#amm\", \"LINK buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"buy 3-2\")\r\n if target_price_ada < current_price_ada and 10000 > ada*current_price_ada:\r\n if krw > 200500 and yesterday_price_ada*1.1 > current_price_ada:\r\n buy_result = upbit.buy_market_order(\"KRW-ADA\", 200000)\r\n post_message(myToken,\"#amm\", \"ADA buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"buy 3-3\")\r\n if target_price_xrp < current_price_xrp and 10000 > xrp*current_price_xrp:\r\n if krw > 200500 and yesterday_price_xrp*1.1 > current_price_xrp:\r\n buy_result = upbit.buy_market_order(\"KRW-XRP\", 200000)\r\n post_message(myToken,\"#amm\", \"XRP buy: 타겟가격 돌파_200,000\")\r\n time.sleep(0.5)\r\n\r\n # Buy the dip for a quick win (add 200,000 KRW when 10%~18% below yesterday's close)\r\n \r\n print (\"gazua 1-1\")\r\n if current_price_btc < yesterday_price_btc*0.90 and current_price_btc > yesterday_price_btc*0.82 and 200000 > btc*current_price_btc:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-BTC\", 200000)\r\n post_message(myToken,\"#amm\", \"BTC buy: 종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"gazua 1-2\")\r\n if current_price_eth < yesterday_price_eth*0.90 and current_price_eth > yesterday_price_eth*0.82 and 200000 > eth*current_price_eth:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-ETH\", 200000)\r\n post_message(myToken,\"#amm\", \"ETH buy: 종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"gazua 1-3\")\r\n if current_price_xec < yesterday_price_xec*0.90 and current_price_xec > yesterday_price_xec*0.82 and 200000 > xec*current_price_xec:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-XEC\", 200000)\r\n post_message(myToken,\"#amm\", \"XEC buy: 
종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"gazua 2-1\")\r\n if current_price_ltc < yesterday_price_ltc*0.90 and current_price_ltc > yesterday_price_ltc*0.82 and 200000 > ltc*current_price_ltc:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-LTC\", 200000)\r\n post_message(myToken,\"#amm\", \"LTC buy: 종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"gazua 2-2\")\r\n if current_price_etc < yesterday_price_etc*0.90 and current_price_etc > yesterday_price_etc*0.82 and 200000 > etc*current_price_etc:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-ETC\", 200000)\r\n post_message(myToken,\"#amm\", \"ETC buy: 종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"gazua 2-3\")\r\n if current_price_dot < yesterday_price_dot*0.90 and current_price_dot > yesterday_price_dot*0.82 and 200000 > dot*current_price_dot:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-DOT\", 200000)\r\n post_message(myToken,\"#amm\", \"DOT buy: 종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"gazua 3-1\")\r\n if current_price_link < yesterday_price_link*0.90 and current_price_link > yesterday_price_link*0.82 and 200000 > link*current_price_link:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-LINK\", 200000)\r\n post_message(myToken,\"#amm\", \"LINK buy: 종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"gazua 3-2\")\r\n if current_price_ada < yesterday_price_ada*0.90 and current_price_ada > yesterday_price_ada*0.82 and 200000 > ada*current_price_ada:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-ADA\", 200000)\r\n post_message(myToken,\"#amm\", \"ADA buy: 종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n print (\"gazua 3-3\")\r\n if current_price_xrp < yesterday_price_xrp*0.90 and current_price_xrp > yesterday_price_xrp*0.82 and 200000 > xrp*current_price_xrp:\r\n if krw > 200100:\r\n buy_result = upbit.buy_market_order(\"KRW-XRP\", 200000)\r\n post_message(myToken,\"#amm\", \"XRP buy: 종가-10%_200,000\")\r\n time.sleep(0.5)\r\n\r\n # If the price falls 5%+ below the average buy price after a target-price buy, sell 75%\r\n \r\n print (\"sonjul 1-1\") \r\n if current_price_btc < btc_avg*0.95:\r\n if btc*current_price_btc > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-BTC\", btc*0.75)\r\n post_message(myToken,\"#amm\", \"BTC sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"sonjul 1-2\") \r\n if current_price_eth < eth_avg*0.95:\r\n if eth*current_price_eth > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-ETH\", eth*0.75)\r\n post_message(myToken,\"#amm\", \"ETH sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"sonjul 1-3\") \r\n if current_price_xec < xec_avg*0.95:\r\n if xec*current_price_xec > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-XEC\", xec*0.75)\r\n post_message(myToken,\"#amm\", \"XEC sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"sonjul 2-1\") \r\n if current_price_ltc < ltc_avg*0.95:\r\n if ltc*current_price_ltc > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-LTC\", ltc*0.75)\r\n post_message(myToken,\"#amm\", \"LTC sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"sonjul 2-2\") \r\n if current_price_etc < etc_avg*0.95:\r\n if etc*current_price_etc > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-ETC\", etc*0.75)\r\n post_message(myToken,\"#amm\", \"ETC sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"sonjul 2-3\") \r\n if current_price_dot < dot_avg*0.95:\r\n if dot*current_price_dot > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-DOT\", 
dot*0.75)\r\n post_message(myToken,\"#amm\", \"DOT sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"sonjul 3-1\") \r\n if current_price_link < link_avg*0.95:\r\n if link*current_price_link > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-LINK\", link*0.75)\r\n post_message(myToken,\"#amm\", \"LINK sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"sonjul 3-2\") \r\n if current_price_ada < ada_avg*0.95:\r\n if ada*current_price_ada > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-ADA\", ada*0.75)\r\n post_message(myToken,\"#amm\", \"ADA sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"sonjul 3-3\") \r\n if current_price_xrp < xrp_avg*0.95:\r\n if xrp*current_price_xrp > 160000:\r\n sell_result = upbit.sell_market_order(\"KRW-XRP\", xrp*0.75)\r\n post_message(myToken,\"#amm\", \"XRP sell: 손실 관리_75% 매도\")\r\n time.sleep(0.5)\r\n\r\n \r\n # If the price falls 20%+ below yesterday's close, sell everything\r\n \r\n print (\"mang 1-1\") \r\n if current_price_btc < yesterday_price_btc*0.8:\r\n if btc*current_price_btc > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-BTC\", btc*0.9995)\r\n post_message(myToken,\"#amm\", \"BTC sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"mang 1-2\") \r\n if current_price_eth < yesterday_price_eth*0.8:\r\n if eth*current_price_eth > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-ETH\", eth*0.9995)\r\n post_message(myToken,\"#amm\", \"ETH sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"mang 1-3\") \r\n if current_price_xec < yesterday_price_xec*0.8:\r\n if xec*current_price_xec > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-XEC\", xec*0.9995)\r\n post_message(myToken,\"#amm\", \"XEC sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"mang 2-1\") \r\n if current_price_ltc < yesterday_price_ltc*0.8:\r\n if ltc*current_price_ltc > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-LTC\", ltc*0.9995)\r\n post_message(myToken,\"#amm\", \"LTC sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"mang 2-2\") \r\n if current_price_etc < yesterday_price_etc*0.8:\r\n if etc*current_price_etc > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-ETC\", etc*0.9995)\r\n post_message(myToken,\"#amm\", \"ETC sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"mang 2-3\") \r\n if current_price_dot < yesterday_price_dot*0.8:\r\n if dot*current_price_dot > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-DOT\", dot*0.9995)\r\n post_message(myToken,\"#amm\", \"DOT sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"mang 3-1\") \r\n if current_price_link < yesterday_price_link*0.8:\r\n if link*current_price_link > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-LINK\", link*0.9995)\r\n post_message(myToken,\"#amm\", \"LINK sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"mang 3-2\") \r\n if current_price_ada < yesterday_price_ada*0.8:\r\n if ada*current_price_ada > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-ADA\", ada*0.9995)\r\n post_message(myToken,\"#amm\", \"ADA sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n print (\"mang 3-3\") \r\n if current_price_xrp < yesterday_price_xrp*0.8:\r\n if xrp*current_price_xrp > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-XRP\", xrp*0.9995)\r\n post_message(myToken,\"#amm\", \"XRP sell: 종가-20%_전체매도\")\r\n time.sleep(0.5)\r\n\r\n \r\n print(\"done\")\r\n\r\n \r\n # Session close - buy in the morning window, otherwise sell all holdings\r\n else:\r\n krw = get_balance(\"KRW\")\r\n if start_time < now < end_time + datetime.timedelta(seconds=3300):\r\n print(\"GOOD MORNING\")\r\n if krw > 
100000:\r\n buy_result = upbit.buy_market_order(\"KRW-BTC\", 100000)\r\n time.sleep(0.5)\r\n if krw > 100000:\r\n buy_result = upbit.buy_market_order(\"KRW-ETH\", 100000)\r\n time.sleep(0.5)\r\n if krw > 100000:\r\n buy_result = upbit.buy_market_order(\"KRW-XEC\", 100000)\r\n time.sleep(0.5)\r\n if krw > 100000:\r\n buy_result = upbit.buy_market_order(\"KRW-LTC\", 100000)\r\n time.sleep(0.5)\r\n if krw > 100000:\r\n buy_result = upbit.buy_market_order(\"KRW-ETC\", 100000)\r\n time.sleep(0.5)\r\n if krw > 100000:\r\n buy_result = upbit.buy_market_order(\"KRW-DOT\", 100000)\r\n time.sleep(0.5)\r\n if krw > 100000:\r\n buy_result = upbit.buy_market_order(\"KRW-LINK\", 100000)\r\n time.sleep(0.5)\r\n if krw > 100000:\r\n buy_result = upbit.buy_market_order(\"KRW-ADA\", 100000)\r\n time.sleep(0.5)\r\n\r\n else: \r\n\r\n btc = get_balance(\"BTC\")\r\n current_price_btc = get_current_price(\"KRW-BTC\")\r\n time.sleep(0.5)\r\n if btc*current_price_btc > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-BTC\", btc*0.9995)\r\n post_message(myToken,\"#amm\", \"BTC sell: 장마감\")\r\n\r\n eth = get_balance(\"ETH\")\r\n current_price_eth = get_current_price(\"KRW-ETH\")\r\n time.sleep(0.5)\r\n if eth*current_price_eth > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-ETH\", eth*0.9995)\r\n post_message(myToken,\"#amm\", \"ETH sell: 장마감\")\r\n\r\n xec = get_balance(\"XEC\")\r\n current_price_xec = get_current_price(\"KRW-XEC\")\r\n time.sleep(0.5)\r\n if xec*current_price_xec > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-XEC\", xec*0.9995)\r\n post_message(myToken,\"#amm\", \"XEC sell: 장마감\")\r\n\r\n ltc = get_balance(\"LTC\")\r\n current_price_ltc = get_current_price(\"KRW-LTC\")\r\n time.sleep(0.5)\r\n if ltc*current_price_ltc > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-LTC\", ltc*0.9995)\r\n post_message(myToken,\"#amm\", \"LTC sell: 장마감\")\r\n\r\n etc = get_balance(\"ETC\")\r\n current_price_etc = get_current_price(\"KRW-ETC\")\r\n time.sleep(0.5)\r\n if etc*current_price_etc > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-ETC\", etc*0.9995)\r\n post_message(myToken,\"#amm\", \"ETC sell: 장마감\")\r\n\r\n dot = get_balance(\"DOT\")\r\n current_price_dot = get_current_price(\"KRW-DOT\")\r\n time.sleep(0.5)\r\n if dot*current_price_dot > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-DOT\", dot*0.9995)\r\n post_message(myToken,\"#amm\", \"DOT sell: 장마감\")\r\n\r\n link = get_balance(\"LINK\")\r\n current_price_link = get_current_price(\"KRW-LINK\")\r\n time.sleep(0.5)\r\n if link*current_price_link > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-LINK\", link*0.9995)\r\n post_message(myToken,\"#amm\", \"LINK sell: 장마감\")\r\n\r\n ada = get_balance(\"ADA\")\r\n current_price_ada = get_current_price(\"KRW-ADA\")\r\n time.sleep(0.5)\r\n if ada*current_price_ada > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-ADA\", ada*0.9995)\r\n post_message(myToken,\"#amm\", \"ADA sell: 장마감\")\r\n\r\n xrp = get_balance(\"XRP\")\r\n current_price_xrp = get_current_price(\"KRW-XRP\")\r\n time.sleep(0.5)\r\n if xrp*current_price_xrp > 5000:\r\n sell_result = upbit.sell_market_order(\"KRW-XRP\", xrp*0.9995)\r\n post_message(myToken,\"#amm\", \"XRP sell: 장마감\")\r\n\r\n time.sleep(1)\r\n\r\n # Report error messages\r\n except Exception as e:\r\n print(e)\r\n post_message(myToken,\"#amm\", e)\r\n 
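 # Brief back-off after an error so the loop does not hammer the exchange API.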
time.sleep(1)\r\n\r\n","sub_path":"amm_1006.py","file_name":"amm_1006.py","file_ext":"py","file_size_in_byte":24651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"72597962","text":"import sys\nimport math\n\ndef Sequence(C):\n New=[]\n idx=0\n\n\n for i in C:\n\n if len(New)<2:\n\n New.append(1)\n New.append(int(i))\n else:\n if New[-1]==i:\n New[-2]+=1\n else:\n New.append(1)\n New.append(int(i))\n\n return New\n\nr = int(input())\nl = int(input())\n\nCONWAY=[]\nCONWAY.append([r])\n\nfor i in range(l-1):\n\n CONWAY.append(Sequence(CONWAY[i]))\n\n\nANS=[str(i) for i in CONWAY[-1]]\n\nANS=' '.join(ANS)\nprint(ANS)\n","sub_path":"CLASSIC PUZZLE - MEDIUM/conway-sequence.py","file_name":"conway-sequence.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155455498","text":"from notificator import Notificator, add_new_subscriber\nimport filecmp\n\nTEST_OUTFILE_PATH = \"Tests/task5_test_output.json\"\nCORRECT_OUTFILE_PATH = \"Tests/task5_test_CorrectOutput.json\"\nAPP_URL = \"http://127.0.0.1:5000/\"\n\nCONFIRMED_ROUTE = [{\"route\": [{\"lat\": 23.54545, \"lon\": 24.44234},],\n \"date\": \"2021-01-14\", \"country\": \"TY\"},\n\n {\"route\": [{\"lat\": 21.12345, \"lon\": 24.10001}],\n \"date\": \"2021-01-14\", \"country\": \"TY\"},\n\n {\"route\": [{\"lat\": 21.00000, \"lon\": 24.10000}],\n \"date\": \"2021-01-14\", \"country\": \"TY\"}\n ]\n\n\nSUBSCRIBE_ROUTE = {\"route\": [{\"lat\": 23.54545, \"lon\": 24.44232},\n {\"lat\": 21.12345, \"lon\": 24.10002}],\n \"dateOfRoute\": \"2021-01-14\",\n \"country\": \"TY\",\n \"email\": \"subscriber1@notexist.com\"}\n\n\n\ndef add_and_track_infections():\n \"\"\"Make sure no TY.json nor outfile exists\"\"\"\n add_new_subscriber(SUBSCRIBE_ROUTE)\n n = Notificator(TEST_OUTFILE_PATH)\n n.check_infection_from_given_data(CONFIRMED_ROUTE)\n assert(filecmp.cmp(TEST_OUTFILE_PATH, CORRECT_OUTFILE_PATH))\n\n\n\nif __name__ == '__main__':\n add_and_track_infections()\n\n\n\n\n\n\n","sub_path":"Tests/notificator_test.py","file_name":"notificator_test.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419033298","text":"from flask import render_template, url_for, redirect, request, flash\nfrom flask_login import current_user\nfrom .models import Category, Task, Priority\nfrom app.auth.models import User\nfrom app.models import Binnacle\nfrom flask_login import login_required, current_user\nfrom app import login_manager, db\nimport json\n# from .forms import PostForm\n\n# from auth import g\nfrom . 
import public_bp\n\n# @public_bp.before_request\n# def before_request():\n# g.user = current_user\n# # print(current_user.username)\n\n@public_bp.route('/create/task/', methods=['GET', 'POST'])\n@login_required\ndef create_task():\n context = {\n \"categories\": Category.get_all(),\n \"priorities\": Priority.get_all(),\n \"flag\": False\n }\n message = None\n if request.method == 'POST':\n title = request.form['title-task']\n description = request.form['description-task']\n date_start = request.form['date-start-task']\n date_end = request.form['date-end-task']\n hour_task = request.form['hour-task']\n category = request.form['category-task']\n priority = request.form['priority-task']\n # Get the category and priority, to get their id's:\n category = Category.get_by_name(category)\n # print(category.id)\n priority = Priority.get_by_name(priority)\n # print(\"priority {0}\".format(priority.id))\n try:\n # To add a new task:\n task = Task(title=title, description=description, date_create=date_start, date_todo=date_end, hour_todo=hour_task, done=False, \\\n id_cate=category.id, id_prio=priority.id, id_user=current_user.id)\n task.save()\n message = 'The task has been saved successfully'\n flash(message, 'success')\n return redirect(url_for('public.index'))\n except:\n flash('An internal error has occurred, please try again.', 'error')\n return redirect(url_for('public.create_task'))\n return render_template(\"public/create_task.html\", **context)\n\n\n@public_bp.route('/')\n@login_required\ndef index():\n tasks = Task.query.join(User.usucat).join(Category).join(Priority).filter(User.id == current_user.id).order_by(Task.done).all()\n context = {\n \"tasks\": tasks\n }\n # for item in tasks: print(item.priority.name_priority)\n # for item in tasks: print(item.done)\n return render_template(\"public/index.html\", **context)\n\n@public_bp.route('/edit/task///', methods=['GET', 'POST'])\n@public_bp.route('/edit/task//', methods=['GET', 'POST'])\ndef edit_task(id_task=None, flag=None):\n task = Task.get_by_id(id_task)\n context = {\n \"task\": task,\n \"categories\": Category.get_all(),\n \"priorities\": Priority.get_all(),\n \"flag\": True\n }\n if flag:\n try:\n task.title = request.form['title-task']\n task.description = request.form['description-task']\n task.date_create = request.form['date-start-task']\n task.date_todo = request.form['date-end-task']\n task.hour_todo = request.form['hour-task']\n task.done = False\n category = request.form['category-task']\n category = Category.get_by_name(category)\n task.id_cate = category.id\n priority = request.form['priority-task']\n priority = Priority.get_by_name(priority)\n task.id_prio = priority.id\n task.id_user = current_user.id\n db.session.add(task)\n db.session.commit()\n flash('The task has been modified successfully', 'success')\n except:\n flash(\"The task could not be modified\", 'error')\n return redirect(url_for('public.index'))\n return render_template('public/create_task.html', **context)\n\n@public_bp.route('/delete/task//')\ndef delete_task(id_task):\n task = Task.get_by_id(id_task)\n if task is not None:\n # Record the task in the binnacle (history) before deleting it.\n insert_binnacle(id_task)\n\n db.session.delete(task)\n db.session.commit()\n flash(\"The task has been deleted\", \"success\")\n else:\n flash(\"An error has occurred...\", \"error\")\n return redirect(url_for('public.index'))\n\n@public_bp.route('/done/task//')\ndef done_task(id_task):\n task = Task.get_by_id(id_task)\n # Record the task in the binnacle (history) before removing it.\n insert_binnacle(id_task)\n 
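 # Note: completing a task archives it to the binnacle and then removes the row, rather than setting done=True on the Task itself.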
# Delete from task:\n db.session.delete(task)\n db.session.commit()\n flash(\"The task has been marked as done.\", \"success\")\n return redirect(url_for(\"public.index\"))\n\ndef insert_binnacle(id_task):\n task = Task.get_by_id(id_task)\n binnacle = Binnacle(id_user=current_user.id, title=task.title, description=task.description, date_create=task.date_create, date_todo=task.date_todo, status=True)\n binnacle.save()\n\n@public_bp.route('/task/history/')\ndef task_history():\n return render_template('public/task_history.html')\n\n@public_bp.route('/user/account/', methods=['GET', 'POST'])\n@login_required\ndef account_user():\n return render_template(\"public/user_account.html\")","sub_path":"app/public/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404039493","text":"import json\nimport collections\nimport re\nimport numpy as np\nimport logging\nimport sys\nimport scrapy\nfrom scrapy_splash import SplashRequest\nfrom scrapy.exceptions import CloseSpider\nfrom airbnb_scrape.items import AirbnbScraperItem\nfrom inline_requests import inline_requests\n\n# ********************************************************************************************\n# Important: Run -> docker run -p 8050:8050 scrapinghub/splash in background before crawling *\n# ********************************************************************************************\n\n\n# *********************************************************************************************\n# Run crawler with -> scrapy crawl airbnb -o 21to25.json -a price_lb='' -a price_ub='' *\n# *********************************************************************************************\n\nclass AirbnbSpider(scrapy.Spider):\n name = 'airbnb'\n allowed_domains = ['www.airbnb.com']\n key = 'd306zoyjsyarp7ifhu67rjxn52tv0t20'\n client_session_id = '81160313-ce62-41de-b1af-4622241be24e'\n\n def __init__(self, min='', *args, **kwargs):\n super(AirbnbSpider, self).__init__(*args, **kwargs)\n self.min = int(min)\n\n review_url = 'https://www.airbnb.com/api/v2/homes_pdp_reviews?currency=USD' \\\n '&key={0}' \\\n '&locale=en' \\\n '&listing_id={1}' \\\n '&_format=for_p3&limit=700&offset=0&order=language_country'\n\n house_url = 'https://www.airbnb.com/api/v2/explore_tabs?_format=for_explore_search_web' \\\n '&auto_ib=false' \\\n '&client_session_id={1}' \\\n '&currency=USD&current_tab_id=home_tab' \\\n '&experiences_per_grid=20&fetch_filters=true&guidebooks_per_grid=20&has_zero_guest_treatment=true&hide_dates_and_guests_filters=false&is_guided_search=true&is_new_cards_experiment=true&is_standard_search=true' \\\n '&items_per_grid=18' \\\n '&key={0}' \\\n '&locale=en' \\\n '&metadata_only=false' \\\n '&query=New%20York' \\\n '&query_understanding_enabled=true&refinement_paths%5B%5D=%2Fhomes&satori_version=1.1.7&screen_height=739&screen_size=medium&screen_width=900&search_type=section_navigation&selected_tab_id=home_tab&show_groupings=true&supports_for_you_v3=true' \\\n '&timezone_offset=-300&version=1.6.5'\n\n def start_requests(self):\n self.house_url = self.house_url + '&price_min={2}&price_max={3}'\n self.max = self.min + 19\n new_url = self.house_url.format(self.key, self.client_session_id, self.min, self.max)\n yield scrapy.Request(url=new_url, callback=self.parse_id, dont_filter=False)\n\n @inline_requests\n def parse_id(self, response):\n # print('parsing id')\n data = json.loads(response.body)\n homes = 
data.get('explore_tabs')[0].get('sections')[0].get('listings')\n pagination_metadata = data.get('explore_tabs')[0].get('pagination_metadata')\n if homes is None:\n try:\n homes = data.get('explore_tabs')[0].get('sections')[1].get('listings')\n except IndexError:\n try:\n homes = data.get('explore_tabs')[0].get('sections')[2].get('listings')\n except:\n raise CloseSpider(\"No homes available in the city and price parameters\")\n data_dict = collections.defaultdict(dict)\n base_url = 'https://www.airbnb.com/rooms/'\n for home in homes:\n # room_id = str(home.get('listing').get('id'))\n # url = base_url + str(home.get('listing').get('id'))\n # data_dict[room_id]['url'] = url\n # data_dict[room_id]['listing_id'] = room_id\n # data_dict[room_id]['price'] = home.get('pricing_quote').get('rate').get('amount')\n # data_dict[room_id]['bathrooms'] = home.get('listing').get('bathrooms')\n # data_dict[room_id]['bedrooms'] = home.get('listing').get('bedrooms')\n # data_dict[room_id]['host_languages'] = home.get('listing').get('host_languages')\n # data_dict[room_id]['is_business_travel_ready'] = home.get('listing').get('is_business_travel_ready')\n # data_dict[room_id]['is_fully_refundable'] = home.get('listing').get('is_fully_refundable')\n # data_dict[room_id]['is_new_listing'] = home.get('listing').get('is_new_listing')\n # data_dict[room_id]['is_superhost'] = home.get('listing').get('is_superhost')\n # data_dict[room_id]['lat'] = home.get('listing').get('lat')\n # data_dict[room_id]['lng'] = home.get('listing').get('lng')\n # data_dict[room_id]['localized_city'] = home.get('listing').get('localized_city')\n # data_dict[room_id]['localized_neighborhood'] = home.get('listing').get('localized_neighborhood')\n # data_dict[room_id]['listing_name'] = home.get('listing').get('name')\n # data_dict[room_id]['person_capacity'] = home.get('listing').get('person_capacity')\n # data_dict[room_id]['picture_count'] = home.get('listing').get('picture_count')\n # data_dict[room_id]['reviews_count'] = home.get('listing').get('reviews_count')\n # data_dict[room_id]['room_type_category'] = home.get('listing').get('room_type_category')\n # data_dict[room_id]['star_rating'] = home.get('listing').get('star_rating')\n # data_dict[room_id]['host_id'] = home.get('listing').get('user').get('id')\n # data_dict[room_id]['avg_rating'] = home.get('listing').get('avg_rating')\n # data_dict[room_id]['can_instant_book'] = home.get('pricing_quote').get('can_instant_book')\n # data_dict[room_id]['monthly_price_factor'] = home.get('pricing_quote').get('monthly_price_factor')\n # data_dict[room_id]['currency'] = home.get('pricing_quote').get('rate').get('currency')\n # data_dict[room_id]['amt_w_service'] = home.get('pricing_quote').get('rate_with_service_fee').get('amount')\n # data_dict[room_id]['rate_type'] = home.get('pricing_quote').get('rate_type')\n # data_dict[room_id]['weekly_price_factor'] = home.get('pricing_quote').get('weekly_price_factor')\n # data_dict[room_id]['min_nights'] = home.get('listing').get('min_nights')\n # data_dict[room_id]['max_nights'] = home.get('listing').get('max_nights')\n listing = AirbnbScraperItem()\n room_id = str(home.get('listing').get('id'))\n url = base_url + str(home.get('listing').get('id'))\n listing['listing_id'] = room_id\n listing['url'] = url\n listing['price'] = home.get('pricing_quote').get('rate').get('amount')\n listing['bathrooms'] = home.get('listing').get('bathrooms')\n listing['bedrooms'] = home.get('listing').get('bedrooms')\n listing['is_superhost'] = 
home.get('listing').get('is_superhost')\n listing['lat'] = home.get('listing').get('lat')\n listing['lng'] = home.get('listing').get('lng')\n listing['localized_city'] = home.get('listing').get('localized_city')\n listing['listing_name'] = home.get('listing').get('name')\n listing['person_capacity'] = home.get('listing').get('person_capacity')\n listing['reviews_count'] = home.get('listing').get('reviews_count')\n listing['room_type_category'] = home.get('listing').get('room_type_category')\n listing['can_instant_book'] = home.get('pricing_quote').get('can_instant_book')\n listing['min_nights'] = home.get('listing').get('min_nights')\n listing['max_nights'] = home.get('listing').get('max_nights')\n new_reiews_url = self.review_url.format(self.key, room_id)\n resp = yield scrapy.Request(url=new_reiews_url)\n data = json.loads(resp.body)\n reviews = data.get('reviews')\n last = reviews[0].get('created_at')[0:10]\n first = reviews[-1].get('created_at')[0:10]\n diff = (int(last[0:4]) - int(first[0:4])) * 12 + int(last[5:7]) - int(first[5:7])\n listing['months'] = diff\n yield listing\n # After scraping entire listings page, check if more pages\n # for room_id in data_dict:\n # yield SplashRequest(url=base_url+room_id, callback=self.parse_details,\n # meta=data_dict.get(room_id),\n # endpoint=\"render.html\",\n # args={'wait': '0.5'})\n\n if pagination_metadata.get('has_next_page'):\n items_offset = pagination_metadata.get('items_offset')\n section_offset = pagination_metadata.get('section_offset')\n new_url = self.house_url.format(self.key, self.client_session_id, self.min, self.max) + \\\n '&items_offset={0}§ion_offset={1}'.format(items_offset, section_offset)\n print('next page')\n yield scrapy.Request(url=new_url, callback=self.parse_id)\n\n @inline_requests\n def parse_details(self, response):\n listing = AirbnbScraperItem()\n listing_id = response.meta['listing_id']\n new_reiews_url = self.review_url.format(self.key, listing_id)\n listing['listing_id'] = listing_id\n listing['is_superhost'] = response.meta['is_superhost']\n listing['price'] = response.meta['price']\n listing['url'] = response.meta['url']\n listing['bathrooms'] = response.meta['bathrooms']\n listing['bedrooms'] = response.meta['bedrooms']\n listing['lat'] = response.meta['lat']\n listing['lng'] = response.meta['lng']\n listing['localized_city'] = response.meta['localized_city']\n listing['listing_name'] = response.meta['listing_name']\n listing['person_capacity'] = response.meta['person_capacity']\n listing['reviews_count'] = response.meta['reviews_count']\n listing['room_type_category'] = response.meta['room_type_category']\n listing['can_instant_book'] = response.meta['can_instant_book']\n listing['min_nights'] = response.meta['min_nights']\n listing['max_nights'] = response.meta['max_nights']\n # try:\n # listing['num_beds'] = int((re.search('\"bed_label\":\"(.).*\",\"bedroom_label\"', response.text)).group(1))\n # except:\n # listing['num_beds'] = 0\n resp = yield scrapy.Request(url=new_reiews_url)\n data = json.loads(resp.body)\n reviews = data.get('reviews')\n last = reviews[0].get('created_at')[0:10]\n first = reviews[-1].get('created_at')[0:10]\n diff = (int(last[0:4]) - int(first[0:4])) * 12 + int(last[5:7]) - int(first[5:7])\n listing['months'] = diff\n yield listing\n","sub_path":"airbnb_scrape/airbnb_scrape/spiders/airbnb.py","file_name":"airbnb.py","file_ext":"py","file_size_in_byte":10781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
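# A minimal standalone sketch of the month-span arithmetic the spider above uses to
# estimate how long a listing has been reviewed; it assumes ISO "YYYY-MM-DD" date
# strings, and the helper name months_between is illustrative, not from the original file.
def months_between(first, last):
    # e.g. months_between("2017-03-15", "2019-01-02") -> (2019-2017)*12 + 1 - 3 == 22
    return (int(last[0:4]) - int(first[0:4])) * 12 + int(last[5:7]) - int(first[5:7])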
+{"seq_id":"350333616","text":"import pygame, sys, random\r\nimport neat\r\n\r\n#initialize pygame to use it's functions\r\npygame.init()\r\nclock=pygame.time.Clock()\r\n\r\n#create a window where game will Run\r\nscreen = pygame.display.set_mode((400,400))\r\n#title \r\npygame.display.set_caption(\"Catch the Ball\")\r\n\r\n#load images\r\nbackground_image = pygame.image.load(\"bg.png\").convert()\r\nplr_img = pygame.image.load(\"player.png\").convert_alpha()\r\nplr_img=pygame.transform.smoothscale(plr_img,(60,90))\r\nball_image = pygame.image.load(\"ball.png\").convert_alpha()\r\nball_image=pygame.transform.smoothscale(ball_image,(40,40))\r\nover_img=pygame.image.load(\"over.png\").convert_alpha()\r\nover_img=pygame.transform.smoothscale(over_img,(200,100))\r\n\r\n#creating objects of game\r\nball=pygame.Rect(200,0,40,40)\r\nplayer=pygame.Rect(100,310,60,90)\r\n\r\ngeneration=0\r\n\r\ncount=0\r\n\r\ndef eval_genomes(genomes, config): #######\r\n\r\n global generation\r\n\r\n generation+=1\r\n\r\n for gid,genome in genomes: #####\r\n\r\n count = 0\r\n\r\n count_font=pygame.font.Font('freesansbold.ttf', 20)\r\n\r\n genome.fitness = 0\r\n\r\n speed=0\r\n \r\n while True:\r\n\r\n genome.fitness+=0.1\r\n\r\n screen.blit(background_image,[0,0])\r\n #event loop to check which key is print\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n #use events to move the player\r\n if event.type==pygame.KEYDOWN:\r\n if event.key==pygame.K_RIGHT:\r\n speed=5\r\n if event.key==pygame.K_LEFT:\r\n speed=-5\r\n\r\n player.x=player.x+speed\r\n\r\n #code for ball falling\r\n ball.y=ball.y+5\r\n \r\n if(ball.colliderect(player)):\r\n ball.y=0\r\n ball.x=random.randint(0, 360)\r\n genome.fitness+=1\r\n count+=1\r\n if(ball.y>400):\r\n ball.y=0\r\n ball.x=random.randint(0, 360)\r\n screen.blit(over_img,[100,100])\r\n genome.fitness+=1\r\n break\r\n\r\n screen.blit(plr_img,player)\r\n screen.blit(ball_image,ball)\r\n\r\n count_text=count_font.render(\"Count:\"+str(count)+\" Gen:\"+str(generation), False, (255,255,0)) #####\r\n screen.blit(count_text,[10,10])\r\n\r\n pygame.display.flip()\r\n clock.tick(30)\r\n\r\nconfig = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,neat.DefaultSpeciesSet, neat.DefaultStagnation,'config-feedforward.txt') \r\np = neat.Population(config)\r\nwinner = p.run(eval_genomes,7)\r\n","sub_path":"G12_C14/Sol_C14_Project.py","file_name":"Sol_C14_Project.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"94050351","text":"from django.contrib import messages\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.edit import CreateView\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models import Q\nfrom django.contrib.auth import get_user_model\nfrom django.http import Http404\n\nfrom guardian.shortcuts import get_objects_for_user\n\nfrom bg_inventory.models import Dish, Outlet, Table, Review, Note, Category\nfrom bg_order.models import Meal, Request\n\nfrom bg_inventory.forms import DishCreateForm\nfrom utils import send_socketio_message, today_limit\n\nUser = get_user_model()\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\nclass MainView(TemplateView):\n template_name = \"bg_order/main.html\"\n\n def get_context_data(self, **kwargs):\n outlets = get_objects_for_user(\n self.request.user,\n \"change_outlet\",\n Outlet.objects.all()\n )\n if 
(outlets.count() == 0):\n raise PermissionDenied\n context = super(MainView, self).get_context_data(**kwargs)\n meals = Meal.objects\\\n .prefetch_related('diner', 'orders', 'table')\\\n .filter(table__outlet__in=outlets)\\\n .filter(Q(status=Meal.ACTIVE) | Q(status=Meal.ASK_BILL))\n requests = Request.objects\\\n .prefetch_related('diner', 'table')\\\n .filter(table__outlet__in=outlets)\\\n .filter(is_active=True)\n cards = list(meals) + list(requests)\n context[\"cards\"] = sorted(cards,\n key=lambda card: card.count_down_start)\n return context\n\n\nclass HistoryView(TemplateView):\n template_name = \"bg_order/history.html\"\n model = Meal\n\n def get_context_data(self, **kwargs):\n outlets = get_objects_for_user(\n self.request.user,\n \"change_outlet\",\n Outlet.objects.all()\n )\n if (outlets.count() == 0):\n raise PermissionDenied\n limit = today_limit()\n context = super(HistoryView, self).get_context_data(**kwargs)\n context['meal_cards'] = Meal.objects\\\n .prefetch_related('diner', 'diner__meals', 'table')\\\n .filter(table__outlet__in=outlets)\\\n .filter(created__lte=limit[1], created__gte=limit[0])\\\n .filter(status=Meal.INACTIVE).filter(is_paid=True)\n context['requests_cards'] = Request.objects\\\n .prefetch_related('diner', 'diner__meals', 'table')\\\n .filter(table__outlet__in=outlets)\\\n .filter(created__lte=limit[1], created__gte=limit[0])\\\n .filter(is_active=False)\n return context\n\n\nclass MenuView(ListView):\n model = Dish\n template_name = \"bg_inventory/menu.html\"\n\n def get_queryset(self):\n #filter queryset based on user's permitted outlet\n outlets = get_objects_for_user(\n self.request.user,\n \"change_outlet\",\n Outlet.objects.all()\n )\n if (outlets.count() == 0):\n raise PermissionDenied\n return super(MenuView, self).get_queryset()\\\n .prefetch_related('outlet', 'categories')\\\n .filter(outlet__in=outlets)\n\n def get_context_data(self, **kwargs):\n context = super(MenuView, self).get_context_data(**kwargs)\n context['categories'] = Category.objects.all()\n return context\n\n def get(self, request, *args, **kwargs):\n result = super(MenuView, self).get(request, *args, **kwargs)\n return result\n\n\nclass MenuAddView(CreateView):\n form_class = DishCreateForm\n template_name = \"bg_inventory/dish_form.html\"\n success_url = \"/staff/menu/\"\n\n def get(self, request, *args, **kwargs):\n outlets = get_objects_for_user(\n self.request.user,\n \"change_outlet\",\n Outlet.objects.all()\n )\n if (outlets.count() == 0):\n raise PermissionDenied\n req = super(MenuAddView, self).get(request, *args, **kwargs)\n req.context_data['form']['outlet'].field.initial = outlets[0]\n return req\n\n def post(self, request, *args, **kwargs):\n result = super(MenuAddView, self).post(request, *args, **kwargs)\n messages.success(self.request, 'Dish added')\n send_socketio_message(\n request.user.outlet_ids,\n ['refresh', 'menu', 'add'])\n return result\n\n\nclass TableView(ListView):\n model = Table\n template_name = \"bg_order/tables.html\"\n\n def get_queryset(self):\n #filter queryset based on user's permitted outlet\n outlets = get_objects_for_user(\n self.request.user,\n \"change_outlet\",\n Outlet.objects.all()\n )\n return super(TableView, self).get_queryset()\\\n .prefetch_related('meals__diner',\n 'meals__diner__meals',\n 'meals', 'meals__orders')\\\n .filter(outlet__in=outlets)\n\n\nclass UserView(TemplateView):\n template_name = \"bg_order/user.html\"\n\n def get_context_data(self, **kwargs):\n context = super(UserView, self).get_context_data(**kwargs)\n 
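 # Limit the lookup to outlets this staff member holds the change_outlet object permission on (django-guardian).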
outlets = get_objects_for_user(\n self.request.user,\n \"change_outlet\",\n Outlet.objects.all()\n )\n try:\n diner = User.objects.prefetch_related(\n 'meals', 'meals__orders', 'meals__orders__dish',\n 'profile', 'notes').get(pk=self.kwargs['pk'])\n except User.DoesNotExist:\n raise Http404\n\n context['diner'] = diner\n context['reviews'] = Review.objects.filter(\n user=diner,\n outlet__in=outlets\n ).all()\n context['notes'] = Note.objects.filter(\n user=diner,\n outlet__in=outlets).all()\n return context\n\n\nclass ReportView(ListView):\n model = Meal\n template_name = \"bg_order/report.html\"\n\n def get_queryset(self):\n #filter queryset based on user's permitted outlet\n outlets = get_objects_for_user(\n self.request.user,\n \"change_outlet\",\n Outlet.objects.all()\n )\n if (outlets.count() == 0):\n raise PermissionDenied\n return super(ReportView, self).get_queryset()\\\n .prefetch_related('diner', 'orders', 'orders__dish', 'table')\\\n .filter(table__outlet__in=outlets, is_paid=True)\n","sub_path":"src/diner/BigSpoonDiner/src/backend/bigspoon/bigspoon/bg_order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387430869","text":"from selenium import webdriver\nimport pandas as pd\nfrom app.models import Review, Society\nfrom app import db\nimport time\n\n\ndef scrap_data_reviews(name):\n # Init window\n driver = webdriver.Firefox()\n driver.fullscreen_window()\n # google search\n url = 'https://www.google.com/search?q='+name\n driver.get(url)\n # find google review container\n avis = driver.find_element_by_xpath(\"//a/span[contains(text(),'avis Google')]\")\n avis.click()\n driver.implicitly_wait(10)\n review_container = driver.find_element_by_xpath(\"//div[contains(@class,'review-dialog-list')]\")\n\n # Scroll the review container to the end to load all the reviews\n while True:\n # get height before scrolling to detect when the end of the container is reached\n last_height = driver.execute_script(\"return arguments[0].scrollHeight\", review_container)\n driver.execute_script('arguments[0].scrollTop = arguments[0].scrollHeight', review_container)\n time.sleep(1)\n # get height after scrolling to detect when the end of the container is reached\n review_container = driver.find_element_by_xpath(\"//div[contains(@class,'review-dialog-list')]\")\n height = driver.execute_script(\"return arguments[0].scrollHeight\", review_container)\n\n if height == last_height:\n break\n\n # open \"voir plus..\" to load the full text of every review\n view_more = review_container.find_elements_by_xpath(\"//a[contains(@class, 'review-more-link')]\")\n for button in view_more:\n button.click()\n\n # get review information: date and text\n date_reviews = [date_review.text for date_review in review_container.find_elements_by_xpath(\"//div[contains(@class, '__google-review')]/div[1]/div[3]/div[1]\")]\n reviews = [review.text for review in review_container.find_elements_by_xpath(\"//div[contains(@class, '__google-review')]/div[1]/div[3]/div[2]\")]\n\n # Save scraped reviews in a dataframe and export to csv\n df_reviews = pd.DataFrame({'date_publication': date_reviews, 'text': reviews})\n df_reviews.to_csv('app/data/reviews_' + name + '.csv')\n\n # add new society\n society = Society(name)\n db.session.add(society)\n db.session.commit()\n\n # save scraped reviews in the database\n for k, review in df_reviews.iterrows():\n r = Review(society, review.text, review.date_publication)\n db.session.add(r)\n 
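 # Note: committing once per review keeps each insert independent; a single commit after the loop would reduce round trips.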
db.session.commit()\n # end of connection\n driver.close()\n","sub_path":"app/utils/scrapping.py","file_name":"scrapping.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"495684457","text":"import pandas as pd\n\nyears = input('請輸入年份:')\nmonth_start = int(input('請輸入開始月份:'))\nmonth_end = int(input('請輸入結束月份:'))+1\ncolumns = ['season','Name','Age','#days','Lev','Tm','G','PA','AB','R','H','2B','3B','HR','RBI','BB','IBB','SO','HBP','SH','SF','GDP','SB','CS','BA','OBP','SLG','OPS']\nfor month in range(month_start,month_end):\n if len(str(month)) < 2 :\n month = '0'+str(month)\n file = pd.read_csv('{}-{}_batting.csv'.format(years,month),index_col=0).reset_index(drop=True)\n # Add a season (year) column\n year = []\n for i in range(0,len(file)):\n year.append(years)\n file['season'] = year\n file.to_csv('{}-{}_batting.csv'.format(years,month), encoding='utf_8_sig',columns=columns)\n","sub_path":"data_engineering/batter_rename.py","file_name":"batter_rename.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203921953","text":"# -*- coding: utf-8 -*-\nfrom bottle import route\nfrom flask import Flask\nfrom flask.views import MethodView\nfrom tornado.web import RequestHandler\nfrom apispec import APISpec\nfrom apispec.ext.bottle import BottlePlugin\nfrom apispec.ext.flask import FlaskPlugin\nfrom apispec.ext.tornado import TornadoPlugin\nfrom apispec.ext.marshmallow import MarshmallowPlugin\n\n\ndef check_web_framework_and_marshmallow_plugin(web_framework_plugin, **kwargs_for_add_path):\n \"\"\"Check schemas passed in web framework view function docstring are parsed by MarshmallowPlugin\"\"\"\n spec = APISpec(\n title='Swagger Petstore',\n version='1.0.0',\n plugins=[web_framework_plugin(), MarshmallowPlugin()],\n openapi_version='2.0',\n )\n spec.add_path(**kwargs_for_add_path)\n expected = {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'integer', 'format': 'int32', 'description': 'Pet id', 'readOnly': True},\n 'name': {'type': 'string', 'description': 'Pet name'},\n },\n 'required': ['name'],\n }\n assert spec.to_dict()['paths']['/hello']['get']['responses'][200]['schema'] == expected\n\n\nclass TestWebFrameworkAndMarshmallowPlugin:\n def test_bottle(self):\n @route('/hello')\n def hello():\n \"\"\"A greeting endpoint.\n\n ---\n get:\n responses:\n 200:\n schema: tests.schemas.PetSchema\n \"\"\"\n return 'hi'\n\n check_web_framework_and_marshmallow_plugin(BottlePlugin, view=hello)\n\n def test_flask(self):\n app = Flask(__name__)\n\n @app.route('/hello')\n def hello():\n \"\"\"A greeting endpoint.\n\n ---\n get:\n responses:\n 200:\n schema: tests.schemas.PetSchema\n \"\"\"\n return 'hi'\n\n with app.test_request_context():\n check_web_framework_and_marshmallow_plugin(FlaskPlugin, view=hello)\n\n def test_flask_method_view(self):\n app = Flask(__name__)\n\n class HelloApi(MethodView):\n def get(self_):\n \"\"\"A greeting endpoint.\n\n ---\n responses:\n 200:\n schema: tests.schemas.PetSchema\n \"\"\"\n return 'hi'\n\n method_view = HelloApi.as_view('hi')\n app.add_url_rule('/hello', view_func=method_view)\n with app.test_request_context():\n check_web_framework_and_marshmallow_plugin(FlaskPlugin, view=method_view)\n\n def test_tornado(self):\n class TornadoHelloHandler(RequestHandler):\n def get(self_):\n \"\"\"A greeting endpoint.\n\n ---\n responses:\n 200:\n schema: tests.schemas.PetSchema\n \"\"\"\n 
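 # apispec parses the YAML after '---' in the docstring above; the handler body itself does not affect the generated spec.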
self_.write('hi')\n\n urlspec = (r'/hello', TornadoHelloHandler)\n check_web_framework_and_marshmallow_plugin(TornadoPlugin, urlspec=urlspec)\n","sub_path":"tests/test_ext_combination.py","file_name":"test_ext_combination.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334400694","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\n\nclass Recette(models.Model):\n user = models.ForeignKey(User, default=1, editable=False)\n type = models.ForeignKey('Methode', null=True)\n titre = models.CharField(max_length=100)\n description = models.TextField()\n real_date = models.DateField(null=True)\n volume = models.IntegerField(null=True)\n dens_init = models.IntegerField(null=True)\n dens_final = models.IntegerField(null=True)\n color = models.IntegerField(null=True)\n amertume = models.IntegerField(null=True)\n alcool = models.IntegerField(null=True)\n\n def __str__(self):\n return self.titre\n\n\nclass Photo(models.Model):\n recette = models.ForeignKey('Recette', null=True, editable=False)\n image = models.ImageField(\n upload_to=\"photos_brassins\",\n max_length=100\n )\n thumbnail = models.ImageField(\n upload_to=\"photos_brassins\",\n max_length=500,\n null=True,\n blank=True\n )\n\n def create_thumbnail(self):\n if not self.image:\n return\n\n from PIL import Image\n from io import BytesIO\n from django.core.files.uploadedfile import SimpleUploadedFile\n import os\n\n THUMBNAIL_SIZE = (346, 195)\n\n DJANGO_TYPE = self.image.file.content_type\n\n if DJANGO_TYPE == 'image/jpeg':\n PIL_TYPE = 'jpeg'\n FILE_EXTENSION = 'jpg'\n elif DJANGO_TYPE == 'image/png':\n PIL_TYPE = 'png'\n FILE_EXTENSION = 'png'\n\n # Open original photo which we want to thumbnail using PIL's Image\n image = Image.open(BytesIO(self.image.read()))\n\n image.thumbnail(THUMBNAIL_SIZE, Image.ANTIALIAS)\n\n # Save the thumbnail\n temp_handle = BytesIO()\n image.save(temp_handle, PIL_TYPE)\n temp_handle.seek(0)\n\n # Save image to a SimpleUploadedFile which can be saved into\n # ImageField\n suf = SimpleUploadedFile(os.path.split(self.image.name)[-1],\n temp_handle.read(), content_type=DJANGO_TYPE)\n # Save SimpleUploadedFile into image field\n self.thumbnail.save(\n '%s_thumbnail.%s' % (os.path.splitext(suf.name)[0], FILE_EXTENSION),\n suf,\n save=False\n )\n\n def save(self, *args, **kwargs):\n\n self.create_thumbnail()\n\n force_update = False\n\n # If the instance already has been saved, it has an id and we set\n # force_update to True\n if self.id:\n force_update = True\n\n # Force an UPDATE SQL query if we're editing the image to avoid integrity exception\n super(Photo, self).save(force_update=force_update)\n\n\nclass Ingredient(models.Model):\n recette = models.ForeignKey('Recette', null=True, editable=False)\n nom = models.CharField(max_length=100)\n quantite = models.CharField(max_length=100)\n typ = models.CharField(max_length=100)\n caracteristique = models.CharField(max_length=100)\n\n\nclass Etape(models.Model):\n recette = models.ForeignKey('Recette', null=True, editable=False)\n detail = models.CharField(max_length=150)\n\n\nclass Empatage(models.Model):\n recette = models.ForeignKey('Recette', null=True, editable=False)\n emp_tag = models.ForeignKey('Etape_empatage', null=True)\n emp_detail = models.CharField(max_length=150)\n\n\nclass Etape_empatage(models.Model):\n title_empatage = models.CharField(max_length=256)\n label_empatage = 
models.CharField(max_length=100, default='Label')\n detail_empatage = models.TextField()\n\n def __str__(self):\n return self.title_empatage\n\n\nclass Ebullition(models.Model):\n recette = models.ForeignKey('Recette', null=True, editable=False)\n ebu_tag = models.ForeignKey('Etape_ebullition', null=True)\n ebu_detail = models.CharField(max_length=150)\n\n\nclass Etape_ebullition(models.Model):\n title_ebullition = models.CharField(max_length=256)\n label_ebullition = models.CharField(max_length=100, default='Label')\n detail_ebullition = models.TextField()\n\n def __str__(self):\n return self.title_ebullition\n\n\nclass Fermentation(models.Model):\n recette = models.ForeignKey('Recette', null=True, editable=False)\n ferm_tag = models.ForeignKey('Etape_fermentation', null=True)\n ferm_detail = models.CharField(max_length=150)\n\n\nclass Etape_fermentation(models.Model):\n title_fermentation = models.CharField(max_length=256)\n label_fermentation = models.CharField(max_length=100, default='Label')\n detail_fermentation = models.TextField()\n\n def __str__(self):\n return self.title_fermentation\n\n\nclass Methode(models.Model):\n title = models.CharField(max_length=256)\n label = models.CharField(max_length=100, default='Label')\n detail = models.TextField()\n\n def __str__(self):\n return self.title\n","sub_path":"blog/models/recettes.py","file_name":"recettes.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"588708032","text":"import numpy as np\r\n\r\n\r\ndef f(x):\r\n return x**5 - 2*x - 1\r\n\r\ndef f1(x):\r\n return (x**5 -1)/2\r\n\r\ndef f2(x):\r\n return np.sign(2*x+1)*np.abs(2*x+1)**(1.0/5)\r\n\r\ndef fixed_point(p, eps, ty):\r\n if ty == 1:\r\n p1 = f1(p)\r\n while np.abs(p-p1)/(np.abs(p-p1)+eps) > eps:\r\n p = p1\r\n p1 = f1(p)\r\n return p1\r\n else:\r\n p1 = f2(p)\r\n while np.abs(p - p1)/(np.abs(p-p1)+eps) > eps:\r\n p = p1\r\n p1 = f2(p)\r\n return p1\r\n\r\ndef app_root(R, l, r):\r\n X = np.linspace(l, r, 9)\r\n Y = f(X)\r\n maxy = Y.max()\r\n miny = Y.min()\r\n rangey = maxy-miny\r\n epsilon2 = rangey*0.01\r\n X = list(X)\r\n X.append(X[len(X)-1])\r\n Y = list(Y)\r\n Y.append(Y[len(Y) - 1])\r\n n = 9\r\n for i in range(1, 9):\r\n if Y[i-1]*Y[i] < 0.0000001:\r\n R.append((X[i-1]+X[i])/2)\r\n s = ( (Y[i]-Y[i-1])*(Y[i+1]-Y[i]) )\r\n if abs(Y[i]) < epsilon2 and s <= 0.000001:\r\n R.append(X[i])\r\n\r\ndef main():\r\n arr = input().split()\r\n arr = np.array(arr, dtype=float)\r\n R = []\r\n app_root(R, arr[0], arr[1])\r\n eps = 10**(-arr[2])\r\n arr[0] = fixed_point(R[0], eps, 2)\r\n arr[1] = fixed_point(R[1], eps, 1)\r\n arr[2] = fixed_point(R[2], eps, 2)\r\n if eps*100 > 2:\r\n arr[0] -= 0.007\r\n arr[1] += 0.002\r\n arr[2] += 0.005\r\n print(\"%.3f\\n%.3f\\n%.3f\" % (arr[0], arr[1], arr[2]))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","sub_path":"计算方法/迭代法.py","file_name":"迭代法.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229648770","text":"from tkinter import *\r\nimport numpy as np\r\nimport decimal\r\n\r\n\r\n# Convert the text-widget contents into a matrix for computation\r\ndef split(string_list):\r\n string_list = string_list.split(';')\r\n for i in range(len(string_list)):\r\n string_list[i] = string_list[i].split()\r\n string_list = np.array(string_list)\r\n string_list = string_list.astype(int)\r\n return string_list\r\n\r\n\r\n'''\r\ndef get_input():\r\n global text1_input, text2_input
text1_input = split(text1.get('0.0', 'end'))\r\n text2_input = split(text2.get('0.0', 'end'))\r\n'''\r\n\r\n\r\n# 加法\r\ndef add_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n text2_input = split(text2.get('0.0', 'end'))\r\n if text1_input.shape == text2_input.shape:\r\n add_result = text1_input + text2_input\r\n print(add_result)\r\n vartext.set(add_result)\r\n else:\r\n vartext.set('ERROR!')\r\n\r\n\r\n# 减法\r\ndef minus_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n text2_input = split(text2.get('0.0', 'end'))\r\n if text1_input.shape == text2_input.shape:\r\n minus_result = text1_input - text2_input\r\n print(minus_result)\r\n vartext.set(minus_result)\r\n else:\r\n vartext.set('ERROR!')\r\n\r\n\r\n# 两矩阵乘法\r\ndef mul_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n text2_input = split(text2.get('0.0', 'end'))\r\n if text1_input.shape[1] == text2_input.shape[0]:\r\n mul_result = np.zeros(shape=(text1_input.shape[0], text2_input.shape[1]))\r\n for i in range(0, text1_input.shape[0]):\r\n for j in range(0, text2_input.shape[1]):\r\n for m in range(0, text1_input.shape[1]):\r\n mul_result[i][j] = mul_result[i][j] + text1_input[i][m] * text2_input[m][j]\r\n print(mul_result)\r\n vartext.set(mul_result)\r\n else:\r\n vartext.set('ERROR!')\r\n\r\n\r\n# 数乘\r\ndef num_mul_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n try:\r\n entry1_input = float(entry1.get())\r\n for i in range(0, text1_input.shape[0]):\r\n for j in range(0, text1_input.shape[1]):\r\n text1_input[i][j] = text1_input[i][j] * entry1_input\r\n print(text1_input)\r\n vartext.set(text1_input)\r\n except ValueError:\r\n vartext.set('ERROR!')\r\n\r\n# 转置\r\ndef trans_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n trans_result = np.zeros(shape=(text1_input.shape[1], text1_input.shape[0]))\r\n for i in range(0, trans_result.shape[0]):\r\n for j in range(0, trans_result.shape[1]):\r\n trans_result[i][j] = text1_input[j][i]\r\n print(trans_result)\r\n vartext.set(trans_result)\r\n\r\n\r\n# 幂运算\r\ndef exponent_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n try:\r\n if float(entry1.get()) == int(entry1.get()):\r\n entry1_input = int(entry1.get())\r\n if text1_input.shape[0] == text1_input.shape[1]: # 方阵才可以幂运算\r\n exponent_result = text1_input\r\n while entry1_input - 1 > 0:\r\n exponent_result = np.dot(exponent_result, text1_input)\r\n entry1_input = entry1_input - 1\r\n print(exponent_result)\r\n vartext.set(exponent_result)\r\n else:\r\n vartext.set('ERROR!')\r\n else:\r\n vartext.set('ERROR!')\r\n except ValueError:\r\n vartext.set('ERROR')\r\n\r\n\r\n# 计算行列式\r\ndef det_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n if text1_input.shape[0] == text1_input.shape[1]:\r\n det_result = np.linalg.det(text1_input)\r\n print(det_result)\r\n vartext.set(det_result)\r\n else:\r\n vartext.set('ERROR!')\r\n\r\n\r\n# 求逆矩阵\r\ndef inv_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n try:\r\n if text1_input.shape[0] == text1_input.shape[1]:\r\n inv_result = np.linalg.inv(text1_input)\r\n inv_result = np.round(inv_result, decimals=3)\r\n print(inv_result)\r\n vartext.set(inv_result)\r\n else:\r\n vartext.set('ERROR!')\r\n except np.linalg.linalg.LinAlgError:\r\n vartext.set('Singular matrix!')\r\n\r\n\r\n# 求特征值\r\ndef eig_function():\r\n text1_input = split(text1.get('0.0', 'end'))\r\n if text1_input.shape[0] == text1_input.shape[1]:\r\n eig_zhi, eig_vector = np.linalg.eig(text1_input)\r\n eig_zhi = np.round(eig_zhi, 
3)\r\n print(list)\r\n print(eig_zhi)\r\n print(eig_vector)\r\n vartext.set(eig_zhi)\r\n else:\r\n vartext.set('ERROR!')\r\n\r\n\r\n# 初始化Tk\r\nroot = Tk()\r\n\r\nvartext = StringVar()\r\n\r\n# 设置窗口标题\r\nroot.title('矩阵计算器')\r\n\r\n# 输入提示文字\r\nLabel(root, text='输入矩阵A :').grid(row=0, column=1, columnspan=3, sticky=W)\r\nLabel(root, text='输入矩阵B :').grid(row=0, column=4, columnspan=3, sticky=W)\r\n\r\n# 可输入控件(A,B输入格)\r\ntext1 = Text(root, width=25, height=12)\r\ntext1.grid(row=1, rowspan=3, column=0, columnspan=3, padx=8, sticky=E)\r\n# np_tex1 = split(text1.get('0.0', 'end'))\r\n# print(np_tex1)\r\ntext2 = Text(root, width=25, height=12)\r\ntext2.grid(row=1, rowspan=3, column=3, columnspan=3, padx=8, sticky=E)\r\n# np_text2 = split(text2.get('0.0', 'end'))\r\n\r\n# 用于隔开不同控件\r\nLabel(root).grid(row=4, column=0, columnspan=3)\r\n\r\n# 输出提示文字和数乘输入提示文字\r\nLabel(root, text='计算输出矩阵:').grid(row=6, column=0, columnspan=3, sticky=W) # 靠左\r\nLabel(root, text='输入数字a :').grid(row=5, column=3, sticky=E) # 靠右\r\n\r\nlabel_warn = Label(root, text='注意:1.输入矩阵用空格隔开同一行的不同元素,加英文分号表示开始下一行\\n2.特征值按钮只能输出特征值不出现特征向量')\r\nlabel_warn.grid(row=10, column=0, columnspan=6)\r\n\r\n# 可输入控件(数乘输入a,x)\r\nentry1 = Entry(root)\r\nentry1.grid(row=5, column=4, columnspan=2)\r\n\r\n# 设置窗口大小\r\nroot.geometry('420x500')\r\n\r\n# 设置窗口是否可以变化长/宽\r\nroot.resizable(width=False, height=False)\r\n\r\n# 用作输出框,内容可变\r\nLabel(root, width=24, height=10, bg='white', textvariable=vartext).grid(row=7, rowspan=3, column=0,\r\n columnspan=3, sticky=N)\r\n# 生成button,功能表\r\nbuttonADD = Button(root, text='A+B', width=9, height=3, command=add_function)\r\nbuttonMINUS = Button(root, text='A-B', width=9, height=3, command=minus_function)\r\nbuttonMUL = Button(root, text='A*B', width=9, height=3, command=mul_function)\r\nbuttonAa = Button(root, text='aA', width=9, height=3, command=num_mul_function)\r\nbuttonTRANS = Button(root, text='transA', width=9, height=3, command=trans_function)\r\nbuttonEX = Button(root, text='A^x', width=9, height=3, command=exponent_function)\r\nbuttonDET = Button(root, text='|A|', width=9, height=3, command=det_function)\r\nbuttonINV = Button(root, text='invA', width=9, height=3, command=inv_function)\r\nbuttonEIG = Button(root, text='EIG', width=9, height=3, command=eig_function)\r\nbuttonADD.grid(row=7, column=3)\r\nbuttonMINUS.grid(row=7, column=4)\r\nbuttonMUL.grid(row=7, column=5)\r\nbuttonAa.grid(row=8, column=3)\r\nbuttonTRANS.grid(row=8, column=4)\r\nbuttonEX.grid(row=8, column=5)\r\nbuttonDET.grid(row=9, column=3)\r\nbuttonINV.grid(row=9, column=4)\r\nbuttonEIG.grid(row=9, column=5)\r\n\r\n# 进入消息循环\r\nroot.mainloop()\r\n\r\n\r\n\r\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"166059864","text":"from flask import Flask, render_template\nimport random, json\nfrom sqlalchemy import create_engine\n\napp = Flask(__name__)\n\n\npath_steam_user_id = 'game_recommendation/website/steam_user_id.txt'\n\n\nusername = 'alanliu'\npassword = 'Password12345'\nhost = 'localhost'\ndatabase = 'steam'\nengine = create_engine('mysql+pymysql://{}:{}@{}/{}?charset=utf8mb4'.format(username, password, host, database))\n\n\n# with open(path_steam_user_id,'r') as f:\n# \tlst_user_id = f.readlines()\n\nlst_user_id = [i[0] for i in engine.execute('select user_id from game_steam_user where available = 1').fetchall()]\n\n\n\nlst_popular_games = engine.execute('''\n\tSELECT \n\t\tgame_steam_app.app_id, 
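GUI.py above implements multiplication, transpose, and matrix powers with explicit index loops, and its `eig_function` prints the builtin `list` instead of a result, apparently a leftover debug line. NumPy covers these operations directly, and `np.linalg.LinAlgError` is the current spelling of the `np.linalg.linalg.LinAlgError` path caught above. A brief sketch:

```python
import numpy as np

a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[0, 1], [1, 0]], dtype=float)

print(a @ b)                             # matrix product, replaces the triple loop
print(a.T)                               # transpose, replaces the index-swapping loop
print(np.linalg.matrix_power(a, 3))      # repeated np.dot in one call
print(np.round(np.linalg.eig(a)[0], 3))  # eigenvalues, as in eig_function

try:
    print(np.round(np.linalg.inv(a), 3))
except np.linalg.LinAlgError:
    # Raised for singular input, mirroring the GUI's error branch.
    print("Singular matrix!")
```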
\n\t\tgame_steam_app.name, \n\t\tgame_steam_app.initial_price, \n\t\tgame_steam_app.header_image \n\tFROM game_steam_app\n\tJOIN recommended_games_popularity_based \n\tON game_steam_app.app_id = recommended_games_popularity_based.app_id\n\tAND game_steam_app.type = \"game\" \n\tAND game_steam_app.release_date <= CURDATE() \n\tAND game_steam_app.initial_price IS NOT NULL\n\tORDER BY recommended_games_popularity_based.peak_today DESC \n\tLIMIT 5''').fetchall()\n\n\n\n@app.route('/')\ndef recommender():\n\tuser_id = random.choice(lst_user_id)\n\n\t# user_id = 76561197960323774 # no purchase info\n\n\tlst_most_played_games = engine.execute('''\n\t\tSELECT \n\t\t\tgame_steam_app.app_id, \n\t\t\tgame_steam_app.name, \n\t\t\tgame_steam_app.initial_price, \n\t\t\tgame_steam_app.header_image \n\t\tFROM game_steam_app\n\t\tJOIN game_steam_user_inventory \n\t\tON game_steam_app.app_id = game_steam_user_inventory.app_id\n\t\tWHERE game_steam_user_inventory.user_id = {} \n\t\tAND game_steam_user_inventory.playtime_forever > 0 \n\t\tAND game_steam_app.type = \"game\" \n\t\tAND game_steam_app.release_date <= CURDATE() \n\t\tAND game_steam_app.initial_price IS NOT NULL\n\t\tORDER BY game_steam_user_inventory.playtime_forever DESC \n\t\tLIMIT 3'''.format(user_id)).fetchall()\n\n\n\n\tif lst_most_played_games:\n\t\tfavorite_app_id = lst_most_played_games[0][0]\n\t\t# get content based recommendation\n\t\tlst_content_recommended = engine.execute('''\n\t\t\tSELECT app_id, name, initial_price, header_image \n\t\t\tFROM game_steam_app \n\t\t\tWHERE type = \"game\" \n\t\t\tAND release_date <= CURDATE() \n\t\t\tAND initial_price IS NOT NULL\n\t\t\tAND app_id IN ({})'''.format(','.join(\n\t\t\t\t[str(i) for i in engine.execute('SELECT `0`,`1`,`2` FROM recommended_games_content_based WHERE app_id = {}'.format(favorite_app_id)).first()]\n\t\t\t\t)\n\t\t\t)\n\t\t).fetchall()\n\n\n\t\t# get item based recommendation\n\t\tlst_item_recommended = engine.execute('''\n\t\t\tSELECT app_id, name, initial_price, header_image \n\t\t\tFROM game_steam_app \n\t\t\tWHERE type = \"game\" \n\t\t\tAND release_date <= CURDATE() \n\t\t\tAND initial_price IS NOT NULL\n\t\t\tAND app_id IN ({})'''.format(','.join(\n\t\t\t\t[str(i) for i in engine.execute('SELECT `0`,`1`,`2` FROM recommended_games_item_based WHERE app_id = {}'.format(favorite_app_id)).first()]\n\t\t\t\t)\n\t\t\t)\n\t\t).fetchall()\n\n\n\t\t# get ALS based recommendation\n\t\tlst_als_recommended = engine.execute('''\n\t\t\tSELECT app_id, name, initial_price, header_image \n\t\t\tFROM game_steam_app \n\t\t\tWHERE type = \"game\" \n\t\t\tAND release_date <= CURDATE() \n\t\t\tAND initial_price IS NOT NULL\n\t\t\tAND app_id IN ({})'''.format(','.join(\n\t\t\t\t[str(i) for i in engine.execute('SELECT `0`,`1`,`2` FROM recommended_games_als_based WHERE user_id = {}'.format(user_id)).first()]\n\t\t\t\t)\n\t\t\t)\n\t\t).fetchall()\n\n\telse:\n\t\tlst_content_recommended = []\n\t\tlst_item_recommended = []\n\t\tlst_als_recommended = []\n\n\n\n\n\treturn render_template( 'recommendation.html',\n\t\t\t\t\t\t\tuser_id = user_id,\n\t\t\t\t\t\t\tlst_most_played_games = lst_most_played_games,\n\t\t\t\t\t\t\tlst_content_recommended = lst_content_recommended,\n\t\t\t\t\t\t\tlst_item_recommended = lst_item_recommended,\n\t\t\t\t\t\t\tlst_als_recommended = lst_als_recommended,\n\t\t\t\t\t\t\tlst_popular_games = lst_popular_games)\n\n\nif __name__ == 
'__main__':\n\tapp.run(debug=True)\n\n\n\n","sub_path":"website/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"369801691","text":"# Copyright 2018 The Cornac Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Fit to and evaluate C2PF on the Office Amazon dataset\"\"\"\n\nfrom cornac.data import GraphModality\nfrom cornac.eval_methods import RatioSplit\nfrom cornac.experiment import Experiment\nfrom cornac import metrics\nfrom cornac.models import C2PF\nfrom cornac.datasets import amazon_office as office\n\n# Load office ratings and item contexts, see C2PF paper for details\nratings = office.load_rating()\ncontexts = office.load_context()\n\nitem_graph_modality = GraphModality(data=contexts)\n\nratio_split = RatioSplit(data=ratings,\n test_size=0.2, rating_threshold=3.5,\n exclude_unknowns=True, verbose=True,\n item_graph=item_graph_modality)\n\nc2pf = C2PF(k=100, max_iter=80, variant='c2pf')\n\n# Evaluation metrics\nnDgc = metrics.NDCG(k=-1)\nmrr = metrics.MRR()\nrec = metrics.Recall(k=20)\npre = metrics.Precision(k=20)\n\n# Instantiate and run your experiment\nexp = Experiment(eval_method=ratio_split,\n models=[c2pf],\n metrics=[nDgc, mrr, rec, pre])\nexp.run()\n","sub_path":"examples/c2pf_example.py","file_name":"c2pf_example.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"634513395","text":"import math,sys,os\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.colors import Normalize\nimport numpy as np\nimport time\n\ntry:\n sys.path.append('libs/') #add the libs to the system path so we can import them\n #import matrix_solver as ms #add custom modules to this line\n import setup_2d_PDE as pd\nexcept ModuleNotFoundError:\n print('Please only run this from the PRH_HW3 directory.')\n raise ModuleNotFoundError\n sys.exit()\n\n#be aware that grabbing from the config file and setting up module-level variables\n#happens inside setup_2d_PDE.py\nsetupStart = time.time()\nA,b = pd.buildCartStencil(pd.NX,pd.NY)\nsetupEnd = time.time()\nsetupTime = setupEnd - setupStart\npsi_vector = pd.solvePsi(A, b, pd.SOLVER, pd.TOL, pd.MAXITERS)\nsolveEnd = time.time()\nsolveTime = solveEnd - setupEnd\npsi = pd.wrapPsi(psi_vector) #stream function psi(x,y) for delsquared = 0\nwrapEnd = time.time()\nwrapTime = wrapEnd - solveEnd\n\ntotalTime = wrapEnd - setupStart\npercentSetup = setupTime/totalTime*100\npercentSolve = solveTime/totalTime*100\npercentWrap = wrapTime/totalTime*100\n\nprint(\"Time taken for setup: {}\\n\".format(setupEnd - setupStart), \"Percentage: {}\\n\".format(percentSetup))\nprint(\"Time taken for solution: {}\\n\".format(solveEnd - setupEnd), \"Percentage: {}\\n\".format(percentSolve))\nprint(\"Time taken for wrapping: 
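run.py above splices `user_id` and `app_id` into its SQL with `str.format`. The IDs come from trusted tables there, so it works, but bound parameters are the safer habit whenever a value could originate from user input. A self-contained sketch using an in-memory SQLite stand-in for the MySQL engine; the table and column names are taken from the script, while the sample rows are made up:

```python
from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")  # in-memory stand-in for the MySQL engine above
with engine.connect() as conn:
    conn.execute(text(
        "CREATE TABLE game_steam_user_inventory "
        "(user_id INTEGER, app_id INTEGER, playtime_forever INTEGER)"))
    conn.execute(text(
        "INSERT INTO game_steam_user_inventory VALUES (1, 440, 120), (1, 570, 5)"))
    # :uid is a bound parameter; the driver handles quoting and escaping.
    rows = conn.execute(
        text("SELECT app_id FROM game_steam_user_inventory "
             "WHERE user_id = :uid AND playtime_forever > 0 "
             "ORDER BY playtime_forever DESC LIMIT 3"),
        {"uid": 1},
    ).fetchall()
    print(rows)  # [(440,), (570,)]
```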
{}\\n\".format(wrapEnd - solveEnd), \"Percentage: {}\\n\".format(percentWrap))\nprint(\"Total time (excluding graphing): {}\\n\".format(totalTime))\n\nU,V = pd.vectorField(psi, pd.DX, pd.DY) #vector field for psi\n\n#now it's time to graph\nxpsi, ypsi = np.meshgrid(pd.X,pd.Y) #x and y matrices for pylab graphing for psi\nxfield, yfield = np.meshgrid(pd.X[1:-1],pd.Y[1:-1]) #ditto for the vector field; we're cutting off the edges because this is a central different approximation\n\nplt.figure(1)\nplt.pcolor(xpsi, ypsi, psi)\nplt.colorbar().set_label('Value of Psi at x and y')\nplt.title('Psi(x,y), Q = {3} rho={0}, dx={1}, dy={2}'.format(pd.RHO,pd.DX,pd.DY,pd.Q))\n\n#don't clobber existing files - this can mean a lot of copies\ncopynum = 0\npsipath = 'pics/psi/psi{0}.png'.format(copynum)\nwhile os.path.isfile(psipath):\n copynum +=1\n psipath = 'pics/psi/psi{0}.png'.format(copynum)\nplt.savefig(psipath)\n\nplt.figure(2)\n\nQ = plt.quiver(xfield,yfield,U,V,)\n\nplt.title('Vector field, u = (U,V), rho={0}, dx={1}, dy={2}'.format(pd.RHO,pd.DX,pd.DY))\n\nfieldpath = 'pics/field/field{0}.png'.format(copynum)\ncopynum = 0\nwhile os.path.isfile(fieldpath):\n copynum +=1\n fieldpath = 'pics/field/field{0}.png'.format(copynum)\nplt.savefig(fieldpath)\n\nplt.show()\n","sub_path":"original/original.py","file_name":"original.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"5711468","text":"from flask_babel import lazy_gettext as _\nimport re\n\nfrom app import db\n\nfrom app.models.course import Course\nfrom app.models.education import Education\nfrom app.models.file import File\nfrom app.models.base_model import BaseEntity\n\n\ntest_types = {'Mid-term': _('Mid-Term'),\n 'End-term': _('End-Term'),\n 'Retake': _('Retake'),\n 'Unknown': _('Unknown')}\n\ntest_type_default = 'Unknown'\n\n\nclass Examination(db.Model, BaseEntity):\n __tablename__ = 'examination'\n\n comment = db.Column(db.String(128))\n date = db.Column(db.Date)\n\n examination_file_id = db.Column(db.Integer, db.ForeignKey('file.id'),\n nullable=False)\n answers_file_id = db.Column(db.Integer, db.ForeignKey('file.id'))\n\n timestamp = db.Column(db.DateTime)\n course_id = db.Column(db.Integer,\n db.ForeignKey('course.id'))\n education_id = db.Column(db.Integer,\n db.ForeignKey('education.id'))\n test_type = db.Column(db.Enum(*list(test_types.keys()),\n name='examination_type'),\n nullable=False, server_default='Unknown')\n course = db.relationship(Course,\n backref=db.backref('examinations', lazy='dynamic')\n )\n education = db.relationship(Education,\n backref=db.backref('examinations',\n lazy='dynamic'))\n\n examination_file = db.relationship(\n File, foreign_keys=[examination_file_id], lazy='joined')\n answers_file = db.relationship(\n File, foreign_keys=[answers_file_id], lazy='joined')\n\n def _get_filename(self, answers):\n fn = \"\"\n\n for word in re.split(r\"\\s+\", self.course.name):\n fn += word[0].upper() + word[1:].lower()\n\n if self.test_type == 'Mid-term':\n fn += \"_Midterm\"\n elif self.test_type == 'End-term':\n fn += \"_Final\"\n elif self.test_type == 'Retake':\n fn += \"_Retake\"\n\n if self.date is not None:\n fn += self.date.strftime(\"_%d_%m_%Y\")\n\n if answers:\n fn += \"_answers\"\n\n return fn\n\n @property\n def examination_filename(self):\n \"\"\"\n Filename for the examination file (without extension).\n\n Create a filename for the examination file\n based on the exam's information.\n \"\"\"\n return 
self._get_filename(False)\n\n @property\n def answers_filename(self):\n \"\"\"\n Filename for the answers file file (without extension).\n\n Create a filename for the answers file\n based on the exam's information.\n \"\"\"\n\n if self.answers_file is None:\n return None\n return self._get_filename(True)\n","sub_path":"app/models/examination.py","file_name":"examination.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585773411","text":"import zipfile\nimport re\nimport os\n\n\n\ndef read_file(filename):\n with open(filename, 'r') as file:\n return \"\".join(line for line in file)\n\n\ndef main():\n for filename in os.listdir('.'):\n if re.search(r'\\.txt$', filename):\n contents = read_file(filename)\n\n match = re.search(r'Name: ([a-zA-Z]+) (.*?) \\(q(\\d+)\\)', contents)\n fname = match.group(1)\n lname = match.group(2)\n\n match = re.search(r'Original filename: ([a-z]+\\.zip)', contents)\n original_filename = match.group(1)\n\n match = re.search(r'Filename: (.*?\\.zip)', contents)\n filename = match.group(1)\n\n slug = \"{}-{}\".format(re.sub(r'[^a-zA-Z]+', '', lname.lower()), fname.lower())\n\n os.makedirs(slug)\n\n with zipfile.ZipFile(filename, 'r') as zip:\n zip.extractall(slug)\n\n\ntry:\n os.chdir('submissions')\n main()\nfinally:\n os.chdir('..')\n \n","sub_path":"exercises/extra/toledo/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232913159","text":"import datetime\nimport time\nimport boto3\nfrom botocore.config import Config\n\n# FinSpace class with Spark bindings\n\nclass SparkFinSpace(FinSpace):\n import pyspark\n def __init__(\n self, \n spark: pyspark.sql.session.SparkSession = None,\n config = Config(retries = {'max_attempts': 0, 'mode': 'standard'}),\n dev_overrides: dict = None\n ):\n FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)\n self.spark = spark # used on Spark cluster for reading views, creating changesets from DataFrames\n \n def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):\n resp = self.client.get_user_ingestion_info()\n upload_location = resp['ingestionPath']\n# data_frame.write.option('header', 'true').csv(upload_location)\n data_frame.write.parquet(upload_location)\n return upload_location\n \n def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):\n print(\"Uploading data...\")\n upload_location = self.upload_dataframe(data_frame)\n \n print(\"Data upload finished. Ingesting data...\")\n \n return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='parquet', format_params={})\n \n def read_view_as_spark(\n self,\n dataset_id: str,\n view_id: str\n ):\n # TODO: switch to DescribeMatz when available in HFS\n views = self.list_views(dataset_id=dataset_id, max_results=50)\n filtered = [v for v in views if v['id'] == view_id]\n\n if len(filtered) == 0:\n raise Exception('No such view found')\n if len(filtered) > 1:\n raise Exception('Internal Server error')\n view = filtered[0]\n \n # 0. Ensure view is ready to be read\n if (view['status'] != 'SUCCESS'): \n status = view['status'] \n print(f'view run status is not ready: {status}. 
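solution.py above pulls the student name, original filename, and archive name out of each report with separate `re.search` calls and positional groups. Named groups express the first of those more readably; the sample text below is illustrative, not real submission data:

```python
import re

HEADER = re.compile(
    r"Name: (?P<first>[a-zA-Z]+) (?P<last>.*?) \(q(?P<question>\d+)\)"
)

contents = "Name: Ada Lovelace (q7)"
match = HEADER.search(contents)
if match:
    # Same slug construction as the script above, via named lookups.
    slug = "{}-{}".format(
        re.sub(r"[^a-zA-Z]+", "", match["last"].lower()), match["first"].lower()
    )
    print(slug)  # lovelace-ada
```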
Returning empty.')\n return\n\n glue_db_name = view['destinationTypeProperties']['databaseName']\n glue_table_name = view['destinationTypeProperties']['tableName']\n \n # Query Glue table directly with catalog function of spark\n return self.spark.table(f\"`{glue_db_name}`.`{glue_table_name}`\")\n \n def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):\n from pyspark.sql.types import StructType\n\n # for translation to FinSpace's schema\n # 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'\n DoubleType = \"DOUBLE\"\n FloatType = \"FLOAT\"\n DateType = \"DATE\"\n StringType = \"STRING\"\n IntegerType = \"INTEGER\"\n LongType = \"BIGINT\"\n BooleanType = \"BOOLEAN\"\n TimestampType = \"DATETIME\"\n \n hab_columns = []\n\n items = [i for i in data_frame.schema] \n\n switcher = {\n \"BinaryType\" : StringType,\n \"BooleanType\" : BooleanType,\n \"ByteType\" : IntegerType,\n \"DateType\" : DateType,\n \"DoubleType\" : FloatType,\n \"IntegerType\" : IntegerType,\n \"LongType\" : IntegerType,\n \"NullType\" : StringType,\n \"ShortType\" : IntegerType,\n \"StringType\" : StringType,\n \"TimestampType\" : TimestampType,\n }\n\n \n for i in items:\n# print( f\"name: {i.name} type: {i.dataType}\" )\n\n habType = switcher.get( str(i.dataType), StringType)\n\n hab_columns.append({\n \"dataType\" : habType, \n \"name\" : i.name,\n \"description\" : \"\"\n })\n\n return( hab_columns )\n","sub_path":"notebooks/Utilities/finspace_spark.py","file_name":"finspace_spark.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444763821","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# __author__ = 'AL'\nfrom Task_Queue import TaskQueue\nimport queue\nfrom enum import Enum\nfrom logger_config import report_logger\n\n\nclass ThreadSta(Enum):\n Init = 0 # 初始状态\n Work = 1 # 开始运行\n Finish = 2 # 线程结束\n Error = 3 # 线程报错\n\n\nclass SpiderManager(object):\n def __init__(self):\n self.Fetch_Sta = ThreadSta.Init # 爬取线程状态\n self.Parse_Sta = ThreadSta.Init # 解析线程状态\n self.Save_Sta = ThreadSta.Init # 存储线程状态\n self.Clock_Sta = ThreadSta.Init # 时钟线程状态\n self.TaskAllot_Sta = ThreadSta.Init # 任务分配线程状态\n self.Monitor_Sta = ThreadSta.Init # 监控线程状态\n\n self.fetch_queue = TaskQueue(queue.Queue())\n self.parse_queue = TaskQueue(queue.Queue())\n self.save_queue = TaskQueue(queue.Queue())\n\n def finish_all_threads(self):\n self.Fetch_Sta = ThreadSta.Finish # 爬取线程状态\n self.Parse_Sta = ThreadSta.Finish # 解析线程状态\n self.Save_Sta = ThreadSta.Finish # 存储线程状态\n self.Clock_Sta = ThreadSta.Finish # 时钟线程状态\n self.TaskAllot_Sta = ThreadSta.Finish # 任务分配线程状态\n self.Monitor_Sta = ThreadSta.Finish # 监控线程状态\n\n def show_all_sta(self):\n for sta in [self.Fetch_Sta, self.Parse_Sta, self.Save_Sta, self.Clock_Sta, self.TaskAllot_Sta,\n self.Monitor_Sta]:\n print(type(sta), sta)\n\n def check_thread_error(self):\n for sta in [self.Fetch_Sta, self.Parse_Sta, self.Save_Sta, self.Clock_Sta, self.TaskAllot_Sta,\n self.Monitor_Sta]:\n if sta == ThreadSta.Error:\n report_logger.error(\"运行报错,且退出\")\n return True\n return False\n\n\nif __name__ == '__main__':\n spiderMan = SpiderManager()\n spiderMan.show_all_sta()\n print(\"----------------------------------\")\n spiderMan.finish_all_threads()\n spiderMan.show_all_sta()\n 
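`get_schema_from_spark` in the finspace_spark.py record above is essentially a dict lookup with a `STRING` default. The same pattern reduced to a self-contained sketch; type-name strings stand in for `pyspark` `DataType` instances so this runs without a Spark session:

```python
FINSPACE_TYPE = {
    "BooleanType": "BOOLEAN",
    "DateType": "DATE",
    "DoubleType": "FLOAT",
    "IntegerType": "INTEGER",
    "LongType": "INTEGER",
    "StringType": "STRING",
    "TimestampType": "DATETIME",
}

def to_finspace_schema(fields):
    """fields: iterable of (name, spark_type_name) pairs."""
    return [
        # dict.get with a default plays the role of the switcher above.
        {"name": name, "dataType": FINSPACE_TYPE.get(type_name, "STRING"), "description": ""}
        for name, type_name in fields
    ]

print(to_finspace_schema([("price", "DoubleType"), ("ts", "TimestampType")]))
```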
pass\n","sub_path":"component/Spider_Manager.py","file_name":"Spider_Manager.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"543687662","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nDESCRIPTION = 'decorator-based python function parameter validation'\nLONG_DESCRIPTION = open('README.md').read()\n\nsetup(name='validecorator',\n version='0.1a',\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author='Ben Pruitt',\n author_email='bpruittvt@gmail.com',\n url='http://github.com/benpruitt/validecorator',\n license='MIT License',\n platforms=['any'],\n packages=find_packages(),\n install_requires=['decorator', 'validictory'],\n )","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"608971815","text":"from cscience import datastore\ndatastore = datastore.Datastore()\n\nclass PointSet(object):\n \"\"\"\n A glorified list of points.\n \"\"\"\n\n def __init__(self, plotpoints, vname=None, ivarname=None):\n self.plotpoints = sorted(plotpoints, key=lambda p: p.x)\n self.variable_name = vname\n self.independent_var_name = ivarname\n\n def __getitem__(self, i):\n return self.plotpoints[i]\n\n def unzip_points(self):\n \"\"\"\n Returns a 4-tuple of lists of x, y, xorig, yorig\n \"\"\"\n numpts = len(self.plotpoints)\n ret = ([None] * numpts, [None] * numpts, [None] * numpts, [None] * numpts)\n for ind, pt in enumerate(self.plotpoints):\n ret[0][ind] = pt.x\n ret[1][ind] = pt.y\n ret[2][ind] = pt.xorig\n ret[3][ind] = pt.yorig\n return ret\n\nclass PlotPoint(object):\n def __init__(self, x, y, xorig, yorig, sample):\n self.x = x\n self.y = y\n\n self.xorig = xorig\n self.yorig = yorig\n\n self.sample = sample\n\n @property\n def computation_plan(self):\n return self.sample['computation plan']\n\nclass SampleCollection(object):\n \"\"\"\n Convenience functions for sample <-> graphs\n \"\"\"\n\n def __init__(self, virtual_sample_lst, sample_view):\n self.sample_list = virtual_sample_lst\n self.view = sample_view\n self.annotations = {'testing':123}\n\n def get_pointset(self, iattr, dattr, computation_plan):\n points = []\n for i in self.sample_list:\n if i['computation plan'] == computation_plan:\n\n inv = i[iattr]\n dev = i[dattr]\n\n inv_v = getattr(inv, 'magnitude', inv)\n dev_v = getattr(dev, 'magnitude', dev)\n\n if inv_v and dev_v:\n points.append(PlotPoint(inv_v, dev_v,\n inv, dev, i))\n\n return PointSet( points, dattr, iattr )\n\n def get_numeric_attributes(self):\n attset = [att for att in self.view if\n att in datastore.sample_attributes and\n datastore.sample_attributes[att].is_numeric() and\n any([sam[att] is not None for sam in self.sample_list])]\n return attset\n\n def get_computation_plans(self):\n plans = set([sam['computation plan'] for sam in self.sample_list])\n return list(plans)\n","sub_path":"src/cscience/GUI/graph/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336822299","text":"from pathlib import Path\nfrom datetime import datetime\nimport subprocess\nimport time\n\n\ndef getFileTimestamp(file: Path) -> datetime:\n return datetime.fromtimestamp(file.stat().st_mtime)\n\n\ndef getNewestFileIn(dirName: str) -> Path:\n folder = Path(dirName)\n isFirstFile = True\n 
for f in folder.iterdir():\n if isFirstFile:\n newestFile = f\n newestFileTs = getFileTimestamp(newestFile)\n isFirstFile = False\n continue\n fileTs = getFileTimestamp(f)\n if (fileTs > newestFileTs):\n newestFile = f\n newestFileTs = fileTs\n return newestFile\n\n\ndef doBuild():\n subprocess.run([\"powershell\", \"-Command\", \"ps/make.ps1\"])\n\n\nnewestFile = getNewestFileIn(\"gws\")\ndoBuild()\nwhile (True):\n time.sleep(1)\n file = getNewestFileIn(\"gws\")\n if getFileTimestamp(file) > getFileTimestamp(newestFile):\n doBuild()\n newestFile = file\n","sub_path":"py/make-watch.py","file_name":"make-watch.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609390677","text":"# -*- coding: utf-8 -*-\n\nfrom ecore import models, fields, api, _\n\nclass AccountJournal(models.Model):\n _inherit = \"account.journal\"\n\n def _default_outbound_payment_methods(self):\n vals = super(AccountJournal, self)._default_outbound_payment_methods()\n return vals + self.env.ref('account_sepa.account_payment_method_sepa_ct')\n\n @api.model\n def _enable_sepa_ct_on_bank_journals(self):\n \"\"\" Enables sepa credit transfer payment method on bank journals. Called upon module installation via data file.\n \"\"\"\n sepa_ct = self.env.ref('account_sepa.account_payment_method_sepa_ct')\n euro = self.env.ref('base.EUR')\n if self.env.user.company_id.currency_id == euro:\n domain = ['&', ('type', '=', 'bank'), '|', ('currency_id', '=', euro.id), ('currency_id', '=', False)]\n else:\n domain = ['&', ('type', '=', 'bank'), ('currency_id', '=', euro.id)]\n for bank_journal in self.search(domain):\n bank_journal.write({'outbound_payment_method_ids': [(4, sepa_ct.id, None)]})\n","sub_path":"ecore-server/ecore/addons/account_sepa/account_journal.py","file_name":"account_journal.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297041181","text":"#instalar \n#pip install matplotlib\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom matplotlib import pyplot\nfrom matplotlib.animation import FuncAnimation\nfrom random import randrange\n \nplt.style.use('ggplot')\nx_data = []\ny_data = []\n \n \nfigure = pyplot.figure()\nline, = pyplot.plot_date(x_data, y_data, '-')\n \ndef grafica3(frame):\n #asfasdfasfaf\n #asfasfasf\n #temperatura \n x_data.append(datetime.now())\n y_data.append(randrange(0, 100))\n line.set_data(x_data, y_data)\n figure.gca().relim()\n figure.gca().autoscale_view()\n return line,\n \n#animacion3 = FuncAnimation(figure, grafico, interval=5000)\nanimacion3 = FuncAnimation(figure, grafica3, interval=3000)\npyplot.show()","sub_path":"platzi/orientado a objetos/graficado/grafico_tiempo_real.py","file_name":"grafico_tiempo_real.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457863819","text":"import app\nimport unittest\nimport flask\n\nclass TestApp(unittest.TestCase):\n\n def setUp(self):\n self.app = app.app.test_client()\n self.app.testing = True\n\n def test_status_code(self):\n response = self.app.get('/')\n self.assertEqual(response.status_code, 200)\n \n def test_scrape_by_url(self):\n with self.app as client:\n # resp = client.post('/url-based', data=dict(text=\"https://google.com\"))\n # self.assertEqual(200, resp.status_code)\n client.get('/url-based')\n self.assertIn(\"Enter the 
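`getNewestFileIn` in the make-watch.py record above tracks the newest entry with a first-iteration flag. `max()` with a key expresses the same scan, and an empty directory raises `ValueError` instead of the `UnboundLocalError` the flag version would hit. A sketch; `get_newest_file` is a hypothetical rename:

```python
from pathlib import Path

def get_newest_file(dir_name: str) -> Path:
    """Newest regular file in dir_name by modification time."""
    files = (p for p in Path(dir_name).iterdir() if p.is_file())
    return max(files, key=lambda p: p.stat().st_mtime)

try:
    print(get_newest_file("gws"))
except (FileNotFoundError, ValueError):
    # Missing or empty directory; nothing to watch yet.
    print("nothing to watch yet")
```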
Article URL\", app.scrape_by_url())\n\nif __name__ == '__main__':\n unittest.main()\n ","sub_path":"test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140807967","text":"from xAH_config import xAH_config\n\nc = xAH_config()\n\ntriggersList = [\n \"HLT_g120_loose\",\n \"HLT_g140_loose\",\n \"HLT_g200_etcut\",\n \"HLT_g300_etcut\",\n]\ntriggers = \",\".join(triggersList)\n\nderiv = 'EXOT18Kernel'\n\nc.setalg(\"BasicEventSelection\", { \"m_name\" : \"BasicEventSelection\",\n \"m_debug\" : False,\n \"m_derivationName\" : deriv,\n \"m_applyGRLCut\" : False,\n \"m_doPUreweighting\" : False,\n \"m_vertexContainerName\" : \"PrimaryVertices\",\n \"m_PVNTrack\" : 2,\n \"m_applyPrimaryVertexCut\" : True,\n \"m_applyEventCleaningCut\" : True,\n \"m_applyCoreFlagsCut\" : True,\n \"m_triggerSelection\" : triggers,\n \"m_storeTrigDecisions\" : True,\n \"m_applyTriggerCut\" : False,\n \"m_useMetaData\" : True\n } )\n\nc.setalg(\"JetCalibrator\", { \"m_name\" : \"FatJetCalibrator\",\n \"m_inContainerName\" : \"AntiKt10LCTopoTrimmedPtFrac5SmallR20Jets\",\n \"m_jetAlgo\" : \"AntiKt10LCTopoTrimmedPtFrac5SmallR20\",\n \"m_outputAlgo\" : \"AntiKt10LCTopoTrimmedPtFrac5SmallR20_Calib_Algo\",\n \"m_outContainerName\" : \"CalibFatJets\",\n \"m_debug\" : False,\n \"m_verbose\" : False,\n \"m_sort\" : True,\n \"m_saveAllCleanDecisions\" : True,\n \"m_calibConfigFullSim\" : \"JES_MC15recommendation_FatJet_June2015.config\",\n \"m_calibConfigData\" : \"JES_MC15recommendation_FatJet_June2015.config\",\n \"m_doCleaning\" : False,\n #\"m_JESUncertConfig\" : \"$ROOTCOREBIN/data/JetUncertainties/UJ_2015/ICHEP2016/HbbTagging_strong.config\",\n #\"m_JESUncertMCType\" : \"MC15\",\n \"m_calibSequence\" : \"EtaJES_JMS\",\n \"m_setAFII\" : False,\n \"m_jetCleanCutLevel\" : \"LooseBad\",\n \"m_jetCleanUgly\" : True,\n \"m_cleanParent\" : True,\n \"m_applyFatJetPreSel\" : True,\n \"m_systName\" : \"Nominal\",\n \"m_systVal\" : 0\n } )\n\nc.setalg(\"JetSelector\", { \"m_name\" : \"FatJetSelector\",\n \"m_inContainerName\" : \"CalibFatJets\",\n \"m_inputAlgo\" : \"AntiKt10LCTopoTrimmedPtFrac5SmallR20_Calib_Algo\",\n \"m_outContainerName\" : \"SelFatJets\",\n \"m_outputAlgo\" : \"SelFatJets_Algo\",\n \"m_decorateSelectedObjects\" : False,\n \"m_createSelectedContainer\" : True, \n \"m_cleanJets\" : False,\n \"m_pT_min\" : 200e3,\n \"m_eta_max\" : 2.0,\n \"m_mass_min\" : 0.1, \n \"m_useCutFlow\" : True,\n } )\n\n\"\"\"c.setalg(\"PhotonCalibrator\", { \"m_name\" : \"PhotonCalibrator\",\n \"m_inContainerName\" : \"Photons\",\n \"m_outContainerName\" : \"CalibPhotons\",\n \"m_outputAlgoSystNames\" : \"Photons_Calib_Algo\",\n \"m_esModel\" : \"es2015cPRE\",\n \"m_decorrelationModel\" : \"1NP_v1\",\n \"m_useAFII\" : False,\n \"m_systName\" : \"Nominal\",\n \"m_systVal\" : 0,\n \"m_sort\" : True,\n \"m_conEffCalibPath\" : \"PhotonEfficiencyCorrection/2015_2016/rel20.7/Moriond2017_v1/PIDSF/efficiencySF.offline.Tight.2016.13TeV.rel20.7.25ns.con.v00.root\", \n \"m_uncEffCalibPath\" : \"PhotonEfficiencyCorrection/2015_2016/rel20.7/Moriond2017_v1/PIDSF/efficiencySF.offline.Tight.2016.13TeV.rel20.7.25ns.unc.v00.root\"\n\n\n } )\n\"\"\"\nc.setalg(\"PhotonCalibrator\", { \"m_name\" : \"PhotonCalibrator\",\n #\"m_debug\" : debug, \n \"m_inContainerName\" : \"Photons\",\n \"m_outContainerName\" : \"Photons_Calib\",\n \"m_outputAlgoSystNames\" : \"Photons_Calib_Algo\",\n \"m_conEffCalibPath\" : 
\"PhotonEfficiencyCorrection/2015_2016/rel20.7/Moriond2017_v1/PIDSF/efficiencySF.offline.Tight.2016.13TeV.rel20.7.25ns.con.v00.root\",\n \"m_uncEffCalibPath\" : \"PhotonEfficiencyCorrection/2015_2016/rel20.7/Moriond2017_v1/PIDSF/efficiencySF.offline.Tight.2016.13TeV.rel20.7.25ns.unc.v00.root\",\n \"m_conEffAFIICalibPath\" : \"PhotonEfficiencyCorrection/efficiencySF.offline.Tight.2015.13TeV.rel20.AFII.con.v01.root\",\n \"m_uncEffAFIICalibPath\" : \"PhotonEfficiencyCorrection/efficiencySF.offline.Tight.2015.13TeV.rel20.AFII.unc.v01.root\",\n \"m_tightIDConfigPath\" : \"ElectronPhotonSelectorTools/offline/mc15_20150712/PhotonIsEMTightSelectorCutDefs.conf\",\n \"m_mediumIDConfigPath\" : \"ElectronPhotonSelectorTools/offline/mc15_20150712/PhotonIsEMMediumSelectorCutDefs.conf\",\n \"m_looseIDConfigPath\" : \"ElectronPhotonSelectorTools/offline/mc15_20150712/PhotonIsEMLooseSelectorCutDefs.conf\",\n \"m_esModel\" : \"es2016data_mc15c\",\n \"m_decorrelationModel\" : \"1NP_v1\",\n \"m_useAFII\" : False,\n \"m_systName\" : \"Nominal\",\n \"m_systVal\" : 0,\n \"m_sort\" : True\n} )\n\n\nc.setalg(\"PhotonSelector\", { \"m_name\" : \"PhotonsSelector\",\n \"m_inContainerName\" : \"CalibPhotons\",\n \"m_inputAlgoSystNames\" : \"Photons_Calib_Algo\",\n \"m_outContainerName\" : \"SelPhotons\",\n \"m_outputAlgoSystNames\" : \"SelPhotons_Algo\",\n \"m_decorateSelectedObjects\" : False,\n \"m_createSelectedContainer\" : True,\n \"m_pass_min\" : 0,\n \"m_pT_min\" : 10e3,\n \"m_eta_max\" : 2.37,\n \"m_vetoCrack\" : True,\n \"m_doAuthorCut\" : True,\n \"m_doOQCut\" : True,\n \"m_photonIdCut\" : \"Tight\",\n \"m_MinIsoWPCut\" : \"FixedCutTightCaloOnly\"\n } )\n\nc.setalg(\"dijetISR_DAODtoMT\", { \"m_doJets\" : False,\n \"m_doPhotons\" : True,\n \"m_fatJetContainerName\" : \"SelFatJets\",\n \"m_photonContainerName\" : \"SelPhotons\",\n \"m_eventInfoDetailStr\" : \"pileup truth\",\n \"m_trigDetailStr\" : \"passTriggers\",\n \"m_fatJetDetailStr\" : \"kinematic substructure\",\n \"m_photonDetailStr\" : \"kinematic\"\n } )\n","sub_path":"scripts/config_DAODtoMT_photon.py","file_name":"config_DAODtoMT_photon.py","file_ext":"py","file_size_in_byte":8481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"6894698","text":"import tensorflow as tf\nimport os, sys\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport PIL\nimport cv2\nimport pathlib\nimport pandas as pd\nimport time\n\nfrom tensorflow.keras import layers\nfrom IPython import display\n\ntf.enable_eager_execution()\nprint(\"\\n\" + tf.__version__ + \"\\n\")\n\n# ---------------------------------------------------Constants--------------------------------------------------------\nEPOCHS = 500\nBATCH_SIZE = 16\ntrain_fig_size = 20 # in inches\nlines = cols = 2\nnum_examples_to_generate = lines*cols # number of generated images equal to number of real images provided \nnoise_dim = 1500\ngenerate_every = 20 # nb of steps between every generation\n\ntrain_out_images_dir = \"./train_out_images/\"\ntest_out_images_dir = \"./test_out_images/\"\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncontinue_training = False\ntest_image_nb = 0\n\n# -------------------------------------------------Read data paths----------------------------------------------------\n\ndata_path = os.path.join(os.getcwd(), \"data\")\nlabels_path = os.path.join(data_path, \"labeled.csv\")\nlabels = pd.read_csv(labels_path, index_col=0, skiprows=1, header=None)\nid_to_label 
= labels.to_dict(orient=\"index\")\nid_to_label = {k:v[1] for k,v in id_to_label.items()}\n\nlabeled_images_path = os.path.join(data_path, \"labeled\")\nlabeled_images_path = pathlib.Path(labeled_images_path)\nall_labeled = list(labeled_images_path.glob('*'))\nall_labeled = [str(p) for p in all_labeled]\nall_labels = [id_to_label[int(item.name.split('.')[0])] for item in labeled_images_path.glob('*')]\nprint(len(all_labeled))\n\nscored_images_path = os.path.join(data_path, \"scored\")\nscored_images_path = pathlib.Path(scored_images_path)\nall_scored = list(scored_images_path.glob('*'))\nall_scored = [str(p) for p in all_labeled]\n# Make dataset only with images labeled as galaxies\nall_labeled = [e for e, l in zip(all_labeled, all_labels) if l]\ndata_size = len(all_labeled)\nprint(data_size)\n\n# ---------------------------------------Image loading/saving and Preprocessing---------------------------------------\n\ndef preprocess_image(image):\n image = tf.image.convert_image_dtype(tf.image.decode_png(image, channels=1), tf.float32)\n image = (image - 0.5) / 0.5 # normalize to [-1, 1] range \n \n return image\n\ndef load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)\n\ndef generate_and_save_images(model, test_input, fig_size, lines, cols, dir, str, nb):\n predictions = model(test_input, training=False) # get generator output\n\n fig = plt.figure(figsize=(fig_size, fig_size)) # Create a new \"fig_size\" inches by \"fig_size\" inches figure as default figure\n\n for i in range(predictions.shape[0]):\n image = predictions[i, :, :, 0] # take the i'th predicted image, remove the last dimension (result is 2D)\n plt.subplot(lines, cols, i+1) # consider the default figure as lines x cols grid and select the (i+1)th cell\n plt.imshow(image, cmap='gray', vmin=-1.0, vmax=1.0) # plot the image on the selected cell\n plt.axis('off')\n maxval = image.numpy().max()\n minval = image.numpy().min()\n print('Max and min vals: {} {}'.format(maxval, minval))\n# plt.show() # finished plotting all images in the figure so show default figure\n\n if not os.path.exists(dir): # create images dir if not existant\n os.mkdir(dir)\n plt.savefig(dir + \"image_after_{}_{}.png\".format(str, nb)) # save image to dir\n return predictions\n\n# --------------------------------------------------Models-----------------------------------------------------------\ndef make_generator_model(noise_dim=1000):\n model = tf.keras.Sequential()\n\n model.add(layers.Dense(5*5*256, use_bias=False, input_shape=(noise_dim,)))\n model.add(layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU())\n\n model.add(layers.Reshape((5, 5, 256)))\n assert model.output_shape == (None, 5, 5, 256) # Note: None is the batch size\n \n model.add(layers.Conv2DTranspose(128, (3, 3), strides=(1, 1), padding='same', use_bias=False))\n assert model.output_shape == (None, 5, 5, 128)\n model.add(layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU())\n \n model.add(layers.Conv2DTranspose(64, (5, 5), strides=(5, 5), padding='same', use_bias=False))\n assert model.output_shape == (None, 25, 25, 64)\n model.add(layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU())\n \n model.add(layers.Conv2DTranspose(32, (3, 3), strides=(1, 1), padding='same', use_bias=False))\n assert model.output_shape == (None, 25, 25, 32)\n model.add(layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU())\n \n model.add(layers.Conv2DTranspose(16, (5, 5), strides=(5, 5), 
padding='same', use_bias=False))\n assert model.output_shape == (None, 125, 125, 16)\n model.add(layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU())\n \n model.add(layers.Conv2DTranspose(8, (3, 3), strides=(2, 2), padding='same', use_bias=False))\n assert model.output_shape == (None, 250, 250, 8)\n model.add(layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU())\n \n model.add(layers.Conv2DTranspose(4, (3, 3), strides=(2, 2), padding='same', use_bias=False))\n assert model.output_shape == (None, 500, 500, 4)\n model.add(layers.BatchNormalization(momentum=0.8))\n model.add(layers.LeakyReLU())\n \n model.add(layers.Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))\n assert model.output_shape == (None, 1000, 1000, 1)\n\n return model\n\ndef make_discriminator_model():\n model = tf.keras.Sequential()\n \n model.add(layers.Conv2D(4, (3, 3), strides=(2, 2), padding='same',\n input_shape=[1000, 1000, 1]))\n assert model.output_shape == (None, 500, 500, 4)\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n \n model.add(layers.Conv2D(8, (3, 3), strides=(2, 2), padding='same'))\n assert model.output_shape == (None, 250, 250, 8)\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(16, (3, 3), strides=(2, 2), padding='same'))\n assert model.output_shape == (None, 125, 125, 16)\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(32, (5, 5), strides=(5, 5), padding='same'))\n assert model.output_shape == (None, 25, 25, 32)\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n \n model.add(layers.Conv2D(64, (3, 3), strides=(1, 1), padding='same'))\n assert model.output_shape == (None, 25, 25, 64)\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n \n model.add(layers.Conv2D(128, (5, 5), strides=(5, 5), padding='same'))\n assert model.output_shape == (None, 5, 5, 128)\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n \n model.add(layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same'))\n assert model.output_shape == (None, 5, 5, 256)\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n \n model.add(layers.Flatten())\n \n model.add(layers.Dense(4000))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n \n model.add(layers.Dense(500))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n \n model.add(layers.Dense(1))\n\n return model\n\n# ---------------------------------------------Losses and Optimizers---------------------------------------------------\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\ndef generator_loss(fake_output):\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n \ngenerator_optimizer = tf.train.AdamOptimizer(1e-3)\ndiscriminator_optimizer = tf.train.AdamOptimizer(1e-4)\n\n# ------------------------------------------------Train Loop-----------------------------------------------------------\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, noise_dim]) # number of generated images equal to number of real images provided \n # to discriminator (i,e batch_size)\n\n with tf.GradientTape() as gen_tape, tf.GradientTape() as 
disc_tape:\n generated_images = generator(noise, training=True)\n\n real_output = discriminator(images, training=True)\n fake_output = discriminator(generated_images, training=True)\n\n gen_loss = generator_loss(fake_output)\n disc_loss = discriminator_loss(real_output, fake_output)\n\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n\n generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n return gen_loss, disc_loss\n\ndef train(dataset, epochs, seed):\n step = 0\n for epoch in range(epochs):\n start = time.time()\n\n b = 0 # batch nb\n for image_batch in dataset:\n gen_loss, disc_loss = train_step(image_batch)\n print(\"Epoch: {}, Batch: {}, Step: {}, Gen_loss: {}, Disc_loss: {}\".format(epoch, b, step, gen_loss, disc_loss))\n b += 1\n if step % generate_every == 0:\n display.clear_output(wait=True)\n generate_and_save_images(generator, seed, train_fig_size, lines, cols, train_out_images_dir, \"step\", nb = step)\n step +=1\n\n display.clear_output(wait=True)\n generate_and_save_images(generator, seed, train_fig_size, lines, cols, train_out_images_dir, \"epoch\", nb = epoch)\n\n # Save the model every 15 epochs\n if (epoch + 1) % 15 == 0:\n checkpoint.save(file_prefix = checkpoint_prefix)\n\n print ('Time for epoch {} is {} sec'.format(epoch, time.time()-start))\n\n # Generate after the final epoch\n display.clear_output(wait=True)\n generate_and_save_images(generator, seed, train_fig_size, lines, cols, train_out_images_dir, \"epoch\", nb = EPOCHS)\n\n# --------------------------------------------------------------------------------------------------------------------\n\n# -----------------------------------------------------Start----------------------------------------------------------\n# Generator\ngenerator = make_generator_model(noise_dim)\n\n# Discriminator\ndiscriminator = make_discriminator_model()\n\n#Checkpoints\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n generator=generator,\n discriminator=discriminator)\n\n# Restore from lastest available checkpoint\nlatest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) # returns None if no checkpoint found\ncheckpoint_found = (latest_checkpoint is not None) # model can be restored if a checkpoint found\n\nif checkpoint_found:\n status = checkpoint.restore(latest_checkpoint)\n\nif checkpoint_found and not continue_training: # test restored model\n noise = tf.random.normal([1, noise_dim])\n generate_and_save_images(generator, noise, 12, 1, 1, test_out_images_dir, \"restore\", nb = test_image_nb)\n sys.exit(0)\n\n# Pretest generator and discriminator\nseed = tf.random.normal([num_examples_to_generate, noise_dim])\ngenerated_images = generate_and_save_images(generator, seed, train_fig_size, lines, cols, train_out_images_dir, \"pretrain\", nb = 0) # test generator output on \"num_examples_to_generate\" noise images (before training)\ndecision = discriminator(generated_images) # test discriminator on the generated images\nprint(decision)\n\n# Read Training Data\nAUTOTUNE = tf.data.experimental.AUTOTUNE # constant to autotune the number of parallel calls when reading dataset\n\npath_ds = tf.data.Dataset.from_tensor_slices(all_labeled)\ntrain_ds = 
path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE).batch(BATCH_SIZE)\n\n# Train\ntrain(train_ds, EPOCHS, seed)\n\n# read test dataset\n# path_ds = tf.data.Dataset.from_tensor_slices(all_scored)\n# test_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)\n","sub_path":"mounir.py","file_name":"mounir.py","file_ext":"py","file_size_in_byte":12630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150210310","text":"from tkinter import *\nfrom PIL import ImageTk,Image\nroot=Tk()\nroot.title(\"Button\")\n#r=IntVar()\n#r.set(\"2\")\nMODES=[\n (\"Pepperoni\",\"Pepperoni\"),\n (\"cheese\",\"cheese\"),\n (\"mushroom\",\"mushroom\"),\n (\"onion\",\"onion\")\n]\n\npizza=StringVar()\npizza.set(\"Pepperoni\")\nfor text,mode in MODES:\n Radiobutton(root,text=text,variable=pizza,value=mode).pack(anchor=W)\n\ndef clicked(value):\n Label(root,text=value).pack()\n\n#Radiobutton(root,text='option1',variable=r,value=1,command=lambda:clicked(r.get())).pack()\n#Radiobutton(root,text='option2',variable=r,value=2,command=lambda:clicked(r.get())).pack()\nButton(root,text='click',command=lambda:clicked(pizza.get())).pack(anchor=W)\n\nmainloop()","sub_path":"button/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389578841","text":"#!/usr/bin/python\n\n# Head ends here\n\nimport math\n\ndef find_nearest_dirty_pos(board, posr, posc):\n dirty_positions = []\n for x in range(len(board)):\n for y in range(len(board)):\n if board[x][y] == 'd':\n dirty_positions.append([x, y])\n \n nearest = []\n for dirty_pos in dirty_positions:\n eucli_dis = math.sqrt(((dirty_pos[0] - posr) ** 2) + ((dirty_pos[1] - posc) ** 2))\n nearest.append(eucli_dis)\n \n min_index = nearest.index(min(nearest))\n nearest_pos = dirty_positions[min_index]\n return nearest_pos\n\ndef next_move(posr, posc, dirty_pos):\n bot_pos = [posr, posc]\n if bot_pos[1] < dirty_pos[1]:\n print(\"RIGHT\")\n elif bot_pos[1] > dirty_pos[1]:\n print(\"LEFT\")\n elif bot_pos[0] < dirty_pos[0]:\n print(\"DOWN\")\n elif bot_pos[0] > dirty_pos[0]:\n print(\"UP\")\n else:\n print(\"CLEAN\")\n \n\n# Tail starts here\n\nif __name__ == \"__main__\":\n pos = [int(i) for i in input().strip().split()]\n board = [[j for j in input().strip()] for i in range(5)]\n dirty_pos = find_nearest_dirty_pos(board, pos[0], pos[1])\n next_move(pos[0], pos[1], dirty_pos)","sub_path":"Artificial Intelligence/Bot Building/Bot Clean.py","file_name":"Bot Clean.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404682618","text":"\"\"\"\nCopyright 2020, All rights reserved.\nAuthor : SangJae Kang\nMail : craftsangjae@gmail.com\n\"\"\"\nimport asyncio\nimport requests\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\nimport dateutil\nfrom dateutil.parser import parse as parse_date\nfrom service.query import GETLIMIT_QUERY, GITHUB_GQL\nimport os\n\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nGITHUB_KEY_PATH = os.path.join(ROOT_DIR, \"credentials/github.txt\")\n\n\nclass GithubKeyGen(object):\n \"\"\"\n 복수개의 Github API Key를 관리하는 Singleton 객체를 생성하는 클래스\n GITHUB_KEY_PATH 에는 line 별로 github API Key가 저장되어 있다.\n github key가 Round-Robin 방식으로 돌면서 할당량을 소모해간다.\n\n **의견**\n Github Key가 수백개로 넘어가게 되면, Max-Heap 방식을 고민할 테지만,\n 지금은 Round-Robin 
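The Bot Clean record above ranks dirty cells by Euclidean distance, yet the bot only moves along the axes, so the true cost of reaching a cell is its Manhattan distance, and the two metrics can disagree about which cell is nearest. A sketch of the search with the matching metric:

```python
def nearest_dirty(board, posr, posc):
    """Dirty cell reachable in the fewest axis-aligned moves from (posr, posc)."""
    dirty = [
        (r, c)
        for r, row in enumerate(board)
        for c, cell in enumerate(row)
        if cell == "d"
    ]
    # Manhattan distance equals the number of UP/DOWN/LEFT/RIGHT steps needed.
    return min(dirty, key=lambda rc: abs(rc[0] - posr) + abs(rc[1] - posc))

board = ["b---d", "-d--d", "--dd-", "--d--", "----d"]
print(nearest_dirty(board, 0, 0))  # (1, 1)
```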
방식으로도 충분하다고 생각\n\n Issues\n * https://github.com/vienna-project/recohub/issues/2\n\n Usages\n\n # 아래에 선언된 GithubKey을 이용해야 함\n >>> GithubKey = GithubKeyGen()\n\n # API 할당량이 남은 키 가져오기\n >>> (await GithubKey.get_async())\n\n # 키의 할당량 갱신하기\n >>> (await GithubKey.set_async(remain, resetAt))\n\n \"\"\"\n def __init__(self, github_key_path):\n self.key_cache = OrderedDict()\n self.github_key_path = github_key_path\n self.load_keys()\n\n def load_keys(self):\n with open(self.github_key_path, 'r') as f:\n for key in f.readlines():\n key = key.strip()\n try:\n remain, resetAt = self.get_resource_limit(key)\n except ValueError as e:\n print(e)\n continue\n self.key_cache[key] = (remain, resetAt)\n if len(self.key_cache) == 0:\n raise ValueError(f\"{self.github_key_path} have no github key.\")\n\n def __repr__(self):\n key_infos = []\n for key, (remain, resetAt) in self.key_cache.items():\n key_infos.append(f\"{key} - ({remain},{resetAt})\")\n return \"\\n\".join(key_infos)\n\n def __len__(self):\n return sum(remain for remain, _ in self.key_cache.values())\n\n async def get_async(self):\n \"\"\" API 할당량이 남은 키 가져오기\n :return: key\n \"\"\"\n while True:\n min_resetAt = None\n for _ in range(len(self.key_cache)):\n key, (remain, resetAt) = self.key_cache.popitem(last=False)\n self.key_cache[key] = (remain - 1, resetAt)\n min_resetAt = min(min_resetAt, resetAt) if min_resetAt else resetAt\n\n if remain > 0:\n return key\n\n if min_resetAt:\n # key를 반환하지 못한 경우 : 할당량이 바닥남\n # 이 경우 resetAt이 가장 빠르게 도래하는 것을 기준으로 waiting 해주어야 함\n duration = (min_resetAt.replace(tzinfo=None)\n - datetime.utcnow().replace(tzinfo=None)\n + timedelta(seconds=10)).total_seconds()\n if duration > 0:\n await asyncio.sleep(duration)\n\n # resource 갱신\n self.update_resource_limit()\n\n else:\n # key_cache가 없는 상황 (예외 상황)\n break\n\n async def set_async(self, key: str, remain: int, resetAt: datetime=None):\n \"\"\" 키의 할당량, 시간을 저장\n \"\"\"\n if resetAt is None:\n resetAt = datetime.now(tz=dateutil.tz.tzutc()) + timedelta(hours=1)\n\n curr_remain, curr_resetAt = self.key_cache[key]\n # 비동기적으로 갱신하기 때문에 Skew가 발생할 수 있기 때문에\n # 비교를 통해 최소/ 최대값으로 넣어주어야 함\n self.key_cache[key] = (min(remain, curr_remain),\n max(resetAt, curr_resetAt))\n\n def update_resource_limit(self):\n \"\"\" github Key의 resource limit을 조회 후 갱신하는 함수\n \"\"\"\n for key in self.key_cache:\n remain, resetAt = self.get_resource_limit(key)\n self.key_cache[key] = (remain, resetAt)\n\n @staticmethod\n def get_resource_limit(key: str):\n \"\"\" 해당 Key의 resource limit을 가져오는 함수\n\n :param key: githubAPI Key\n :return:\n \"\"\"\n auth = {\"Authorization\": \"bearer \" + key}\n query = {\"query\": GETLIMIT_QUERY}\n\n with requests.post(GITHUB_GQL, headers=auth, json=query) as res:\n if res.status_code == 200:\n query_results = res.json()\n\n remain = query_results['data']['rateLimit']['remaining']\n resetAt = parse_date(query_results['data']['rateLimit']['resetAt'])\n return remain, resetAt\n else:\n raise ValueError(f\"{key} is Bad credentials\")\n","sub_path":"service/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39789653","text":"#!/usr/bin/env python3\n\n# Author: Francesco Regazzoni - MOX, Politecnico di Milano\n# Email: francesco.regazzoni@polimi.it\n# Date: 2020\n\nimport numpy as np\nimport json\nimport time\n\nclass model_RDQ18:\n \"\"\"\n Class implementing the ODE model for sarcomere activation proposed in [1].\n\n References\n ----------\n\n 
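`GithubKeyGen.get_async` above rotates API keys by popping the front of an `OrderedDict` and re-appending it. The core of that round-robin, stripped of the async waiting and the `resetAt` bookkeeping, as a sketch with made-up key names and quotas:

```python
from collections import OrderedDict

keys = OrderedDict([("key-a", 2), ("key-b", 1)])  # key -> remaining quota

def next_key(keys):
    for _ in range(len(keys)):
        key, remain = keys.popitem(last=False)  # take the front of the rotation
        keys[key] = remain - 1                  # re-append at the back, spent
        if remain > 0:
            return key
    return None                                 # every key exhausted

print([next_key(keys) for _ in range(4)])  # ['key-a', 'key-b', 'key-a', None]
```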
[1] F. Regazzoni, L. Dedè, A. Quarteroni \"Active contraction of cardiac\n cells: a reduced model for sarcomere dynamics with cooperative\n interactions\", Biomechanics and Modeling in Mechanobiology (2018)\n https://doi.org/10.1007/s10237-018-1049-0\n \"\"\"\n\n def __init__(self, params = '../params/params_RDQ18.json'):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n params : dictionary containing the model parameters, or path of a json\n file\n \"\"\"\n\n self.model_name = 'RDQ18'\n\n if isinstance(params, str):\n with open(params) as json_file:\n params = json.load(json_file)\n\n self.LA = float(params['geometry']['LA']) # [micro m]\n self.LM = float(params['geometry']['LM']) # [micro m]\n self.LB = float(params['geometry']['LB']) # [micro m]\n self.n_RU = int(params['geometry']['n_RU']) # [-]\n self.Lsmooth = float(params['geometry']['Lsmooth']) # [micro m]\n self.Q0 = float(params['RU_steady_state']['Q0']) # [-]\n self.SLQ = float(params['RU_steady_state']['SLQ']) # [micro m]\n self.alphaQ = float(params['RU_steady_state']['alphaQ']) # [micro m^-1]\n self.mu = float(params['RU_steady_state']['mu']) # [-]\n self.gamma = float(params['RU_steady_state']['gamma']) # [-]\n self.Kon = float(params['RU_kinetics']['Kon']) # [micro M^-1 * s^-1]\n self.Koff = float(params['RU_kinetics']['Koff']) # [s^-1]\n self.Kbasic = float(params['RU_kinetics']['Kbasic']) # [s^-1]\n self.TaMax = float(params['upscaling']['TaMax']) # [kPa]\n\n # Numerical parameters\n self.dt = 2.5e-5 # [s]\n self.freqenceRatesUpdate = 10 # [-]\n\n # Initialization\n self.x_i = lambda i: (self.LM-self.LB)*.5 * i/self.n_RU\n self.xAZ = lambda SL: (SL-self.LB)/2.\n self.xLA = lambda SL: self.LA -self.xAZ(SL) - self.LB\n self.xRA = lambda SL: self.xAZ(SL) - self.LA\n self.ChiRA = lambda SL,i: (self.x_i(i) <= self.xRA(SL)) * np.exp(-(self.xRA(SL)-self.x_i(i))**2 /self.Lsmooth**2) + \\\n (self.x_i(i) > self.xRA(SL)) * (self.x_i(i) < self.xAZ(SL)) + \\\n (self.x_i(i) >= self.xAZ(SL)) * np.exp(-(self.x_i(i)-self.xAZ(SL))**2 /self.Lsmooth**2)\n self.ChiLA = lambda SL,i: (self.x_i(i) <= self.xLA(SL)) * np.exp(-(self.xLA(SL)-self.x_i(i))**2 /self.Lsmooth**2) + \\\n (self.x_i(i) > self.xLA(SL))\n self.Q = lambda SL: self.Q0 - self.alphaQ*(self.SLQ-SL)*(SL>> Solution().single_number([3, 3, 4, 5, 5])\n \"\"\"\n\n num = 0\n for i in range(len(A)):\n num ^= A[i]\n return num\n","sub_path":"codelab/single_element.py","file_name":"single_element.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124701528","text":"# -*- coding:utf-8 -*-\n\nimport scrapy\nfrom artile_url.items import CbItem\nimport re\n\n'''\nusername :eddie12138\npassword :123456\n\nonly can crawl 100 pages\n'''\nclass cecbid_spider(scrapy.Spider):\n name = 'cecbid'\n allow_domains = ['cecbid.org.cn']\n url = 'http://so.cecbid.org.cn/so.php?t=tender&d=&a=&q='\n curr_page = 1\n kw = ''\n\n def start_requests(self):\n self.kw = getattr(self, 'kw', None)\n if self.kw is None:\n print(\"please enter key word, like -a kw=yourkeyword\")\n return\n self.url += self.kw\n print(self.url)\n yield scrapy.Request(url=self.url, callback=self.parse)\n\n def parse(self, response):\n #get article urls\n contents = response.xpath(\"//a[@class = 'text-success']/@href\")\n for c in contents:\n item = CbItem()\n item['url'] = c.extract()\n print(item)\n yield item\n\n #get next page url\n contain_sum_itemnum = response.xpath(\"((//small[@class = 
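The `single_number` record above relies on two XOR identities: `a ^ a == 0` and `0 ^ a == a`, so every duplicated element cancels and only the singleton survives, in O(n) time and O(1) extra space. The same fold fits on one line:

```python
from functools import reduce
from operator import xor

print(reduce(xor, [3, 3, 4, 5, 5]))  # 4; duplicates cancel pairwise
```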
'text-muted'])[last()-1]/text())[last()]\").extract()\n nums = re.findall(u'\\d+ 个', contain_sum_itemnum[0])\n if len(nums) == 0:\n return\n num = re.findall(('\\d+'), nums[0])\n print(num)\n n = int(num[0]) if len(num) > 0 else 0\n if self.curr_page * 10 < n:\n self.curr_page += 1\n u = self.url + '&p=' + str(self.curr_page)\n print(u)\n yield scrapy.Request(url=u, callback=self.parse)\n else:\n return\n\n\n","sub_path":"src/crawl/artile_url/artile_url/spiders/cecbid.py","file_name":"cecbid.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"118130311","text":"import requests\nfrom bs4 import BeautifulSoup\n\nURL = \"https://www.idoc.state.il.us/subsections/search/ISinms2.asp?idoc=\"\n\ndef getIdocProfile(idoc):\n out = {}\n html = BeautifulSoup(requests.post(URL + str(idoc)).content, 'html.parser')\n if \"Inmate NOT found\" in html.text:\n out[\"found\"] = False\n return out\n else:\n out[\"found\"] = True\n name_tag = html.find('font', attrs={'size':'4'})\n name = [text.replace(' ', '') for text in name_tag.text.split(' - ')[1].split(',')]\n out[\"name\"] = name[0] + \", \" + name[1]\n\n physicalProfTable = None\n admissionInfoTable = None\n sentencingTable = None\n institutionTable = None\n\n tables = html.find_all('table', attrs={'width':'390'})\n for table in tables:\n if 'Date of Birth' in table.text:\n physicalProfTable = table\n if \"Admission Date\" in table.text:\n admissionInfoTable = table\n if \"MITTIMUS\" in table.text:\n sentencingTable = table\n if \"Parent Institution\" in table.text:\n institutionTable = table\n \n\n\n\n for row in physicalProfTable.find_all('tr'):\n vals = row.find_all('td')\n if 'Date of Birth' in vals[0].text:\n out[\"dob\"] = vals[1].text\n if 'Sex' in vals[0].text:\n out[\"sex\"] = vals[1].text\n if 'Race' in vals[0].text:\n out[\"race\"] = vals[1].text.split()[0]\n for row in admissionInfoTable.find_all('tr'):\n vals = row.find_all('td')\n if 'Admission Date' in vals[0].text:\n out[\"admission_date\"] = vals[1].text\n if 'Projected Discharge Date' in vals[0].text:\n out[\"discharge_date\"] = vals[1].text\n if 'Projected Parole Date' in vals[0].text:\n out[\"msr_date\"] = vals[1].text\n for row in institutionTable.find_all('tr'):\n vals = row.find_all('td')\n if 'Parent Institution' in vals[0].text:\n out[\"parent_institution\"] = \" \".join(vals[1].text.split())\n if sentencingTable is None:\n return out\n rows = sentencingTable.find_all('tr')\n i = 0\n sentences = []\n\n while i < len(rows):\n curRow = rows[i]\n if \"MITTIMUS\" in curRow.text:\n sentence = {}\n while \"CLASS\" not in curRow.text:\n i += 1\n curRow = rows[i]\n sentence['class'] = curRow.find_all('td')[1].text\n while \"OFFENSE\" not in curRow.text:\n i += 1\n curRow = rows[i]\n sentence['offense'] = curRow.find_all('td')[1].text\n while \"CUSTODY DATE\" not in curRow.text:\n i += 1\n curRow = rows[i]\n sentence['custody_date'] = curRow.find_all('td')[1].text\n while \"SENTENCE\" not in curRow.text:\n i += 1\n curRow = rows[i]\n sentence['sentence'] = curRow.find_all('td')[1].text\n sentences.append(sentence)\n i += 1\n \n\n longestSent = {}\n length = [-1,-1]\n for sentence in sentences:\n \n tmp = sentence['sentence'].split()\n if len(tmp) == 0:\n pass\n elif 'DEATH' in tmp[0]:\n length = [\"DEATH\", 0]\n longestSent = sentence\n break\n elif 'LIFE' in tmp[0]:\n length = ['LIFE', 0]\n longestSent = sentence\n break\n elif 'SDP' in tmp[0]:\n if length[0] == -1:\n length = ['SDP', 0]\n 
longestSent = sentence\n \n else:\n years = int(tmp[0])\n months = int(tmp[2])\n if not isinstance(length[0], int) or years > length[0]:\n length = [years, months]\n longestSent = sentence\n elif years == length[0]:\n if months > length[1]:\n length = [years, months]\n longestSent = sentence\n\n out[\"crime_class\"] = \"Class \" + longestSent['class']\n out[\"holding_offense\"] = \" \".join(longestSent['offense'].split())\n out[\"sentence_years\"] = length[0]\n out[\"sentence_months\"] = length[1]\n out[\"custody_date\"] = longestSent['custody_date']\n \n\n #next_table = name_tag.find_parent('table').next_sibling()\n #rows=next_table.find_all('tr')\n\n return out\n","sub_path":"api/idoc.py","file_name":"idoc.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"290819183","text":"\"\"\"This module contains a fitness function for sudoku puzzles.\n\nIt is specialized for 9x9 puzzles, but could be extended to 4x4, 16x16, 25x25,\netc, by making DIM a parameter and fixing one_box and boxes.\n\nTo use this module, you want to call either sudoku_fitness or ga_sudoku.\n\nAn example puzzle and solution is provided in PUZZLE and SOLUTION.\n\"\"\"\n\n#The dimension of the puzzle.\nDIM = 9\n\n#A sudoku puzzle, that I found somewhere. I guess it is of average difficulty.\nPUZZLE = [[0, 0, 4, 8, 0, 0, 0, 1, 7],\n [6, 7, 0, 9, 0, 0, 0, 0, 0],\n [5, 0, 8, 0, 3, 0, 0, 0, 4],\n [3, 0, 0, 7, 4, 0, 1, 0, 0],\n [0, 6, 9, 0, 0, 0, 7, 8, 0],\n [0, 0, 1, 0, 6, 9, 0, 0, 5],\n [1, 0, 0, 0, 8, 0, 3, 0, 6],\n [0, 0, 0, 0, 0, 6, 0, 9, 1],\n [2, 4, 0, 0, 0, 1, 5, 0, 0]]\n\n#The correct solution.\nSOLUTION = [[9, 3, 4, 8, 2, 5, 6, 1, 7],\n [6, 7, 2, 9, 1, 4, 8, 5, 3],\n [5, 1, 8, 6, 3, 7, 9, 2, 4],\n [3, 2, 5, 7, 4, 8, 1, 6, 9],\n [4, 6, 9, 1, 5, 3, 7, 8, 2],\n [7, 8, 1, 2, 6, 9, 4, 3, 5],\n [1, 9, 7, 5, 8, 2, 3, 4, 6],\n [8, 5, 3, 4, 7, 6, 2, 9, 1],\n [2, 4, 6, 3, 9, 1, 5, 7, 8]]\n\ndef one_box(solution, i):\n \"\"\"Extract the 9 elements of a 3 x 3 box in a 9 x 9 sudoku solution.\n\n @param solution: The sudoku solution as a flat vector with 81 elements.\n\n @param i: The upper left corner\n\n @return: A vector with 9 elements, representing a sudoku box.\n \"\"\"\n return solution[i:i+3] + solution[i+9:i+12] + solution[i+18:i+21]\n\ndef boxes(solution):\n \"\"\"Divide a flat vector into vectors with 9 elements, representing 3 x 3\n boxes in the corresponding 9 x 9 2D vector. These are the standard\n sudoku boxes.\n\n @param solution: A flat vector, containing a sudoku solution.\n\n @return: A vector of vectors with 9 elements, representing sudoku boxes.\n \"\"\"\n return [one_box(solution, i) for i in [0, 3, 6, 27, 30, 33, 54, 57, 60]]\n\ndef splitup(solution):\n \"\"\"Take a flat vector and make it 2D\n\n @param solution: A flat vector with DIM * DIM elements\n\n @return: A 2D vector with DIM rows with DIM elements each.\n \"\"\"\n return [solution[i * DIM:(i + 1) * DIM] for i in xrange(DIM)]\n\ndef consistent(solution):\n \"\"\"Check how many different elements there are in each row.\n\n Ideally there should be DIM different elements, if there are no duplicates.\n\n @param solution: A 9 x 9 2D vector with a puzzle\n\n @return: The sum of duplicates in each row\n \"\"\"\n return sum(DIM - len(set(row)) for row in solution)\n\ndef compare(xs1, xs2):\n \"\"\"Compare two flat vectors and return how much they differ\n\n @param xs1: The puzzle as a flat vector. 
Zeroes are not compared.\n\n @param xs2: The solution as a flat vector.\n\n @return: The number of elements that differ and where xs1 is not 0.\n \"\"\"\n return sum(1 if x1 and x1 != x2 else 0 for x1, x2 in zip(xs1, xs2))\n\ndef sudoku_fitness(flatsolution, puzzle, flatpuzzle=None):\n \"\"\"Evaluate the fitness of flatsolution.\n\n @param flatsolution: A flat vector with 81 integer elements between 1 and 9.\n\n @param puzzle: A 9 x 9 2D vector with a sudoku puzzle. Zero means unknown.\n \n @param flatpuzzle: A 1D flattened version of puzzle. If it is not supplied\n it is calculated from puzzle. If you want to run this fitness function\n millions of times, it might be a good idea to precalculate this.\n\n @return: A fitness value. Higher is worse and 0 is perfect.\n \"\"\"\n if not flatpuzzle:\n flatpuzzle = sum(puzzle, [])\n solution = splitup(flatsolution)\n fitness = consistent(solution)\n fitness += consistent(zip(*solution))\n fitness += consistent(boxes(flatsolution))\n fitness += compare(flatpuzzle, flatsolution) * 10\n return fitness\n\ndef ga_sudoku(puzzle):\n \"\"\"Return a fitness function wrapper that extracts the .genes attribute from\n an individual and sends it to sudoku_fitness.\n\n @param puzzle: A 9 x 9 2D vector with a sudoku puzzle. Zero means unknown.\n\n @return: A fitness function that takes a linear genome class and returns a\n fitness value, where higher is worse and 0 is perfect.\n \"\"\"\n flatpuzzle = sum(puzzle, [])\n def fit(guy):\n \"\"\"A GA wrapper for sudoku_fitness\"\"\"\n return sudoku_fitness(guy.genes, puzzle, flatpuzzle)\n\n return fit\n","sub_path":"Taller 1/optopus/examples/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"82562370","text":"import turtle\n\ndef draw_square(some_turtle):\n for i in range(1,5):\n some_turtle.forward(100)\n some_turtle.right(90)\ndef draw_art():\n window = turtle.Screen()\n window.bgcolor(\"white\")\n #Creat the turtle Brad - Draws a square\n bard = turtle.Turtle()\n bard.shape(\"turtle\")\n bard.color(\"black\")\n bard.speed(5)\n for i in range(1,37):\n draw_square(bard)\n bard.right(10)\n #Create the turtle Angie - Draws a circle\n window.exitonclick()\ndraw_art()\n","sub_path":"Udcity/draw_square.py","file_name":"draw_square.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"2488932","text":"import os\nfrom flask import Flask, render_template, redirect, request, url_for, session\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId \n\napp = Flask(__name__)\napp.secret_key = 'some_secret'\n\napp.config[\"MONGO_DBNAME\"] = 'digital_kitchen'\napp.config[\"MONGO_URI\"] = os.getenv('MONGO_URI', 'mongodb://localhost')\n\nmongo = PyMongo(app)\n\n@app.route('/')\n\n@app.route('/index')\ndef index():\n if 'username' in session:\n return render_template(\"index.html\",\n recipes=mongo.db.recipes.find(),\n cuisines=mongo.db.cuisines.find(),\n categories=mongo.db.categories.find(),\n difficulties=mongo.db.difficulties.find(),\n main_ingredients=mongo.db.main_ingredients.find(),\n allergens=mongo.db.allergens.find(),\n message='Welcome, '+ str(session['username']) + ', to The Digital Kitchen!')\n return render_template(\"index.html\", \n message='Welcome to The Digital Kitchen!',\n recipes=mongo.db.recipes.find(),\n categories=mongo.db.categories.find(),\n 
difficulties=mongo.db.difficulties.find(),\n main_ingredients=mongo.db.main_ingredients.find(),\n allergens=mongo.db.allergens.find(),\n cuisines=mongo.db.cuisines.find())\n \n\n@app.route('/all_recipes')\ndef all_recipes():\n \"\"\"\n Display all recipes on one page\n \"\"\"\n recipes = mongo.db.recipes.find()\n recipes_total = recipes.count()\n return render_template(\"allrecipes.html\",\n recipes_total=recipes_total,\n recipes=recipes)\n\n@app.route('/recipes_by_category/<category_name>')\ndef recipes_by_category(category_name):\n \"\"\"\n Get all recipes of a chosen category and display\n these recipes on one page\n \"\"\"\n # Counts total amount of chosen category recipes\n recipes_total = mongo.db.recipes.find({\n \"category_name\": category_name\n }).count()\n return render_template(\n \"recipes_by_category.html\",\n recipes=mongo.db.recipes.find({\"category_name\": category_name}),\n categories=mongo.db.categories.find(),\n category_name=category_name,\n recipes_total=recipes_total)\n\n\n@app.route('/recipes_by_main/<main_ingredient>')\ndef recipes_by_main(main_ingredient):\n \"\"\"\n Get all recipes of a chosen ingredient and display\n these recipes on one page\n \"\"\"\n # Counts total amount of chosen ingredient recipes\n recipes_total = mongo.db.recipes.find({\n \"main_ingredient\": main_ingredient\n }).count()\n return render_template(\n \"recipes_by_main.html\",\n recipes=mongo.db.recipes.find({\"main_ingredient\": main_ingredient}),\n main_ingredients=mongo.db.main_ingredients.find(),\n main_ingredient=main_ingredient,\n recipes_total=recipes_total)\n\n@app.route('/signin', methods=['GET', 'POST'])\ndef signin():\n \"\"\"\n Direct user to signin page where they can enter their username\n and password and manage their recipes\n \"\"\"\n if 'username' in session:\n return render_template('index.html',\n message=\"You are already signed in!\")\n \n if request.method == 'POST':\n users = mongo.db.users\n user_signin = users.find_one({'username': request.form['username']})\n\n if user_signin:\n if request.form['password'] == user_signin['password']:\n session['username'] = request.form['username']\n return redirect(url_for('index'))\n return render_template('signin.html', \n message='Invalid username or password')\n return render_template('signin.html', message='')\n \n \n@app.route('/signout')\ndef signout():\n \"\"\"\n Sign user out of the session\n \"\"\"\n if 'username' in session:\n session.pop('username')\n return render_template('message.html',\n message='Signed out. See you later!')\n return render_template('message.html',\n message='You have already signed out!')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"\n Register new user into the database\n \"\"\"\n if 'username' in session:\n return render_template('register.html', \n message='You are already signed in and registered')\n if request.method == 'POST':\n users = mongo.db.users\n existing_user = users.find_one({'username' : request.form['username']})\n if request.form['username'] and request.form['password']:\n # Check for existing user to avoid re-registering the same user\n if existing_user is None:\n password = request.form['password']\n users.insert({'username': request.form['username'],\n 'password': password})\n session['username'] = request.form['username']\n return redirect(url_for('index'))\n return render_template('register.html',\n message='Username ' + str(existing_user['username']) + ' already exists')\n return render_template('register.html',\n message='Enter a username and password')\n return render_template('register.html', message='')\n\n@app.route('/get_recipe/<recipe_id>')\ndef get_recipe(recipe_id):\n \"\"\"\n Get recipe and display it on getrecipe.html\n \"\"\"\n the_recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n # Splits ingredients input into a list \n ingredient_split = the_recipe['recipe_ingredients'].split('\\n')\n for ingredient in ingredient_split:\n print(ingredient)\n # Splits methods input into a list \n method_split = the_recipe['recipe_method'].split('\\n')\n for method in method_split:\n print(method)\n return render_template(\"getrecipe.html\",\n recipe=the_recipe,\n ingredient_split=ingredient_split,\n method_split=method_split)\n\n@app.route('/recipes_by_cuisine/<cuisine_name>')\ndef recipes_by_cuisine(cuisine_name):\n \"\"\"\n Get all recipes of a chosen cuisine and display\n these recipes on one page\n \"\"\"\n # Counts total amount of chosen cuisine recipes\n recipes_total = mongo.db.recipes.find({\n \"cuisine_name\": cuisine_name\n }).count()\n return render_template(\n \"recipes_by_cuisine.html\",\n recipes=mongo.db.recipes.find({\"cuisine_name\": cuisine_name}),\n cuisines=mongo.db.cuisines.find(),\n cuisine_name=cuisine_name,\n recipes_total=recipes_total)\n \n@app.route('/<username>/add_recipe', methods=['GET','POST'])\ndef add_recipe(username):\n \"\"\"\n If user is signed in, allow user to create and insert\n new recipe into the database\n \"\"\"\n if 'username' in session:\n username = session['username']\n if request.method == 'POST':\n recipe = mongo.db.recipes\n recipe.insert({\n 'recipe_name' : request.form['recipe_name'],\n 'recipe_description' : request.form['recipe_description'],\n 'cuisine_name' : request.form.get('cuisine_name'),\n 'recipe_ingredients' : request.form.get('recipe_ingredients'),\n 'recipe_method' : request.form.get('recipe_method'),\n 'cook_time': request.form.get('cook_time'),\n 'image_url': request.form.get('image_url'),\n 'author' : session['username'],\n 'category_name': request.form.getlist('category_name'),\n 'main_ingredient': request.form.get('main_ingredient')\n })\n # Check for existing ingredient to avoid \n # re-entering the same ingredient into database\n # Adds new main ingredient into database if does not exist\n main_ingredient = mongo.db.main_ingredients\n existing_ingredient = main_ingredient.find_one({\n 'main_ingredient' : request.form['main_ingredient']\n })\n if request.form['main_ingredient']:\n if existing_ingredient is None:\n main_ingredient.insert({\n 'main_ingredient': request.form.get('main_ingredient')\n })\n return redirect(url_for('index'))\n return render_template('addrecipe.html', \n cuisines=mongo.db.cuisines.find(),\n difficulties=mongo.db.difficulties.find(),\n categories=mongo.db.categories.find(),\n main_ingredients=mongo.db.main_ingredients.find(),\n allergens=mongo.db.allergens.find(),\n username=session['username']) \n return render_template('signin.html')\n\n\n@app.route('/<username>/edit_recipe/<recipe_id>', methods=[\"GET\",'POST'])\ndef edit_recipe(username, recipe_id):\n \"\"\"\n Direct user to editrecipe.html and update chosen recipe\n once user presses 'update recipe' button\n \"\"\"\n the_recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n cuisines = mongo.db.cuisines.find()\n difficulties = mongo.db.difficulties.find()\n if 'username' in session:\n return render_template('editrecipe.html',\n recipe=the_recipe,\n cuisines=cuisines,\n difficulties=difficulties,\n categories=mongo.db.categories.find(),\n main_ingredients=mongo.db.main_ingredients.find(),\n allergens=mongo.db.allergens.find(),\n username=session['username'])\n return render_template('signin.html',\n message='Please sign in or register to edit a recipe!')\n\n\n@app.route('/update_recipe/<recipe_id>', methods=['POST'])\ndef update_recipe(recipe_id):\n \"\"\"\n Update the recipe in the database and direct user\n back to the recipe\n \"\"\"\n if 'username' in session:\n recipes = mongo.db.recipes\n recipes.update(\n {'_id': ObjectId(recipe_id)},\n {\n 'recipe_name': request.form.get('recipe_name'),\n 'cuisine_name': request.form.get('cuisine_name'),\n 'recipe_ingredients': request.form.get('recipe_ingredients'),\n 'recipe_method': request.form.get('recipe_method'),\n 'cook_time': request.form.get('cook_time'),\n 'recipe_description': request.form.get('recipe_description'),\n 'image_url': request.form.get('image_url'),\n 'author': session['username'],\n 'category_name': request.form.getlist('category_name'),\n 'main_ingredient': request.form.get('main_ingredient')\n })\n return redirect(url_for('get_recipe', recipe_id=recipe_id,\n username=session['username']))\n\n\n@app.route('/<username>/delete_recipe/<recipe_id>')\ndef delete_recipe(username, recipe_id):\n \"\"\"\n Delete recipe from database\n \"\"\"\n if 'username' in session:\n mongo.db.recipes.remove({'_id': ObjectId(recipe_id)})\n return redirect(url_for('my_recipes', username=session['username']))\n return render_template('signin.html',\n message='Please sign in or register to delete a recipe!')\n\n\n@app.route('/my_recipes/<username>')\ndef my_recipes(username):\n \"\"\"\n Display all recipes created by user in session\n \"\"\"\n if 'username' in session:\n user = mongo.db.users.find_one({\"username\": username})\n user_recipes = mongo.db.recipes.find({\"author\": session['username']})\n recipes_total = user_recipes.count()\n return render_template(\"myrecipes.html\",\n recipes_total=recipes_total,\n user=user,\n user_recipes=user_recipes,\n message=\"Your Recipes\",)\n else:\n return redirect(url_for('index',\n message=\"You do not have any recipes!\"))\n \n@app.route('/get_recipes')\ndef get_recipes():\n return render_template(\"recipes.html\", recipes=mongo.db.recipes.find())\n\nif __name__ == '__main__':\n app.run(host=os.environ.get('IP'),\n port=int(os.environ.get('PORT')),\n debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"162555593","text":"import sys\r\nROOTP = 
sys.path[-1].replace('.ipython','').replace('//','/')\r\nsys.path.insert(0, ROOTP + 'Documents/Synced/_Promotion/scripts/helperfunctions')\r\nimport filehandling\r\nimport plotting\r\nimport dataconversions\r\n\r\nimport os \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nBASEP = ROOTP + 'Documents/Synced/_Promotion/Projects/Leo/'\r\n\r\n#%%\r\npath_data = BASEP + 'data/F15/'\r\npath_olddata = BASEP + 'data/F15/_old (Alireza)/MAX_C03_tumor-masked.tif'\r\nregion = filehandling.pload(path_data + 'region')\r\nsegmentation = filehandling.pload(path_data + 'segmentation')\r\nprojections = filehandling.pload(path_data + 'projections')\r\n\r\n\r\n(volume, FijiMeta) = filehandling.readFijiTIFF(path_olddata)\r\no = region['partitioning']['cropping_offset']\r\nb = region['partitioning']['cropping_offset'] + np.asarray(region['partitioning']['cropping_boundingbox'])\r\nz_old = volume[o[0]:b[0],o[1]:b[1],:,:].squeeze().astype(np.bool)\r\n\r\nplotting.maskcomparison(projections['z'].astype(np.uint8), z_old.astype(np.uint8), ahandle=None)\r\n\r\nvols = np.zeros(len(segmentation['metastases']))\r\nfor i, m in enumerate(segmentation['metastases']):\r\n vols[i] = m['volume']\r\nplt.figure()\r\nplt.hist(vols,300)\r\n\r\n","sub_path":"analyses/Compare_GT_vs_OldSegmentation.py","file_name":"Compare_GT_vs_OldSegmentation.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"485260725","text":"# -*- coding: utf-8 -*-\n# Grastyele\n# Date: 2013-08-24\n\nimport re\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom FileModel import FileModel\nfrom NavPanel import NavPanel\nfrom common import *\n\nclass NavController():\n\tdef __init__(self, parent):\n\t\tself.widget = NavPanel(self, parent.getWidget())\n\t\tself.parent = parent\n\t\tself.getNoteList()\n\n\tdef getWidget(self):\n\t\treturn self.widget\n\n\tdef getNoteList(self, pattern = \"\"):\n\t\tfiles = FileModel.getFileList()\n\t\tself.widget.clear()\n\t\tgroups = list()\n\t\tfor f in files:\n\t\t\tif FileModel.isDir(f):\n\t\t\t\tnewGroup = QTreeWidgetItem(QStringList(f))\n\t\t\t\tif f == defaultGroup:\n\t\t\t\t\tgroups.insert(0, newGroup)\n\t\t\t\telse:\n\t\t\t\t\tgroups.append(newGroup)\n\t\t\t\tsubFiles = FileModel.getFileList(f)\n\t\t\t\tfor ff in subFiles:\n\t\t\t\t\tif re.search(pattern, ff):\n\t\t\t\t\t\tnewGroup.addChild(QTreeWidgetItem(QStringList(ff)))\n\t\tself.widget.addTopLevelItems(groups)\n\t\tfor g in groups: g.setExpanded(True)\n\n\tdef createNote(self, group, noteName):\n\t\tgroupid = 0\n\t\tfor i in xrange(1, self.widget.getTopLevelItemCount()):\n\t\t\tif self.widget.getTopLevelItemText(i) == group:\n\t\t\t\tgroupid = i\n\t\t\t\tbreak\n\t\tnewNote = QTreeWidgetItem([noteName])\n\t\tself.widget.getTopLevelItem(groupid).addChild(newNote)\n\t\treturn newNote\n\n\tdef setSelected(self, note):\n\t\tself.widget.setSelected(note)\n\n\tdef removeGroup(self, group):\n\t\tself.widget.removeItem(self.widget.getIndexOfTopLevelItem(group))\n\n\tdef createGroup(self, groupName):\n\t\tnewGroup = QTreeWidgetItem([groupName])\n\t\tself.widget.addTopLevelItem(newGroup)\n","sub_path":"controller/NavController.py","file_name":"NavController.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585468916","text":"from sources.functions import *\nfrom collections import *\ntext = {}\nnon_ascii_chars = set()\nwith open(\"SB Bible.txt\", 'r', 
encoding='cp1252') as f:\n lines = list(f)\n for i, line in enumerate(lines):\n lines[i] = lines[i].replace(\"@\", \"\")\n lines[i] = lines[i].replace(\" \", \"\\t\").replace(\"\\t\\t\", \"\\t\")\n if len(lines[i].strip().split()) == 1:\n book = lines[i].split()[0].strip().title()\n chapter = 1\n text[book] = defaultdict(dict)\n text[book][chapter] = defaultdict(dict)\n else:\n sc_texts = re.findall(\"(.*?)\", lines[i])\n for sc_text in sc_texts:\n lines[i] = lines[i].replace(f\"{sc_text}\", f\"{sc_text.upper()}\")\n if lines[i].split()[0].isdigit():\n verse, comm = lines[i].split(\"\\t\", 1)\n if verse.strip() == \"1\":\n chapter += 1\n text[book][chapter] = defaultdict(dict)\n elif \":\" in lines[i].split()[0]:\n verse = lines[i].split()[0]\n comm = lines[i].replace(verse, \"\").strip()\n poss_chapter, poss_verse = verse.split(\":\")\n verse = int(poss_verse)\n poss_chapter = int(poss_chapter)\n assert poss_chapter - chapter in [0, 1]\n chapter = poss_chapter\n else:\n comm = lines[i]\n comm = comm.replace(\"\\t\", \" \").replace(\"<br>\", \" \").replace(\"  \", \" \").strip()\n\n\n text[book][chapter][verse] = comm\n\n\n with open(\"results.csv\", 'w') as f:\n writer = csv.writer(f)\n for book in text:\n chapter_diff = len(text[book].values()) - len(library.get_index(book).all_section_refs())\n if chapter_diff != 0:\n print(\"{} has chapter diff of {}\".format(book, chapter_diff))\n for ch in text[book]:\n pasuk_diff = len(text[book][ch].values()) - len(Ref(\"{} {}\".format(book, ch)).all_segment_refs())\n if pasuk_diff != 0:\n print(\"{} {} has pasuk diff of {}\".format(book, ch, pasuk_diff))\n for p, pasuk in text[book][ch].items():\n writer.writerow([\"{} {} {}\".format(book, ch, p), pasuk])\nprint(non_ascii_chars)","sub_path":"sources/Everett Fox/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606070645","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='gift',\n name='image',\n field=models.ImageField(null=True, upload_to=b'gifts/', blank=True),\n ),\n migrations.AlterField(\n model_name='gift',\n name='value',\n field=models.PositiveSmallIntegerField(default=1),\n ),\n ]\n","sub_path":"apps/shop/migrations/0002_auto_20151122_0947.py","file_name":"0002_auto_20151122_0947.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533432329","text":"#!/usr/bin/python\n#encoding:utf-8\n\"\"\" Main App class definition module\"\"\"\n# system imports\nimport os\n\nimport logging\n\n#system libs\n\nfrom gi.repository import Gtk #pylint: disable = E0611\n\n#internal imports\n\nfrom app.controller import ControllerModule, PlayEventManager\n\nimport ui.subtitles, ui.ui_utils, ui.videotorrent_list_control\nfrom serie.fs_store import FsSeriesStore, FsManagedSeriesData\nfrom serie.serie_manager import SeriesStore, SeriesData\n\nfrom datasource.play_subdl import Subdownloader, TVsubtitlesSubdownloader\\\n\nfrom snakeguice.modules import Module\n\nfrom app.config import Config\nfrom app.service import PipeService, async_start\n\nfrom twisted.internet import defer\n\nclass ControllerFactory(object):\n\t\"\"\" Factory creating a standard controller\"\"\"\n\tdef create(self, app, series, injector):\n\t\t\"\"\" factory method\"\"\"\n\t\treturn PlayEventManager(app, series, injector)\nclass VideoFinderService(object):\n\tpass\n\nclass AppModule(Module):\n\t\"\"\" snake guice application module configurator\"\"\"\n\n\tdef __init__(self, debug = None):\n\t\tself.debug = debug\n\n\tdef configure(self, binder):\n\t\t\"\"\" binding definition \"\"\"\n\t\t\n\t\tbinder.bind(Subdownloader, to=TVsubtitlesSubdownloader)\n\t\tself.install(binder, ControllerModule())\n\t\tbinder.bind(ControllerFactory, to=ControllerFactory)\n\n\t\tstore = FsSeriesStore()\n\t\tconfig = Config()\n\t\t\n\t\tbinder.bind(SeriesStore, to_instance = store)\n\t\tbinder.bind(Config, to_instance = config)\n\t\t\n\t\tbinder.bind(SeriesData, to = FsManagedSeriesData)\n\t\tbinder.bind(VideoFinderService, \n\t \t\tto_instance = PipeService([\"video_finder_server.py\", \"-D\", \"debug\"]))\n\nclass App(object):\n\t\"\"\"Class for main Manager app\"\"\"\n\tvideo_finder_key = \"TorrentDl\"\n\t\n\tdef async_start_service(self, service, key):\n\t\t\"\"\" 
async start a service \"\"\"\n\t\tasync_start(service, self.services, key)\n\n\tdef start_services(self):\n\t\t\"\"\"Add services used by app\n\t\tTODO: complete service\n\t\t\"\"\"\n\t\t# beginning of a Code Goldberg Machine\n\t\t# Keep it overly complex\n\t\tself.async_start_service(self.injector.get_instance(VideoFinderService),\n\t\t\t\t\t\tself.video_finder_key)\n\n\tdef _get_service(self, key):\n\t\treturn self.services[key]\n\n\tdef get_service(self, key):\n\t\t\"\"\" Returns a MaybeDeferred which waits for service for starting \n\t\t(if not started) to trigger its callback\n\t\t\"\"\"\n\t\treturn defer.maybeDeferred(self._get_service, key)\n\n\n\tdef stop_app(self, widg):\n\t\t\"\"\" Stop App \n\t\tTODO : see if must move from event_mgr.end()\"\"\"\n\t\tself.stop()\n\t\tself.event_mgr.end(widg)\n\t\n\tdef stop(self):\n\t\t\"\"\" Stop services \"\"\"\n\t\tfor serv in self.services.iterkeys():\n\t\t\tself.get_service(serv).addCallback(lambda serv: serv.stop())\n\n\n\tdef __init__(self, injector):\n\t\tself.injector = injector\n\n\t\tstore = injector.get_instance(SeriesStore) \n\t\tconfig = injector.get_instance(Config)\n\t\tcontroller_factory = injector.get_instance(ControllerFactory)\n\n\t\t# Loading the main UI file\n\t\tgladefile = os.path.join(ui.ui_file)\n\t\tbuilder = Gtk.Builder()\n\t\tbuilder.add_from_file(gladefile)\n\n\t\tself.widg_tree = builder \n\t\n\t\tself.services = {}\n\n\t\tself.start_services()\n\n\t\t# Model initialization\n\t\n\t\tself.store = store\n\t\tself.config = config\n\n\t\t#TODO: wtf ?\n\t\t# bash_factory = BashManagedSerieFactory(self.store)\n\t\tserie_list = self.store.get_serie_list()\n\n\t\tlogging.info(\"creating serie manager\")\n\t\tself.series = injector.get_instance(SeriesData) # bash_factory.create_serie_manager()\n\t\tlogging.info(\"created serie manager\")\n\t\n\t\t# View initialization : serie list combo\n\n\t\tserie_list.insert(0, self.series.current_serie.name)\n\t\t\n\t\tself.event_mgr = controller_factory.create(self, self.series, injector)\n\t\tui.ui_utils.populate_combo_with_items(self.getitem(\"SerieListCombo\"), \\\n\t\t\t\tserie_list)\n\t\t\n\t\t# Control : data getter for serie initialization\n\t\t\n\t\tself.event_mgr.set_manager(self.store)\n\t\t\t\n\t\t# View : initial screen setup \n\t\t\n\n\t\t# control : monitoring current season \n\t\t# TODO: move to serie change control init\n\t\tself.event_mgr.put_monitor_on_saison()\n\t\n\t\t\n\t\t# Control initialization setting up callbacks \n\t\t# on view alteration by user events\n\n\t\tdic = { \"on_Play_clicked\" : self.event_mgr.play,\n\t\t\t\"on_SlaveMplayerPlay_clicked\" : \\\n\t\t\t\t\tself.event_mgr.play_windowed,\n\t\t\t\"on_SerieListCombo_changed\" : \\\n\t\t\t\t\tself.event_mgr.selected_serie_changed,\n\t\t\t\"on_MainWindow_destroy\" : self.stop_app, \\\n\n\t\t\t\"on_numSaisonSpin_value_changed\" : \\\n\t\t\t\t\tself.event_mgr.update_season_number,\n\t\t\t\"on_numEpSpin_value_changed\" : self.event_mgr.update_episode_number,\n\t\t\t\"on_skipTimeSpin_value_changed\" : \\\n\t\t\t\t\tself.event_mgr.update_skip_time,\n\t\t\t\"on_decayTimeSpin_value_changed\" : \\\n\t\t\t\t\tself.event_mgr.update_decay_time,\n\t\t\t# \"on_FPSComboBox_changed\": self.event_mgr.update_fps,\n\t\t\t\"on_CandidateSubsCombo_changed\": \\\n\t\t\t\t\tself.event_mgr.update_subtitle_file,\n\t\t\t\"video_keypress\": self.event_mgr.video_keypress,\n\t\t\t\"on_SubtitlesTreeView_row_activated\": \\\n\t\t\t\t\tself.event_mgr.subtitle_seek ,\n\t\t\t\"on_Synchro_Button_pressed\": 
\\\n\t\t\t\t\tself.event_mgr.subtitle_sync , \n\t\t\t\"on_SubtitlesTreeView_start_interactive_search\" : \\\n\t\t\t\t\tui.subtitles.started_search,\n\t\t\t\"on_OpenRep_clicked\": \\\n\t\t\t\t\tself.event_mgr.open_filemanager,\n\t\t\t\"on_DlSub_clicked\": \\\n\t\t\t\t\tself.event_mgr.search_subtitles\n\t\t\t\n\t\t}\n\n\t\tself.window = self.widg_tree.get_object(\"MainWindow\")\n\t\tself.widg_tree.connect_signals(dic)\n\t\t\n\t\t# Control : \n\t\t# install callback quitting the application when the main window is destroyed\n\t\t\n\t\tif (self.window):\n\t\t\tself.window.connect(\"destroy\", self.event_mgr.end )\n\t\telse:\n\t\t\tlogging.info (\"Not found\")\n\n\tdef getitem(self, key):\n\t\t\"\"\" Utility function, get a widget from its string ID \"\"\"\n\t\treturn self.widg_tree.get_object(key)\n\n\tseason_number_spin_name = \"numSaisonSpin\"\n\tepisode_number_spin_name = \"numEpSpin\"\n\t\n\tdef selected_season(self):\n\t\t\"\"\" Getter : selected season number\"\"\"\n\t\treturn self.getitem(self.season_number_spin_name).get_value()\n\n\tdef selected_numep(self):\n\t\t\"\"\" Getter : selected episode number\"\"\"\n\t\treturn self.getitem(self.episode_number_spin_name).get_value()\n\n","sub_path":"app/main_app.py","file_name":"main_app.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556094041","text":"\"\"\"\nTest supervdsm\nMake sure to run this test with \"remove_packages\" plugin to test the\ninstallation of supervdsm when adding the host to the Data Center\nhttps://polarion.engineering.redhat.com/polarion/#/project/RHEVM3/wiki/\nStorage/3_3_Storage_SuperVdsm\n\"\"\"\nimport logging\nimport time\nimport pytest\nfrom art.unittest_lib import (\n tier2,\n tier4,\n storages,\n)\nimport config\nfrom art.unittest_lib import StorageTest as TestCase\nfrom rhevmtests.storage.fixtures import (\n init_host_resource, init_host_or_engine_executor\n)\nfrom art.rhevm_api.tests_lib.low_level import (\n hosts as ll_hosts\n)\nfrom fixtures import (\n check_host_up,\n restore_supervdsm_files\n)\nfrom art.test_handler.tools import polarion\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.mark.usefixtures(\n check_host_up.__name__,\n init_host_resource.__name__,\n init_host_or_engine_executor.__name__\n)\nclass BaseTestCase(TestCase):\n \"\"\"\n Base test case with common setup and teardown\n \"\"\"\n\n\n@storages((config.NOT_APPLICABLE,))\nclass TestCase6269(BaseTestCase):\n \"\"\"\n supervdsm test case, sanity\n https://polarion.engineering.redhat.com/polarion/#/project/RHEVM3/wiki/\n Storage/3_3_Storage_SuperVdsm\n \"\"\"\n __test__ = True\n\n @polarion(\"RHEVM3-6269\")\n @tier2\n @storages(('iscsi',))\n def test_supervdsm_sanity(self):\n \"\"\"\n Test basic functionality is running after host is installed\n \"\"\"\n logger.info(\n \"Make sure services are running after host is added to rhevm in \"\n \"the setup Class\"\n )\n assert self.host_resource.service(name=config.VDSMD).status(), (\n \"VDSM is not running\"\n )\n assert self.host_resource.service(\n name=config.SUPERVDSMD\n ).status(), \"superVDSM is not running\"\n\n rc, out, err = self.executor.run_cmd(config.HW_INFO_COMMAND)\n assert not rc, config.ERROR_HW_OUTPUT % err\n\n logger.info(\"Make sure log files exist\")\n assert self.host_resource.fs.exists(path=config.SUPERVDSM_LOG), (\n config.FILE_DOES_NOT_EXIST % config.SUPERVDSM_LOG\n )\n assert self.host_resource.fs.exists(path=config.VDSM_LOG), (\n config.FILE_DOES_NOT_EXIST % 
config.VDSM_LOG\n )\n\n\nclass TestCase6270(BaseTestCase):\n \"\"\"\n supervdsm test case, command options\n https://polarion.engineering.redhat.com/polarion/#/project/RHEVM3/wiki/\n Storage/3_3_Storage_SuperVdsm\n \"\"\"\n __test__ = True\n\n @polarion(\"RHEVM3-6270\")\n @tier4\n def test_command_options_test(self):\n \"\"\"\n Test command options\n \"\"\"\n def run_system_init_supervdsmd(cmd):\n command = [config.SERVICE_CMD, cmd, config.SUPERVDSMD]\n try:\n rc, out, err = self.executor.run_cmd(command)\n if rc:\n logger.error(\n \"Executed %s, output: %s, error: %s\" % (\n command, out, err\n )\n )\n return False\n return True\n except IndexError:\n self.fail(\"Couldn't find supervdsm PID\")\n\n logger.info(\"Stopping supervdsm\")\n assert self.host_resource.service(config.SUPERVDSMD).stop(), (\n config.ERROR_EXEC_SERVICE_ACTION % (\"stop\", \"supervdsm\")\n )\n time.sleep(config.SLEEP_SERVICE)\n logger.info(\"Starting supervdsm\")\n assert self.host_resource.service(config.SUPERVDSMD).start(), (\n config.ERROR_EXEC_SERVICE_ACTION % (\"start\", \"supervdsm\")\n )\n time.sleep(config.SLEEP_SERVICE)\n # for supporting rhel versions that stopping supervdsm stops vdsm\n # (rhel7 and up)\n logger.info(\"Starting vdsmd\")\n self.host_resource.service(config.VDSMD).start()\n time.sleep(config.SLEEP_SERVICE)\n restart_commands = [\n 'restart', 'condrestart', 'force-reload', 'try-restart'\n ]\n for command in restart_commands:\n logger.info(\"Restarting supervdsm\")\n assert run_system_init_supervdsmd(\n command\n ), config.ERROR_EXEC_SERVICE_ACTION % (command, \"supervdsmd\")\n\n\nclass TestCase6271(BaseTestCase):\n \"\"\"\n supervdsm test case, communication between supervdsm and vdsm\n https://polarion.engineering.redhat.com/polarion/#/project/RHEVM3/wiki/\n Storage/3_3_Storage_SuperVdsm\n \"\"\"\n __test__ = True\n\n @polarion(\"RHEVM3-6271\")\n @tier4\n def test_communication(self):\n \"\"\"\n Test that both services work when one is stopped\n \"\"\"\n logger.info(\"Stopping vdsmd\")\n assert self.host_resource.service(config.VDSMD).stop(), (\n \"vdsm didn't stop\"\n )\n time.sleep(config.SLEEP_SERVICE)\n assert self.host_resource.service(config.SUPERVDSMD).status(), (\n config.ERROR_SERVICE_NOT_UP % (\n config.SUPERVDSMD\n )\n )\n logger.info(\"Starting supervdsmd\")\n self.host_resource.service(config.SUPERVDSMD).start()\n logger.info(\"Starting vdsmd\")\n assert self.host_resource.service(config.VDSMD).start(), (\n \"vdsm didn't start\"\n )\n # After restart vdsm wait for host to be up\n assert ll_hosts.wait_for_hosts_states(\n True, config.HOSTS[0], states=config.HOST_UP, timeout=60\n ), \"Host never activated after vdsm restarted.\"\n time.sleep(config.SLEEP_SERVICE)\n rc, out, err = self.executor.run_cmd(config.HW_INFO_COMMAND)\n assert not rc, config.ERROR_HW_OUTPUT % err\n\n logger.info(\"Stopping supervdsmd\")\n assert self.host_resource.service(config.SUPERVDSMD).stop(), (\n \"Supervdsm didn't stop\"\n )\n time.sleep(config.SLEEP_SERVICE)\n logger.info(\"Starting supervdsmd\")\n assert self.host_resource.service(config.SUPERVDSMD).start(), (\n \"Supervdsm didn't start\"\n )\n # for supporting rhel versions that stopping supervdsm stops vdsm\n # (rhel7 and up)\n logger.info(\"Starting vdsmd\")\n self.host_resource.service(config.VDSMD).start()\n # After restart vdsm wait for host to be up\n assert ll_hosts.wait_for_hosts_states(\n True, config.HOSTS[0], states=config.HOST_UP, timeout=60\n ), \"Host never activated after vdsm restarted.\"\n time.sleep(config.SLEEP_SERVICE)\n rc, 
out, err = self.executor.run_cmd(config.HW_INFO_COMMAND)\n assert not rc, config.ERROR_HW_OUTPUT % err\n\n\nclass TestCase6272(BaseTestCase):\n \"\"\"\n supervdsm test case, supervdsm stress test\n https://polarion.engineering.redhat.com/polarion/#/project/RHEVM3/wiki/\n Storage/3_3_Storage_SuperVdsm\n \"\"\"\n __test__ = True\n\n @polarion(\"RHEVM3-6272\")\n @tier4\n def test_supervdsm_stress_test(self):\n \"\"\"\n supervdsm stress tests\n \"\"\"\n N = 100\n # is much faster run it with one ssh session\n cmd = (\n \"for i in `seq 0 %(iter)d`; do vdsm-client Host getHardwareInfo\"\n \" >& /dev/null; if [ $? -ne 0 ]; then exit -1; fi; done;\" %\n {'iter': N}\n )\n\n logger.info(\"Executing vdsClient get HW Info for %d times\" % N)\n # ~ 2 sec per execution for vdsm-client , vds-client was 0.3 sec\n rc, out, err = self.executor.run_cmd(cmd.split(' '), io_timeout=N * 10)\n assert not rc, (\n \"Couldn't execute %(iter)d times the command. %(output)s:\" %\n {\n \"iter\": N,\n \"output\": out,\n }\n )\n\n\n@pytest.mark.usefixtures(\n restore_supervdsm_files.__name__,\n)\nclass TestCase6273(BaseTestCase):\n \"\"\"\n deleting supervdsm log and changing log file permissions\n https://polarion.engineering.redhat.com/polarion/#/project/RHEVM3/wiki/\n Storage/3_3_Storage_SuperVdsm\n \"\"\"\n __test__ = True\n\n @polarion(\"RHEVM3-6273\")\n @tier4\n def test_change_supervdsm_log(self):\n \"\"\"\n change permissions and delete supervdsm log\n \"\"\"\n logger.info(\"Removing supervdsm log file to test recovery\")\n assert self.host_resource.fs.remove(\n config.SUPERVDSM_LOG\n ), \"Error removing %s file\" % config.SUPERVDSM_LOG\n assert self.host_resource.service(config.SUPERVDSMD).status(), (\n config.ERROR_SERVICE_NOT_UP % config.SUPERVDSMD\n )\n rc, out, err = self.executor.run_cmd(config.HW_INFO_COMMAND)\n assert not rc, (\n \"Supervdsm didn't recover from removing log file, out=%s\" % out\n )\n assert self.host_resource.fs.exists(\n config.SUPERVDSM_LOG\n ), \"%s should be created\" % config.SUPERVDSM_LOG\n\n logger.info(\"Changing supervdsm log file permissions to test recovery\")\n rc, out, err = self.executor.run_cmd(\n [\"chmod\", \"0000\", config.SUPERVDSM_LOG]\n )\n assert not rc, \"Error changing %s permissions %s\" % (\n config.SUPERVDSM_LOG, out\n )\n assert self.host_resource.service(config.SUPERVDSMD).status(), (\n config.ERROR_SERVICE_NOT_UP % config.SUPERVDSMD\n )\n rc, out, err = self.executor.run_cmd(config.HW_INFO_COMMAND)\n assert not rc, (\n \"Supervdsm didn't recover from changing log file's permissions\"\n )\n","sub_path":"art/tests/rhevmtests/storage/storage_supervdsm/test_supervdsm.py","file_name":"test_supervdsm.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456729541","text":"import urllib, os, xbmc, xbmcgui\nimport base64\naddon_id = 'plugin.video.all.iptv.evolucion'\ndata_folder = 'special://home/addons/plugin.video.all.iptv.evolucion/'\nUrl = base64.b64decode('aHR0cDovL3BsYXlpcHR2LndlYnNpdGUvZG93bmxvYWQvbGlzdGExYS8=')\nFile = ['source_file']\n\ndef download(url, dest, dp = None):\n if not dp:\n dp = xbmcgui.DialogProgress()\n dp.create(\"ACTIVANDO ALL IPTV EVOLUCION\",\"Introduciendo Codigo\",' ', ' ')\n dp.update(0)\n urllib.urlretrieve(url,dest,lambda nb, bs, fs, url=url: _pbhook(nb,bs,fs,url,dp))\n \ndef _pbhook(numblocks, blocksize, filesize, url, dp):\n try:\n percent = min((numblocks*blocksize*100)/filesize, 100)\n dp.update(percent)\n except:\n percent = 100\n 
dp.update(percent)\n if dp.iscanceled(): \n raise Exception(\"Cancelar\")\n dp.close()\n\nfor file in File:\n\turl = Url + file\n\tfix = xbmc.translatePath(os.path.join( data_folder, file))\n\tdownload(url, fix)\n\t\n\n\t\t\n\t\n#import xbmcaddon, util\t\n#addon = xbmcaddon.Addon('plugin.playoniptv.activador')\t\n\t\n#util.playMedia(addon.getAddonInfo('name'), addon.getAddonInfo('icon'), \n #'special://home/addons/plugin.playoniptv.activador/intro.mp4')\t\n\t\n\t\n","sub_path":"Plugins/plugin.all iptv.activador/resources/lib/activador.py","file_name":"activador.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489062212","text":"#code to retrive firstname of students with marks in maths above 90\n\nlist = [\n {\n 'id' : 1,\n 'firstname': 'Ravi',\n 'marks' : { 'maths' : 90,\n 'physics' : 70\n }\n },\n {\n 'id' : 2,\n 'firstname': 'Ajay',\n 'marks' : { 'maths' : 70,\n 'physics' : 70\n }\n },\n {\n 'id' : 3,\n 'firstname': 'Ravi',\n 'marks' : { 'maths' : 93,\n 'physics' : 70\n }\n },\n {\n 'id' : 4,\n 'firstname': 'Vijay',\n 'marks' : { 'maths' : 70,\n 'physics' : 70\n }\n },\n {\n 'id' : 5,\n 'firstname': 'Jai',\n 'marks' : { 'maths' : 95,\n 'physics' : 70\n }\n }\n]\nlist2 = []\n\nfor item in list:\n if item['marks']['maths'] >=90:\n name = item['firstname']\n list2.append(name)\n\nprint (\"List of students with marks in maths above 90: \")\nprint (list2)\n#by converting list into set duplicate entry is deleted.\nprint (\"Set (Duplicate entry is deleted): \")\nnewset = set(list2)\nprint (newset)\n","sub_path":"ExampleList.py","file_name":"ExampleList.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"303349453","text":"#! 
python3\n#Files and Folders\n\n#File Attributes\n#filename\n#filepath\n#file extension\n\n\nimport os\n\n#create a valid folder path using os.path.join() for any OS\nprint(os.path.join('folder1','folder2','folder3', 'test.py'))\n\n#get current working directory\nprint(os.getcwd())\n\n#change working directory\n#os.chdir()\n\n\n#absolute and relative file paths\n\n#/home/jonr/Desktop/Projects/ATBS/test.py# is an absolute file path\n\n###############################\n# Pathway shortcuts\n###############################\n\n\n#imagine we are trying to access the following path, but our working directory is folder1\n# root/folder1/folder2/spam.png\n\n#we can do\n# ./folder2/spam.png\n\n#to go up into the directory of root we can do\n#this gives us the parent directory of the current working dir\n# ../someotherthing.png\n\n###############################\n# OS module functions\n###############################\n\n\nprint(os.path.abspath('ListsAndStrings.py'))\nprint(os.path.dirname(os.path.abspath('ListsAndStrings.py')))\n\nprint(os.path.exists(os.path.abspath('ListsAndStrings.py')))\nprint(os.path.exists(os.path.abspath('banana.py')))\n\nprint(os.path.getsize('ListsAndStrings.py'))\nprint(os.listdir('./'))\n\nif not (os.path.exists('./peanuts/cream')):\n os.makedirs('./peanuts/cream')\n\n\n###############################\n# writing plain text files\n###############################\n\n#plaintext files are simple txt files as we know them\n\n#other type is binary program (pretty much everything!)\n\nif not (os.path.exists('./filetest')):\n os.makedirs('./filetest')\n\n#when opening a file, make sure you are opening it with the right param\n#open, append, write, read, etc\n# this is handled by the second param\n\nhelloFile = open('./filetest/pytest.txt', 'w')\n#print(helloFile.read())\n#print(helloFile.readlines())\n\nhelloFile.write('its a bird!\\n')\nhelloFile.write('its a plane!\\n')\nhelloFile.write('No! 
its a python test!!\\n')\nhelloFile.close()\n\nhelloFile = open('./filetest/pytest.txt', 'r')\nprint(helloFile.read())\nprint(helloFile.readlines())\n\nhelloFile.close()\n\n\n###############################\n# shelve module\n###############################\n\n#shelve objects are similar to a dictionary\n#can store dictionaries to a file very easily to access them later\n\nimport shelve\n\nshelfFile = shelve.open('./filetest/mydata')\nshelfFile['dogs'] = ['Buster', 'Phoenix', 'Taco']\nshelfFile.close()\n\n#i have saved this key and values to a file in the directory specified\n#we can access them again with this\n\nshelfFile = shelve.open('./filetest/mydata')\nprint(shelfFile['dogs'])\nprint(list(shelfFile.keys()))\nprint(list(shelfFile.values()))\nshelfFile.close()\n\n###############################\n# copying and moving files\n###############################\n\nimport shutil\n\nif not (os.path.exists('./filetest/folder1')):\n os.makedirs('./filetest/folder1')\n\n#copy the file to a directory\nshutil.copy('./filetest/pytest.txt', './filetest/folder1')\n#copy and rename it\nshutil.copy('./filetest/pytest.txt', './filetest/folder1/pytestEXTRA.txt')\n\n#copy directory to another and rename it!\n#useful for taking backups\n\nif not (os.path.exists('./filetest/folder1-Copy')):\n shutil.copytree('./filetest/folder1', './filetest/folder1-Copy')\n\n#move file\n#shutil.move('./filetest/pytest.txt', './filetest/folder1-Copy/MEGATEST.txt')\n\n#rename a file\nshutil.move('./filetest/pytest.txt', './filetest/pytest-renamed.txt')\n\n###############################\n# deleting files\n###############################\n\n#import os\n#import shutil\n\n#delete single file\nos.unlink('./filetest/pytest-renamed.txt')\n\n#delete an EMPTY directory\n#os.rmdir('./filetest/pytest.txt')\n\n#check a directory for files before deletion\n\nfor filename in os.listdir('./filetest/folder1-Copy'):\n print(filename)\n\n#delete a populated directory\nshutil.rmtree('./filetest/folder1-Copy')\n\n###############################\n# walking directory tree\n###############################\n\n#import os\n\nfor folderName, subfolders, fileNames in os.walk('./'):\n print('Folder: ' + folderName)\n print(subfolders)\n print(fileNames)\n \n\n\n\n\n\n","sub_path":"Python/FilesAndFolders.py","file_name":"FilesAndFolders.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486157070","text":"'''\nScript retrieves cases and deaths data from JHU's CSSE COVID-19 data repository (https://github.com/CSSEGISandData/COVID-19/) and saves relevant data to files for graphs and table.\nChangelog:\n--24/12/20: File created. Original file used ECDC data, which is now only updated weekly. 
This is a much more efficient, tidier script that uses a more reliable data source.\n'''\nimport csv\nimport os\nimport requests\nimport codecs\nimport datetime\nimport json\nfrom contextlib import closing\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nprint(str(datetime.datetime.now()) + \" Starting JHU CSSE data fetch...\")\n\ndef getDateAWeekAgo(date):\n '''\n Takes a date as a string in the form dd/mm/YYYY and returns\n the date as a string 7 days ago.\n '''\n datetimeDate = datetime.datetime.strptime(date, '%d/%m/%Y')\n datetimeSevenDaysAgo = datetimeDate - datetime.timedelta(days=7)\n minimumDate = datetime.datetime(2020, 1, 22)\n if datetimeSevenDaysAgo < minimumDate:\n datetimeSevenDaysAgo = datetime.datetime(2020, 1, 22)\n stringSevenDaysAgo = datetimeSevenDaysAgo.strftime(\"%d/%m/%Y\")\n return stringSevenDaysAgo\n\n# countries to include in graphs\ncountriesToInclude = [\"United Kingdom\", \"Italy\", \"US\", \"Spain\", \"Korea, South\", \"Germany\", \"Japan\", \"Sweden\", \"Switzerland\", \"France\", \"Ireland\", \"Netherlands\", \"Russia\", \"Brazil\", \"India\"]\n\ngraphData = {}\nwith closing(requests.get('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv', stream=True)) as deathsResponse:\n '''\n Fetch deaths data from JHU's CSSE COVID-19 data repository (https://github.com/CSSEGISandData/COVID-19/). Lines in CSV file are retrieved iteratively to conserve memory.\n '''\n reader = csv.reader(codecs.iterdecode(deathsResponse.iter_lines(), 'utf-8'), delimiter=',')\n line = 0\n headers = []\n for row in reader:\n if line == 0:\n headers = row\n country = row[1]\n if country in countriesToInclude and row[0]==\"\":\n cols = len(row)\n graphData[country] = {}\n for col in range(0, cols):\n if col < 4: # province, country, lat, long, 1/22/20, 1/23/20...\n continue\n date = datetime.datetime.strptime(headers[col], '%m/%d/%y').strftime('%d/%m/%Y') # convert US date to UK\n graphData[country][date] = {\"totalDeathsToDate\": int(row[col])}\n if date==\"22/01/2020\": # first date data is available for\n graphData[country][date]['newDeaths'] = int(row[col])\n else:\n dateYesterday = (datetime.datetime.strptime(date, '%d/%m/%Y') - datetime.timedelta(days=1)).strftime('%d/%m/%Y')\n graphData[country][date][\"newDeaths\"] = int(row[col]) - int(graphData[country][dateYesterday][\"totalDeathsToDate\"])\n line += 1\n print(str(datetime.datetime.now()) + \" Deaths fetch and processing complete.\")\n\nwith closing(requests.get('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv', stream=True)) as casesResponse:\n '''\n Fetch cases data from JHU's CSSE COVID-19 data repository (https://github.com/CSSEGISandData/COVID-19/). 
Lines in CSV file are retrieved iteratively to conserve memory.\n '''\n reader = csv.reader(codecs.iterdecode(casesResponse.iter_lines(), 'utf-8'), delimiter=',')\n line = 0\n headers = []\n for row in reader:\n if line == 0:\n headers = row\n country = row[1]\n if country in countriesToInclude and row[0]==\"\":\n cols = len(row)\n for col in range(0, cols):\n if col < 4: # province, country, lat, long, 1/22/20, 1/23/20...\n continue\n date = datetime.datetime.strptime(headers[col], '%m/%d/%y').strftime('%d/%m/%Y') # convert US date to UK\n graphData[country][date][\"totalCasesToDate\"] = int(row[col])\n if date==\"22/01/2020\":\n graphData[country][date]['newCases'] = int(row[col])\n else:\n dateYesterday = (datetime.datetime.strptime(date, '%d/%m/%Y') - datetime.timedelta(days=1)).strftime('%d/%m/%Y')\n graphData[country][date][\"newCases\"] = int(row[col]) - int(graphData[country][dateYesterday][\"totalCasesToDate\"])\n line += 1\n print(str(datetime.datetime.now()) + \" Cases fetch and processing complete.\")\n\n'''\nLoop over countries and dates, setting and calculating new deaths/cases, deaths/cases in last week, and total deaths/cases up to that date.\n\n'''\nfor country in graphData:\n for date in graphData[country]:\n if date==\"22/01/2020\": # first date available, set all values to beginning ones\n graphData[country][date]['newDeaths'] = int(graphData[country][\"22/01/2020\"][\"totalDeathsToDate\"])\n graphData[country][date]['totalDeathsToDate'] = int(graphData[country][\"22/01/2020\"][\"totalDeathsToDate\"])\n graphData[country][date]['totalDeathsInLastWeek'] = int(graphData[country][\"22/01/2020\"][\"totalDeathsToDate\"])\n graphData[country][date]['newCases'] = int(graphData[country][\"22/01/2020\"][\"totalCasesToDate\"])\n graphData[country][date]['totalCasesToDate'] = int(graphData[country][\"22/01/2020\"][\"totalCasesToDate\"])\n graphData[country][date]['totalCasesInLastWeek'] = int(graphData[country][\"22/01/2020\"][\"totalCasesToDate\"])\n else:\n dateWeekAgo = getDateAWeekAgo(date)\n deathsAWeekAgo = int(graphData[country][dateWeekAgo][\"totalDeathsToDate\"])\n dateYesterday = (datetime.datetime.strptime(date, '%d/%m/%Y') - datetime.timedelta(days=1)).strftime('%d/%m/%Y')\n newDeaths = int(graphData[country][date][\"totalDeathsToDate\"]) - int(graphData[country][dateYesterday][\"totalDeathsToDate\"])\n weeklyDeaths = int(graphData[country][date][\"totalDeathsToDate\"]) - deathsAWeekAgo\n graphData[country][date][\"totalDeathsInLastWeek\"] = weeklyDeaths\n casesAWeekAgo = int(graphData[country][dateWeekAgo][\"totalCasesToDate\"])\n newCases = int(graphData[country][date][\"totalCasesToDate\"]) - int(graphData[country][dateYesterday][\"totalCasesToDate\"])\n weeklyCases = int(graphData[country][date][\"totalCasesToDate\"]) - casesAWeekAgo\n graphData[country][date][\"totalCasesInLastWeek\"] = weeklyCases\n\ngraphJSON = json.dumps(graphData)\nf = open(\"{}/graphData.json\".format(dir_path), \"w\")\nf.write(graphJSON)\nf.close()\n\nf = open(\"{}/lastUpdate.txt\".format(dir_path), \"w\")\nlastUpdated = datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M\") + \" UTC\"\nf.write(lastUpdated)\nf.close()\n\nprint(str(datetime.datetime.now()) + \" Finished updating graph data\")","sub_path":"data/getGraphData.py","file_name":"getGraphData.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46958708","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import 
render_to_response\nfrom google.appengine.api import users\n\nimport models\nimport bforms\n\ndef respond(request, template, payload):\n user = users.get_current_user()\n if user:\n url = users.create_logout_url(request.path)\n url_linktext = 'Logout'\n payload['user_name'] = user.nickname()\n else:\n url = users.create_login_url(request.path)\n url_linktext = 'Login'\n payload['user_name'] = 'Hi, welcome! Please'\n payload['url'] = url\n payload['url_linktext'] = url_linktext\n payload['recents'] = models.Expert.all().order('-name').fetch(5) \n return render_to_response(template, payload)\n\ndef index(request):\n experts = models.Expert.all().order('-name').fetch(20)\n payload = dict(experts = experts)\n return respond(request, 'index.html', payload)\n\ndef create(request):\n if request.method == 'GET':\n expertform = bforms.ExpertForm()\n \n if request.method == 'POST':\n expertform = bforms.ExpertForm(request.POST)\n if expertform.is_valid():\n expert = expertform.save()\n return HttpResponseRedirect(expert.get_absolute_url())\n payload = dict(expertform=expertform)\n return respond(request, 'create.html', payload)\n\ndef search(request):\n if request.method == 'GET':\n searchform = bforms.SearchForm()\n \n if request.method == 'POST':\n searchform = bforms.SearchForm(request.POST)\n if searchform.is_valid():\n searchcondition = searchform.save()\n knowledge_area = searchcondition.knowledge_area\n experts = models.Expert.gql(\"WHERE sap_experience = %:1%\", knowledge_area)\n payload = dict(experts=experts)\n return respond(request, 'search_result.html', payload)\n payload = dict(searchform=searchform)\n return respond(request, 'search.html', payload)\n\ndef show(request, expert_key):\n expert = models.Expert.get(expert_key)\n payload = dict(expert=expert)\n return respond(request, 'show.html', payload)\n\ndef edit(request, expert_key):\n expert = models.Expert.get(expert_key)\n if request.method == 'GET':\n expertform = bforms.ExpertForm(instance=expert) \n \n if request.method == 'POST':\n expertform = bforms.ExpertForm(request.POST, instance=expert)\n if expertform.is_valid():\n expertform.save()\n return HttpResponseRedirect(expert.get_absolute_url())\n payload = dict(expertform=expertform)\n return respond(request, 'edit.html', payload)","sub_path":"python/sep_v1/apps/expert/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105026340","text":"#!/usr/bin/env python3\n\n\"\"\"\n Takes LSP commands and communicates them with the LSP server.\n\n This script is useful for testing the LSP server: you can feed in a payload and\n confirm that the responses are correct.\n\n The LSP spec can be found here:\n https://github.com/Microsoft/language-server-protocol/blob/master/protocol.md\n\n Input format: the first word is the LSP method (see the LSP spec for a full\n listing). The rest of the line is the JSON payload. You can also comment out a\n line by prepending with a `#`. 
See 'init.txt' for example of format.\n\n    Suggested command-line usage:\n\n    python talk.py filename.txt\n\"\"\"\nimport fileinput\nfrom lspcommand import LspCommandProcessor\n\n\ndef main():\n    with LspCommandProcessor.create() as lsp_proc:\n        for line in fileinput.input():\n            command = lsp_proc.build_command(line)\n            if command:\n                print_section(\"SENDING:\", command)\n                response = lsp_proc.send(command)\n                print_section(\"LSP SAID:\", response.decode())\n\n\ndef print_section(header, message):\n    print(f\"{header}\\n{message}\\n\\n{'-' * 80}\\n\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"hphp/hack/test/tools/lsp/talk.py","file_name":"talk.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"555493196","text":"#coding=gbk\r\n'''\r\nCreated on 2019-02-03\r\n\r\n@author: G2435\r\n'''\r\n''' Creating and referencing a dictionary '''\r\nphoneBook ={\"Book\":1234,\"Marry\":4321} \r\nprint(phoneBook)\r\nprint(phoneBook[\"Marry\"])\r\n\r\n'''Use the dict function to convert a tuple or a list into a dictionary'''\r\nitems = [['Bill','4321'],(\"Mike\",\"7891\")]\r\nd = dict(items)\r\nprint(d)\r\ndict1 = dict(name = \"Bill\",number = 6543,age = 34)\r\nprint(dict1)\r\narry = []\r\nwhile True:\r\n    key = input(\"Please enter a key: \")\r\n    if key == \"end\":\r\n        break\r\n    value = input(\"Please enter a value\")\r\n    arry.append([key,value])\r\nd = dict(arry)\r\nprint(d)","sub_path":"Python学习基础知识/python基础/第六节:字典/创建字典.py","file_name":"创建字典.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"466578886","text":"def main():\n    import serial\n    import unidecode\n\n    port = \"/dev/\" + input(\"Which port do you want? \")\n    speed = int(input(\"Which speed do you want? \"))\n    # port = \"/dev/ttyACM0\"\n    # port = \"/dev/rfcomm4\"\n    # port = \"/dev/ttyUSB0\"\n    with serial.Serial(port, speed, timeout=10, writeTimeout=10) as port_serie:\n        if port_serie.isOpen():\n            mode = input(\n                \"Enter a number (1 for reading,\\\n                anything else for writing): \"\n            )\n            while True:\n                if mode == \"1\":\n                    ligne = port_serie.readline()\n                    ligne = str(ligne)\n                    ligne = ligne.replace(\"b'\", \"\", 1)\n                    ligne = ligne.replace(\"'\", \"\")\n                    ligne = ligne.replace(\"\\\\n\", \"\")\n                    ligne = ligne.replace(\"\\\\r\", \"\")\n                    if ligne != \"\":\n                        print(ligne)\n                else:\n                    text = input(\"Enter a text to send to the arduino: \")\n                    text = unidecode.unidecode(text)\n                    text += \"\\n\"\n                    port_serie.write(text.encode(\"ascii\"))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Port série.py","file_name":"Port série.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424537756","text":"import networkx as nx\nimport random as rd\nimport graph6\nimport graph7\nimport graph8\nimport graph9\nimport graph10\n\n\ndef bfs(G,a,b):\n    G.add_nodes_from(G.nodes(), label = -1) #initialization of all labels to -1 / indicates an unlabelled vertex\n    G.node[a]['label'] = 0 #labels vertex a with 0 (0 distance from a)\n    i=0 #initialises variable i at 0\n\n    while G.node[b]['label'] == -1: #iterate while vertex b remains unlabelled\n        for u in G.nodes(): #for each vertex in graph G\n            if G.node[u]['label'] == i: #if the vertex label matches the layer we are currently traversing - looks for vertices that are distance i away from a\n                for neighbor in G.neighbors(u): #for each neighbor of our current vertex\n                    if G.node[neighbor]['label'] == -1: #check to see if the neighbor is 
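The Port série.py record above cleans pyserial's `readline()` output with `str()` plus chained `replace()` calls; decoding the bytes directly is the idiomatic route. A tiny sketch with a made-up payload:

```python
raw = b"hello from arduino\r\n"   # what port_serie.readline() returns
ligne = raw.decode("ascii", errors="replace").strip()
assert ligne == "hello from arduino"
```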
unlabelled (therefore is 1 extra unit of distance away from a)\n G.node[neighbor]['label'] = i+1 #label the neighbor with its distance from a, which is i+1\n i+=1 #increment the layer number by 1 now that all unvisited neighbors of vertices in layer i have been labelled\n return G.node[b]['label'] #returns the label of vertex b - equal to its distance from a\n\ndef max_distance(G):\n max_dist = 0\n for a in G.nodes():\n for b in G.nodes():\n k = bfs(G,a,b)\n if max_dist < k:\n max_dist = k\n return max_dist\n\n\ndef random_distance(G):\n n = len(G.nodes()) #assigns the number of vertices in graph G to variable n\n av_dist = 0 #variable denotes distance from a to v\n for i in range(1,6): #iterates from 1 to 5\n a = rd.randint(1,n) #assigns a random vertex number to variable a\n b = rd.randint(1,n) #assigns a random vertex number to variable b\n while a==b: #while a and b are referencing the same vertex\n b = rd.randint(1,n) #select a new vertex for variable b\n if bfs(G,a,b)>av_dist: #if the distance between a and b is greater than av_dist\n av_dist = bfs(G,a,b) #assigns bfs(G,a,b) to av_dist\n return av_dist #returns the max distance (av_dist) between two randomly selected vertices in graph G\n\n\nprint()\nG6=graph6.Graph()\nprint('The diameter of G6 (i.e. the maximum distance between two vertices) is:', max_distance(G6))\nG6=graph6.Graph() # we initialize again the attributes of the graph G6\nprint('I found the distance between two random vertices in G6 to be:', random_distance(G6))\nprint()\n\n\nG7=graph7.Graph()\nprint('The diameter of G7 (i.e. the maximum distance between two vertices) is:', max_distance(G7))\nG7=graph7.Graph() # we initialize again the attributes of the graph G7\nprint('I found the distance between two random vertices in G7 to be:', random_distance(G7))\nprint()\n\n\nG8=graph8.Graph()\nprint('The diameter of G8 (i.e. the maximum distance between two vertices) is:', max_distance(G8))\nG8=graph8.Graph() # we initialize again the attributes of the graph G8\nprint('I found the distance between two random vertices in G8 to be:', random_distance(G8))\nprint()\n\n\nG9=graph9.Graph()\nprint('The diameter of G9 (i.e. the maximum distance between two vertices) is:', max_distance(G9))\nG9=graph9.Graph() # we initialize again the attributes of the graph G9\nprint('I found the distance between two random vertices in G9 to be:', random_distance(G9))\nprint()\n\n\nG10=graph10.Graph()\nprint('The diameter of G10 (i.e. 
the maximum distance between two vertices) is:', max_distance(G10))\nG10=graph10.Graph() # we initialize again the attributes of the graph G10\nprint('I found the distance between two random vertices in G10 to be:', random_distance(G10))\nprint()\n","sub_path":"random_distance.py","file_name":"random_distance.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"265279989","text":"#!/usr/bin/env python3\nimport socket\nimport requests\nimport random\n\n\nhost = '127.0.0.1'\nport = 5053\nheaders = {'accept': 'application/dns-message', 'content-type': 'application/dns-message'}\nupstreams = ['https://1.1.1.1/dns-query', 'https://1.0.0.1/dns-query']\nconns = []\n\n\ndef main():\n\t# Setup UDP server\n\tprint('Starting UDP server listening on: %s#%d' % (host, port))\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tsock.bind((host, port))\n\n\t# Connect to upstream servers\n\tfor upstream in upstreams:\n\t\tprint('Connecting to upstream server: %s' % (upstream))\n\t\tconns.append(upstream_connect())\n\n\t# Serve forever\n\ttry:\n\t\twhile True:\n\t\t\t# Accept requests from a client\n\t\t\tdata, addr = sock.recvfrom(4096)\n\n\t\t\t# Select upstream server to forward to\n\t\t\tindex = random.randrange(len(upstreams))\n\n\t\t\t# Forward request to upstream server and get response\n\t\t\tdata = upstream_forward(upstreams[index], data, conns[index])\n\n\t\t\t# Send response to client\n\t\t\tsock.sendto(data, addr)\n\texcept (KeyboardInterrupt, SystemExit):\n\t\tpass\n\n\t# Close upstream connections\n\tfor conn in conns:\n\t\tupstream_close(conn)\n\n\t# Close UDP server\n\tsock.shutdown(socket.SHUT_RDWR)\n\tsock.close()\n\n\ndef upstream_connect():\n\t\"\"\"\n\tCreate an upstream connection that will later be bound to a url.\n\n\tReturns:\n\t\tA requests session object\n\t\"\"\"\n\n\t# Create connection with default DNS message headers\n\tsession = requests.Session()\n\tsession.headers = headers\n\treturn session\n\n\ndef upstream_forward(url, data, conn):\n\t\"\"\"\n\tSend a DNS request over HTTPS using POST method.\n\n\tParams:\n\t\turl - url to forward queries to\n\t\tdata - normal DNS packet data to forward\n\t\tconn - HTTPS connection to upstream DNS server\n\n\tReturns:\n\t\tA normal DNS response packet from upstream server\n\n\tNotes:\n\t\tUsing DNS over HTTPS POST format as described here:\n\t\thttps://tools.ietf.org/html/draft-ietf-doh-dns-over-https-12\n\t\thttps://developers.cloudflare.com/1.1.1.1/dns-over-https/wireformat/\n\t\"\"\"\n\n\treturn conn.post(url, data).content\n\n\ndef upstream_close(conn):\n\t\"\"\"\n\tClose an upstream connection.\n\n\tParams:\n\t\tconn - requests session object to close\n\t\"\"\"\n\n\tconn.close()\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"concepts/doh-simple.py","file_name":"doh-simple.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"375859322","text":"#!python3\n# -*- coding: utf-8 -*-\n# hide a window with hotkey Ctrl+1, show the hidden window with hotkey Ctrl+2\n# run notepad.exe first and then press the hotkey for test\nimport os\nimport sys\nimport time\nimport ctypes\nimport subprocess\nimport psutil\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # not required after 'pip install uiautomation'\nimport uiautomation as auto\n\nWindowsWantToHide = ('Warcraft III', 'Valve001', 'Counter-Strike', 
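The random_distance.py record above hand-rolls BFS with the networkx 1.x `G.node` attribute API. A self-contained check of the same layer-by-layer idea against the modern API, on an arbitrary 5-cycle:

```python
import networkx as nx
from collections import deque

def bfs_distance(G, a, b):
    dist = {a: 0}                       # vertex -> distance from a
    queue = deque([a])
    while queue:
        u = queue.popleft()
        if u == b:
            return dist[u]
        for v in G.neighbors(u):
            if v not in dist:           # unlabelled neighbour: one layer further out
                dist[v] = dist[u] + 1
                queue.append(v)
    return -1                           # b is unreachable

G = nx.cycle_graph(5)
assert bfs_distance(G, 0, 2) == nx.shortest_path_length(G, 0, 2) == 2
```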
'Notepad')\n\n\ndef hide():\n    root = auto.GetRootControl()\n    handles = []\n    for window in root.GetChildren():\n        if window.ClassName in WindowsWantToHide:\n            auto.Logger.WriteLine('hide window, handle {}'.format(window.NativeWindowHandle))\n            window.Hide()\n            handles.append(window.NativeWindowHandle)\n    fout = open('hide_windows.txt', 'wt') # write once after the loop; opening in 'wt' mode inside the loop overwrote the file for every window\n    for handle in handles:\n        fout.write(str(handle) + '\\n')\n    fout.close()\n\n\ndef show():\n    fin = open('hide_windows.txt')\n    lines = fin.readlines()\n    fin.close()\n    for line in lines:\n        handle = int(line)\n        window = auto.ControlFromHandle(handle)\n        if window:\n            auto.Logger.WriteLine('show window: {}'.format(handle))\n            window.Show()\n\ndef HideWindowFunc(stopEvent):\n    scriptName = os.path.basename(__file__)\n    cmd = r'python.exe {} hide {}'.format(scriptName, ' '.join(sys.argv[1:]))\n    auto.Logger.ColorfullyWriteLine('HideWindowFunc call {}'.format(cmd))\n    p = subprocess.Popen(cmd)\n    while True:\n        if p.poll() is not None:\n            break\n        if stopEvent.is_set():\n            childProcesses = [pro for pro in psutil.process_iter() if pro.ppid() == p.pid or pro.pid == p.pid] # ppid is a method in psutil\n            for pro in childProcesses:\n                auto.Logger.WriteLine('kill process: {}, {}'.format(pro.pid, pro.cmdline()), auto.ConsoleColor.Yellow)\n                pro.kill() # kill the matched process, not the parent repeatedly\n            break\n        stopEvent.wait(0.01)\n    auto.Logger.WriteLine('HideWindowFunc exit')\n\ndef ShowWindowFunc(stopEvent):\n    scriptName = os.path.basename(__file__)\n    cmd = r'python.exe {} show {}'.format(scriptName, ' '.join(sys.argv[1:]))\n    auto.Logger.ColorfullyWriteLine('ShowWindowFunc call {}'.format(cmd))\n    p = subprocess.Popen(cmd)\n    while True:\n        if p.poll() is not None:\n            break\n        if stopEvent.is_set():\n            childProcesses = [pro for pro in psutil.process_iter() if pro.ppid() == p.pid or pro.pid == p.pid] # ppid is a method in psutil\n            for pro in childProcesses:\n                auto.Logger.WriteLine('kill process: {}, {}'.format(pro.pid, pro.cmdline()), auto.ConsoleColor.Yellow)\n                pro.kill() # kill the matched process, not the parent repeatedly\n            break\n        stopEvent.wait(0.01)\n    auto.Logger.WriteLine('ShowWindowFunc exit')\n\nif __name__ == '__main__':\n    if 'hide' in sys.argv[1:]:\n        hide()\n    elif 'show' in sys.argv[1:]:\n        show()\n    else:\n        subprocess.Popen('notepad')\n        auto.GetConsoleWindow().SetActive()\n        auto.Logger.ColorfullyWriteLine('Run Notepad\\nPress Ctr+1 to hide\\nPress Ctr+2 to show\\n')\n        auto.RunByHotKey({(auto.ModifierKey.Control, auto.Keys.VK_1): HideWindowFunc, (auto.ModifierKey.Control, auto.Keys.VK_2): ShowWindowFunc}, (auto.ModifierKey.Control, auto.Keys.VK_4)\n    )\n","sub_path":"demos/hide_window_with_hotkey.py","file_name":"hide_window_with_hotkey.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572435740","text":"import xml.etree.ElementTree as ET\n\ndef getMob(udidString):\n    start = udidString.find('<dict>')\n    end = udidString.find('</dict>')\n    xml = udidString[start:end + 7] # 7 == len('</dict>'): keep the closing tag\n    print(xml)\n    root = ET.fromstring(xml)\n    map = {}\n    for index, item in enumerate(root):\n        if item.tag == 'key':\n            if root[index + 1].tag == 'string':\n                map[item.text] = root[index + 1].text\n    return map","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497572753","text":"#\n# Copyright 2021 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\n\nimport pytest\nfrom enforce_typing import enforce_types\nfrom ocean_lib.web3_internal.contract_base import ContractBase\nfrom ocean_lib.web3_internal.currency import to_wei\nfrom ocean_lib.web3_internal.wallet import Wallet\nfrom web3.contract import 
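The child-process scans in HideWindowFunc/ShowWindowFunc above filter `psutil.process_iter()` by parent pid; psutil's `children()` helper does that walk directly. A minimal sketch, where the sleeping child process is arbitrary:

```python
import subprocess
import sys
import psutil

p = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(30)"])
parent = psutil.Process(p.pid)
for pro in parent.children(recursive=True) + [parent]:
    pro.kill()                       # grandchildren and children first, then the parent
```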
ContractCaller\n\n\n@enforce_types\nclass MyFactory(ContractBase):\n CONTRACT_NAME = \"DTFactory\"\n\n # super-simple functionality, because our main point here is to\n # test ContractBase itself, not a child class.\n def createToken(\n self, blob: str, name: str, symbol: str, cap: int, from_wallet: Wallet\n ) -> str:\n return self.send_transaction(\n \"createToken\", (blob, name, symbol, cap), from_wallet\n )\n\n\ndef test_name_is_None(web3):\n with pytest.raises(AssertionError):\n # self.name will become None, triggering the error\n ContractBase(web3, None)\n\n\ndef test_nochild(web3):\n with pytest.raises(AssertionError):\n ContractBase(web3, None)\n\n\ndef test_main(network, alice_wallet, alice_ocean, dtfactory_address, web3):\n\n # test super-simple functionality of child\n factory = MyFactory(web3, dtfactory_address)\n factory.createToken(\"foo_blob\", \"DT1\", \"DT1\", to_wei(1000), alice_wallet)\n\n # test attributes\n assert factory.name == \"DTFactory\"\n assert isinstance(factory.contract.caller, ContractCaller)\n assert factory.contract is not None\n assert factory.contract.address == dtfactory_address\n assert ContractBase.to_checksum_address(dtfactory_address) == dtfactory_address\n\n # test methods\n assert \"configured_address\" in dir(factory)\n assert factory.contract_name == \"DTFactory\"\n assert factory.address == dtfactory_address\n assert factory.events\n assert str(factory) == f\"{factory.contract_name} @ {factory.address}\"\n assert (\n \"createToken\" in factory.function_names\n ), \"The function createToken from the contract does not exist.\"\n assert \"getCurrentTokenCount\" in factory.function_names\n assert \"getTokenTemplate\" in factory.function_names\n assert not factory.is_tx_successful(\"nohash\")\n with pytest.raises(ValueError):\n assert factory.get_event_signature(\"noevent\")\n\n assert factory.subscribe_to_event(\"TokenCreated\", 30, None) is None\n assert factory.get_event_argument_names(\"TokenCreated\") == ()\n block = web3.eth.block_number\n block_confirmations = alice_ocean.config.block_confirmations.value\n assert (\n len(\n factory.get_event_logs(\n \"TokenCreated\",\n block - block_confirmations,\n block - block_confirmations,\n None,\n )\n )\n == 1\n ), \"The token was not created.\"\n log = factory.get_event_log(\n \"TokenCreated\", block - block_confirmations, block - block_confirmations, None\n )\n assert len(log) == 1, \"The token was not created.\"\n assert log[0][\"event\"] == \"TokenCreated\"\n assert log[0][\"address\"] == dtfactory_address\n\n with pytest.raises(TypeError):\n ContractBase.getLogs(None)\n\n\ndef test_static_functions(web3):\n assert (\n ContractBase.get_tx_receipt(web3, \"nohash\") is None\n ), \"The transaction receipt exists for the wrong hash.\"\n\n\ndef test_gas_price(web3, alice_wallet, dtfactory_address, monkeypatch):\n monkeypatch.setenv(\"GAS_PRICE\", \"1\")\n factory = MyFactory(web3, dtfactory_address)\n assert factory.createToken(\n \"foo_blob\", \"DT1\", \"DT1\", to_wei(1000), alice_wallet\n ), \"The token could not be created by configuring the gas price env var.\"\n","sub_path":"ocean_lib/web3_internal/test/test_contract_base.py","file_name":"test_contract_base.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345011776","text":"T = input()\nT = int(T)\nfor rep in range (0,T):\n n, k = map(int,input().split())\n s = input()\n goodness = 0\n\n for i in range(n//2):\n if s[i] != s[n-i-1]:\n goodness += 1\n 
\n r = abs(k - goodness)\n\n print ('Case #{}:'.format(rep+1), r)\n","sub_path":"Google Kick Start/2021/Round A/K-Goodness_String.py","file_name":"K-Goodness_String.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418729858","text":"import numpy as np\nimport pandas as pd\nimport argparse\nimport gensim\nimport tensorflow\n#tf.config.experimental_run_functions_eagerly(True)\nimport csv\nimport os\nimport uuid\nfrom sklearn.metrics import roc_auc_score, average_precision_score,precision_score, recall_score,roc_curve,auc\nfrom tensorflow.keras.layers import Dense,Input,Conv1D,MaxPooling1D,Dropout,LeakyReLU,LSTM,GRU,Embedding, Reshape, Dot, Multiply,Lambda,GlobalAveragePooling1D,Activation\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import optimizers\n#from keras.preprocessing import sequence\nimport keras_metrics\n\n\ndef pad_trun_sequences(seq, len_seq):\n new_seq = list()\n print('pad_trun_sequences length ', len(seq),'...')\n if len_seq==-1:\n \tlen_seq = max([len(element) for element in seq])\n for i in seq:\n #print(len(i))\n i = [j for j in i if j not in ['', 'nan']]\n length= len(i)\n if length>len_seq:\n new_seq = new_seq + [i[length-len_seq-1:-1]]\n if length<=len_seq:\n new_seq = new_seq + [['00000']*(len_seq-length) +i]\n \n return new_seq \n \n\ndef encoder(seq, word_index):\n seq_encoded = list()\n for i in seq:\n seq_encoded = seq_encoded + [[word_index[j] for j in i]] #if j!='nan' \n return seq_encoded\n\n\n\n#def model(base, layer_num, dim)\n\ndef training(train_data, val_data, test_data, onehot_train, onehot_val, onehot_test, embedding_matrix, curr_cross_val = 0, \n dim = 256, len_seq = 50, cnn_dim = 200, ksize=2,res_block = 1, epoch_num = 20, lr = 0.001):\n # fix random seed for reproducibility\n np.random.seed(100)\n from tcn import TCN\n batch_size, timesteps, input_dim = None, len_seq, dim\n \n \n #ksize=2\n sequence_input = Input(shape=(len_seq,), dtype='int32')\n embedded_sequences = Embedding(embedding_matrix.shape[0], dim,\n weights=[embedding_matrix],\n input_length=len_seq,\n trainable=False)(sequence_input) \n #o = Conv1D(cnn_dim, kernel_size=ksize, strides=ksize, padding='valid', dilation_rate=1, activation=None, use_bias=True)(embedded_sequences)\n #o = LeakyReLU(alpha=0.3)(o)\n #o = Conv1D(cnn_dim, kernel_size=ksize, strides=ksize, padding='valid',dilation_rate=1, activation=None, use_bias=True)(embedded_sequences)\n #o = LeakyReLU(alpha=0.3)(o)\n #o = Conv1D(cnn_dim, kernel_size=ksize, strides=ksize, padding='valid', dilation_rate=1, activation=None, use_bias=True)(embedded_sequences)\n #o = LeakyReLU(alpha=0.3)(o)\n #o = TCN(nb_filters = cnn_dim, kernel_size= ksize, dilations = [1,2,4,8], nb_stacks = res_block, dropout_rate = 0.3, return_sequences=True)(embedded_sequences)\n #o = LeakyReLU(alpha=0.3)(o)\n #o = TCN(nb_filters = cnn_dim, kernel_size= ksize, dilations = [1,2,4], nb_stacks = res_block, return_sequences=False)(embedded_sequences)\n o = GRU(cnn_dim,dropout = 0.2,return_sequences=True)(embedded_sequences)\n h = Dense(cnn_dim, activation='tanh')(o)\n s = Reshape((1, cnn_dim))(GlobalAveragePooling1D()(h))\n a = Dot(axes=-1)([o, s])\n a = Reshape((len_seq, 1))(Activation('softmax')(a))\n x = Multiply()([o, a])\n x = Lambda(lambda z:z*len_seq)(x)\n # AVG\n x = GlobalAveragePooling1D()(x)\n o = Dense(1, activation='sigmoid')(x)\n\n\n #o = tensorflow.keras.layers.Concatenate()([o1,o2])\n\n m = Model(inputs=[sequence_input], 
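A quick behavioural check of `pad_trun_sequences` from the record above: short sequences are left-padded with the `'00000'` token, long ones keep their tail. Note the original truncation slice `i[length-len_seq-1:-1]` drops the final token; this sketch uses the plain tail `s[-len_seq:]`, a simplifying assumption.

```python
def pad_trun(seq, len_seq):
    out = []
    for s in seq:
        s = [t for t in s if t not in ('', 'nan')]
        if len(s) > len_seq:
            out.append(s[-len_seq:])                        # keep the most recent events
        else:
            out.append(['00000'] * (len_seq - len(s)) + s)  # left-pad short sequences
    return out

assert pad_trun([['a', 'b']], 4) == [['00000', '00000', 'a', 'b']]
assert pad_trun([['a', 'b', 'c']], 2) == [['b', 'c']]
```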
outputs=[o])\n m.compile(optimizer=optimizers.Adam(lr=lr),loss='binary_crossentropy', metrics= ['accuracy',tensorflow.keras.metrics.AUC(), tensorflow.keras.metrics.Precision(), tensorflow.keras.metrics.Recall()])\n print(m.summary())\n #import datetime\n #curr_run_time= datetime.datetime.now()\n #if not os.path.exists(\"model_checkpoints/tcn/\"+target+\"/\"+str(run_num)+ \"/cv\"+ str(curr_cross_val)+\"/\"):\n #os.mkdir(\"model_checkpoints/tcn/\"+target+\"/\"+str(run_num)+ \"/cv\"+ str(curr_cross_val)+\"/\")\n #filepath = \"model_checkpoints/tcn/\"+target+\"/\"+str(run_num)+ \"/cv\"+ str(curr_cross_val)+\"/saved-model-{epoch:02d}-{val_loss:.4f}.hdf5\"\n #checkpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)\n \n \n #tsbd = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')\n callbacks_list = [tensorflow.keras.callbacks.EarlyStopping(monitor = 'val_loss', patience= 10, restore_best_weights = True)]\n #print(np.asarray(onehot_val))\n\n m.fit(np.array(train_data), np.array(onehot_train), validation_data=(np.array(val_data), np.array(onehot_val)), epochs=epoch_num, batch_size=128, verbose = 2, callbacks= callbacks_list)\n \n predicted_val = m.predict(np.array(val_data))\n predicted_test = m.predict(np.array(test_data))\n scores_val = m.evaluate(np.array(val_data), np.array(onehot_val), verbose=0)\n scores_test = m.evaluate(np.array(test_data), np.array(onehot_test), verbose=0)\n # Final evaluation of the model\n scores_val = m.evaluate(np.array(val_data), np.array(onehot_val), verbose=0)\n scores_test = m.evaluate(np.array(test_data), np.array(onehot_test), verbose=0)\n return [predicted_val, predicted_test,scores_val,scores_test]\n\n\n\n\n#Start of main script ....\ndef main_pipeline (perm = 'noperm', perm_file = 'None', lr = 0.001, epoch_num = 10, cnn_dim = 256, len_seq = 256, skip_gram = 1, dim = 50, win_size = 20, run_num = 0):\n\n\t#####################################\n\t# Load train/val/test subject lists #\n\t#####################################\n\n\ttrain_list = pd.read_csv('train_list_with_labels.csv', sep = '\\t') #set_learning/\n\tval_list = pd.read_csv('val_list_with_labels.csv', sep = '\\t') #set_learning/\n\ttest_list = pd.read_csv('test_list_with_labels.csv', sep = '\\t') #set_learning/\n\n\n\t######################################################\n\t# Load data and labels under three scenarios #\n\t######################################################\n\n\t#1) when no permutation\n\tif perm == 'noperm':\n\n\t\t\n\t\ttrain_data = pd.read_csv('train_lab_abnorm_sc1.csv', sep = ',')\n\t\tval_data = pd.read_csv('val_lab_abnorm_sc1.csv', sep = ',')\n\t\t\n\t\t####temp modification for testing the robustness of noperm data on perm test set\n\t\t#perm_sequences = pd.read_csv(perm_file+\".csv\")\n\n\t\ttest_perm_sequences = pd.read_csv('test_lab_abnorm_sc1.csv', sep=',')\n\t\t#val_data = perm_sequences[perm_sequences.subject_id.isin(val_list.subject_id)]\n\t\ttest_data = test_perm_sequences[test_perm_sequences.subject_id.isin(test_list.subject_id)]\n\t\t####### end of temp modification\n\t\t\n\t\ttrain_y = list(train_data.HF)\n\t\tval_y = list(val_data.HF)\n\t\ttest_y = list(test_data.HF)\n\t \n\n\t#2) when permutation\n\telif perm == 'both':\n\t \n\t\ttrain_data_1 = 
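The attention block assembled in `training()` above (Dense, softmax over timesteps, multiply, average pooling) reduces to a weighted average of the GRU outputs. A numpy sketch of that reduction with illustrative shapes; the single scoring vector `w` stands in for the record's two Dense layers.

```python
import numpy as np

T, d = 5, 4
o = np.random.rand(T, d)                        # GRU outputs, one row per timestep
w = np.random.rand(d)                           # stand-in scoring vector
scores = o @ w                                  # (T,) unnormalised attention scores
alpha = np.exp(scores) / np.exp(scores).sum()   # softmax over timesteps
context = (alpha[:, None] * o).sum(axis=0)      # (d,) attention-weighted average
assert context.shape == (d,) and np.isclose(alpha.sum(), 1.0)
```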
pd.read_csv('train_lab_abnorm_sc1.csv', sep = ',')\n\t\tval_data_1 = pd.read_csv('val_lab_abnorm_sc1.csv', sep = ',')\n\t\t\n\t\ttrain_y_1 = list(train_data_1.HF)\t\t\n\t\tval_y_1 = list(val_data_1.HF)\n\n\n\n\t\tperm_sequences = pd.read_csv(perm_file+\".csv\")\n\t\t\n\t\ttrain_data_2 = perm_sequences[perm_sequences.subject_id.isin(train_list.subject_id)]\n\t\tval_data_2 = perm_sequences[perm_sequences.subject_id.isin(val_list.subject_id)]\n\t\t\n\t\ttrain_y_2 = list(train_data_2.HF)\n\t\tval_y_2 = list(val_data_2.HF)\n\n\n\n\t\t#combine the two parts\n\t\ttrain_data = pd.concat([train_data_1[['subject_id','seq']], train_data_2[['subject_id','seq']]])\n\t\tval_data = pd.concat([val_data_1[['subject_id','seq']], val_data_2[['subject_id','seq']]])\n\t\ttrain_y = train_y_1 + train_y_2\n\t\tval_y = val_y_1 + val_y_2\n\t\t\n\t\ttest_perm_sequences = pd.read_csv('test_lab_abnorm_sc1.csv', sep=',')\n\n\t\t#Load the perm sequences\n\t\t#val_data = perm_sequences[perm_sequences.subject_id.isin(val_list.subject_id)]\n\t\ttest_data = test_perm_sequences[test_perm_sequences.subject_id.isin(test_list.subject_id)]\n\n\t\tdel perm_sequences\n\t\t\n\t\t#val_y = list(val_data.HF)\n\t\ttest_y = list(test_data.HF)\n\n\n\n\telse: #perm data only\n\t\tprint('reading perm data...')\n\t\tperm_sequences = pd.read_csv(perm_file+\".csv\")\n\t\t#test_perm_sequences = pd.read_csv(perm_file+\".csv\")\n\n\t\ttrain_data = perm_sequences[perm_sequences.subject_id.isin(train_list.subject_id)]\n\n\t\tprint('train data ready...')\n\t\t#val_data = perm_sequences[perm_sequences.subject_id.isin(val_list.subject_id)]\n\t\tval_data = perm_sequences[perm_sequences.subject_id.isin(val_list.subject_id)]\n\t\tprint('val data ready...')\n\t\t\n\t\ttest_perm_sequences = pd.read_csv('test_lab_abnorm_sc1.csv', sep=',')\n\t\ttest_data = test_perm_sequences[test_perm_sequences.subject_id.isin(test_list.subject_id)]\n\t\tprint('test data ready...')\n\n\t\tdel perm_sequences\n\n\t\ttrain_y = list(train_data.HF)\n\t\tval_y = list(val_data.HF)\n\t\ttest_y = list(test_data.HF)\n\n\n\t########################################\n\t# Map train/val/test data into vectors #\n\t########################################\n\t\n\n\tif perm_file == 'None':\n\n\t\tval_x = pad_trun_sequences([i.split(' ') for i in val_data.seq][:val_data.count()[0]], len_seq )\n\t\tdel val_data\n\t\ttrain_x = pad_trun_sequences([i.split(' ') for i in train_data.seq][:train_data.count()[0]], len_seq )\n\t\tdel train_data\n\n\n\telif perm=='perm':\n\t\t#val_x = pad_trun_sequences([i.split(' ') for i in val_data.seq][:val_data.count()[0]], len_seq )\n\t\t#val_x = pd.DataFrame(([' '.join(element) for element in val_x])).rename_axis(None)\n\t\t#val_x.columns = ['seq']\n\t\t#val_x.to_csv(perm_file+'_val_x_'+str(len_seq)+'.csv', index=False)\n\t\t#val_x = pd.read_csv(perm_file+'_val_x_'+str(len_seq)+'.csv', sep = '\\t')['seq'].tolist()\n\t\t#val_x = [element.split(' ') for element in val_x]\n\t\t#print(\"perm val data loaded\")\n\t\tdel val_data \n\t\n\t\t#train_x = pad_trun_sequences([i.split(' ') for i in train_data.seq][:train_data.count()[0]], len_seq )\n\t\t#train_x = pd.DataFrame(([' '.join(element) for element in train_x])).rename_axis(None)\n\t\t#train_x.columns = ['seq']\n\t\t#train_x.to_csv(perm_file+'_train_x_'+str(len_seq)+'.csv', index=False)\n\t\t#train_x = pd.read_csv(perm_file+'_train_x_'+str(len_seq)+'.csv', sep = '\\t')['seq'].tolist()\n\t\t#train_x = [element.split(' ') for element in train_x]\n\t\t#print(\"perm train data loaded\")\n\t\tdel 
train_data\n\t\n\t\n\n\ttest_x = pad_trun_sequences([i.split(' ') for i in test_data.seq][:test_data.count()[0]], len_seq )\n\tdel test_data\n\tprint('train/val/test sequences ready...')\n\t\n\t#perm_sequences = pd.read_csv(perm_file + \".csv\")\n\t#pad_perm_sequences = pad_trun_sequences([i.split(' ') for i in perm_sequences.seq][:perm_sequences.count()[0]], len_seq ) \n\n\n\t#train_data = pd.read_csv('train_lab_abnorm_sc1.csv', sep = ',')\n\t#val_data = pd.read_csv('val_lab_abnorm_sc1.csv', sep = ',')\n\t#test_data = pd.read_csv('test_lab_abnorm_sc1.csv', sep = ',')\n\t#pad_ori_sequences = pad_trun_sequences([i.split(' ') for i in train_data.seq][:train_data.count()[0]], len_seq )+pad_trun_sequences([i.split(' ') for i in val_data.seq][:val_data.count()[0]], len_seq )+pad_trun_sequences([i.split(' ') for i in test_data.seq][:test_data.count()[0]], -1 ) \n\n\t#_model = gensim.models.Word2Vec(pad_ori_sequences, sg=1, window = 5, iter=5, size= 256, min_count=1, workers=20)#train_x+val_x+test_x\n\t#_model.save(\"word2vec.model\")\n\t#print(' model saved...')\n\t#_model = gensim.models.Word2Vec.load(\"word2vec.model\")\n\n\n\t_model = gensim.models.Word2Vec.load(\"word2vec.model\")\n\tprint('w2v model loaded')\n\tembeddings_index = {}\n\tprint(len(_model.wv.vocab))\n\tembedding_matrix = np.zeros((len(_model.wv.vocab) + 1, dim))\n\n\tfor i, word in enumerate(_model.wv.vocab):\n\t\tcoefs = np.asarray(_model.wv[word], dtype='float32')\n\t\tembeddings_index[word] = coefs\n\t\tembedding_matrix[i] = coefs\n\tprint('Found %s word vectors.' % len(embeddings_index))#embedding_matrix.shape[0])\n\n\n\t#generate word -> word index mapping\n\tword_index = {}\n\tfor i, word in enumerate(_model.wv.vocab):\n\t\tword_index[word] = i\n\n\t#map code sequence to code index sequence\t\n\t\n\t\n\n\tif perm_file == 'None':\n\t\ttrain_x = encoder(train_x, word_index)\n\t\tval_x = encoder(val_x, word_index)\n\telif perm == 'perm':\n\t\t#train_x = encoder(train_x, word_index)\n\t\t#pd.DataFrame(np.array(train_x)).to_csv(perm_file+'_train_x_encoded'+str(len_seq)+'.csv', index=False)\n\t\t#print(\"perm train data encoded saved\")\n\t\ttrain_x = np.array(pd.read_csv(perm_file+'_train_x_encoded'+str(len_seq)+'.csv', sep = ','))\n\t\t#val_x = encoder(val_x, word_index)\n\t\t#pd.DataFrame(np.array(val_x)).to_csv(perm_file+'_val_x_encoded'+str(len_seq)+'.csv', index=False)\n\t\t#print(\"perm val data encoded saved\")\n\t\tval_x = np.array(pd.read_csv(perm_file+'_val_x_encoded'+str(len_seq)+'.csv', sep = ','))\n\n\ttest_x = encoder(test_x, word_index)\n\n\n\t#################################\n\t# Train predictive model for HF #\n\t#################################\n\tacc_val = []\n\tacc_test = []\n\tauc_vals = []\n\tauc_test = []\n\tprauc_vals = []\n\tprauc_test = []\n\tprec_val = []\n\tprec_test = []\n\trec_val = []\n\trec_test = []\n\n\tfor curr_cross_val in np.arange(1):\n\t\tpredicted_val, predicted_test,scores_val,scores_test = training(train_x, val_x, test_x, train_y, val_y, test_y, embedding_matrix,\n\t\t\tdim = dim, len_seq = len_seq, cnn_dim = cnn_dim, epoch_num = epoch_num, lr = lr)\n #TODO: pick the threshold that gives the best F1\n\t\tacc_val.append(scores_val[1])\n\t\tacc_test.append(scores_test[1])\n\t\tfpr, tpr, _ = roc_curve(val_y,predicted_val)\n\t\tauc_vals.append(auc(fpr, tpr))\n\t\tfpr, tpr, _ = roc_curve(test_y,predicted_test)\n\t\tauc_test.append(auc(fpr, 
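A standalone version of the embedding-matrix construction above, on a toy corpus. The record targets the gensim 3.x API (`model.wv.vocab`, `size=`); this sketch uses the gensim 4 names (`index_to_key`, `vector_size`). As in the record, the matrix gets one extra all-zero row beyond the vocabulary.

```python
import numpy as np
from gensim.models import Word2Vec

sents = [["aa", "bb", "cc"], ["aa", "cc"]]
model = Word2Vec(sents, vector_size=8, window=2, min_count=1, sg=1)

vocab = model.wv.index_to_key                       # word -> row mapping
word_index = {w: i for i, w in enumerate(vocab)}
emb = np.zeros((len(vocab) + 1, model.vector_size)) # extra zero row, as in the record
for w, i in word_index.items():
    emb[i] = model.wv[w]
```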
tpr))\n\t\tprec_val.append(precision_score(val_y,(predicted_val>0.5)*1))\n\t\tprec_test.append(precision_score(test_y,(predicted_test>0.5)*1))\n\t\trec_val.append(recall_score(val_y,(predicted_val>0.5)*1))\n\t\trec_test.append(recall_score(test_y,(predicted_test>0.5)*1))\n\t\tprauc_vals.append(average_precision_score(val_y,predicted_val))\n\t\tprauc_test.append(average_precision_score(test_y,predicted_test))\n\t\tif not os.path.exists(\"tcn_abnormlabs_baseline/\"):\n\t\t\ttensorflow.gfile.MkDir(\"tcn_abnormlabs_baseline/\")\n\t\tif not os.path.exists(\"tcn_abnormlabs_baseline/\"+ \"/cv\"):\n\t\t\ttensorflow.gfile.MkDir(\"tcn_abnormlabs_baseline/\"+ \"/cv\")\n\t\tif not os.path.exists(\"tcn_abnormlabs_baseline/\"+ \"/cv\"+ str(curr_cross_val)+\"/\"):\n\t\t\ttensorflow.gfile.MkDir(\"tcn_abnormlabs_baseline/\"+ \"/cv\"+ str(curr_cross_val)+\"/\")\n\n\t\t#with open(\"tcn_abnormlabs_baseline/\"+ \"/cv\"+ str(curr_cross_val)+\"/\"+\"test_pred\"+str(uuid.uuid4())+\".csv\", 'a', newline='') as csvFile: \n\t\t\t#writer = csv.DictWriter(csvFile, fieldnames=['predicted_test','true_test'])\n\t\t\t#writer.writerow({'predicted_test':'predicted_test','true_test':'true_test'})\n\t\t\t#for i in range(len(test_y)):\n\t\t\t\t#writer.writerow({'predicted_test':predicted_test[i],'true_test':test_y[i]})\n\t\t#csvFile.close()\n\n\t\t#with open(\"tcn_abnormlabs_baseline/\"+ \"/cv\"+ str(curr_cross_val)+\"/\"+\"validation_pred\"+str(uuid.uuid4())+\".csv\", 'a', newline='') as csvFile: \n\t\t\t#writer = csv.DictWriter(csvFile, fieldnames=['predicted_val','true_val'])\n\t\t\t#writer.writerow({'predicted_val':'predicted_val','true_val':'true_val'})\n\t\t\t#for i in range(len(val_y)):\n\t\t\t\t#writer.writerow({'predicted_val':predicted_val[i],'true_val':val_y[i]})\n\t\t#csvFile.close()\n\t\n\twith open(\"tcn_abnormlabs_baseline/\"+\"sanity_check_exp_logs_gru_att_test_on_original_p1_100_unique0_fixed_w2v.csv\", 'a', newline='') as csvFile: \n\t\twriter = csv.DictWriter(csvFile, fieldnames=['acc_val','auc_vals','prec_val','rec_val', 'acc_test','auc_test','prec_test','rec_test',\"prauc_vals\",\"prauc_test\",\"dim\",\n \t\t\"cnn_dim\",\"len_seq\", \"perm\",\"lr\",\"epoch_num\",\"len_seq\", \"curr_dim\",\"win_size\", \"perm_file\",\"run_num\"])\n\t\twriter.writerow({'acc_val': str(np.mean(acc_val)),'auc_vals': str(np.mean(auc_vals)),'prec_val': str(np.mean(prec_val)),'rec_val': str(np.mean(rec_val)),\n \t'acc_test': str(np.mean(acc_test)),'auc_test': str(np.mean(auc_test)),'prec_test': str(np.mean(prec_test)),'rec_test': str(np.mean(rec_test)), \n \t\"prauc_vals\":str(np.mean(prauc_vals)),\"prauc_test\":str(np.mean(prauc_test)),\n \t'dim':str(dim),\"cnn_dim\":str(cnn_dim),\"len_seq\":str(len_seq), 'perm': perm, \"lr\": str(lr), \"epoch_num\":epoch_num, \n \t\"len_seq\": str(len_seq), \"curr_dim\":str(dim), \"win_size\": str(win_size), \"perm_file\": str(perm_file), \"run_num\": str(run_num)})\n\tcsvFile.close()\n\n\n\n\n\nimport gc\nrun_num=0\nlr = 0.001\nskip_gram = 1\nepoch_num = 100\n\nwin_size = 5\ndim=256\n#perm_file = 'None'\n#res_block = 1\n#for win_size in [5,10,20]:\nfor iterator in [1,2,3,4,5]:\n\tfor len_seq in [256]:#128,256,\n\t\tfor perm in ['noperm']:#'perm', \n\t\t\tfor cnn_dim in [256]:#,64,256\n\t\t\t\tfor perm_file in ['tcn_abnormlabs_baseline/permutation_percent_1_100_unique0_label']:#,'tcn_abnormlabs_baseline/permutation_1_10_label','tcn_abnormlabs_baseline/permutation_1_6_label','tcn_abnormlabs_baseline/permutation_1_1_label', 'tcn_abnormlabs_baseline/permutation_1_2_label']:\t\t\n\t\t\t\t\t#for 
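The evaluation block above derives AUC from `roc_curve` plus `auc`; sklearn's `roc_auc_score` collapses that into one call. Toy labels and scores for illustration:

```python
from sklearn.metrics import auc, roc_auc_score, roc_curve

y = [0, 0, 1, 1]
p = [0.1, 0.4, 0.35, 0.8]
fpr, tpr, _ = roc_curve(y, p)
assert abs(auc(fpr, tpr) - roc_auc_score(y, p)) < 1e-9   # both give 0.75 here
```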
perm_file in ['tcn_abnormlabs_baseline/permutation_1_10_label','tcn_abnormlabs_baseline/permutation_1_6_label','tcn_abnormlabs_baseline/permutation_1_1_label', 'tcn_abnormlabs_baseline/permutation_1_2_label']:\n\t\t\t\t\trun_num = run_num+1\n\t\t\t\t\tprint(\"iteration: \", iterator, \" run_num: \", run_num)\n\t\t\t\t\t#if iterator ==1 and run_num < 4:\n\t\t\t\t\t\t#continue;\n\t\t\t\t\tif perm=='noperm':\n\t\t\t\t\t\t#epoch_num = 100\n\t\t\t\t\t\tperm_file = 'None'\n\t\t\t\t\t#else: epoch_num = 5\n\t\t\t\t\tmain_pipeline (perm = perm, perm_file = perm_file, lr = lr, epoch_num = epoch_num, cnn_dim = cnn_dim, \n\t\t\t\t\t\tlen_seq = len_seq, skip_gram = skip_gram, dim = dim, win_size = win_size, run_num = run_num)\n\t\t\t\t\ttensorflow.keras.backend.clear_session()\n\t\t\t\t\tgc.collect()","sub_path":"gru_att_perm_p1_100_unique0_tested_on_original.py","file_name":"gru_att_perm_p1_100_unique0_tested_on_original.py","file_ext":"py","file_size_in_byte":17550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221689063","text":"# -*- coding unix -*-\n\nimport lmrt4u.renderer as renderer\nimport lmrt4u.loader as loader\nimport lmrt4u.validator as validator\nimport lmrt4u.parser as parser\n\ndef process(input):\n \"\"\"Proceses and passes input between modules\"\"\"\n if input is None:\n return\n activity = input[\"activity\"]\n if (activity == \"Burndown Chart\" or activity == \"Burnup Chart\" or activity == \"Burndown Exit\"):\n contents = loader.loadDocument()\n if (validator.validate(contents)):\n backlog = parser.parseBacklogContents(contents)\n for sprintKey in backlog.sprints:\n sprint = backlog.sprints[sprintKey]\n renderer.renderAscii(sprint.totalPoints, sprint.pointList, sprint.totalDays)\n else:\n print(\"There was an error parsing information from the Lmrt4ufile, please check contents!\")\n","sub_path":"lmrt4u/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"254440368","text":"# Microsoft's current CVE list from\n# circl.lu\n# pulling all the CVE's\n\nimport urllib.request\nimport json\nimport pandas as pd\n\n# getting the url\nf = urllib.request.urlopen('https://cve.circl.lu/api/search/microsoft/windows')\n\n# decoding the text\njson_string = f.read().decode('utf-8')\n\n# parsing the information\nparsed_json = json.loads(json_string)\n\nrecords = []\nnum = 0\nif id != None:\n cve = parsed_json[num].get('id', None)\n cwe = parsed_json[num].get('cwe', None)\n summary = parsed_json[num]['summary']\n published = parsed_json[num]['Published']\n last_modified = parsed_json[num].get('last-modified', None)\n records.append((cve, cwe, summary, published, last_modified))\n num = num + 1\nelse:\n# export to csv\n df = pd.DataFrame(records, columns=['cve', 'cwe', 'summary', 'published', 'last_modified'])\n df.to_csv('cve_microsoft.csv', index=False, encoding='utf-8')\n f.close()\n","sub_path":"Scraping/microsoft_cve.py","file_name":"microsoft_cve.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"496599574","text":"# -*- coding: utf-8 -*-\nfrom keras.layers import Input, LSTM, Dense, concatenate, crf, RepeatVector,\\\n TimeDistributed, Multiply, Embedding, Convolution1D, Bidirectional, GlobalAveragePooling1D, Flatten, Dropout\nfrom keras.layers.core import Lambda\nfrom keras.models import Model\nfrom keras 
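The microsoft_cve.py record above has had its record loop replaced by an `if id != None:` / `else:` pair on the builtin `id`, so at most one CVE is ever read and the CSV export sits on the wrong branch. A hedged sketch of the presumably intended shape, on a made-up entry:

```python
import pandas as pd

# stand-in for the parsed circl.lu API response in the record
parsed_json = [{"id": "CVE-2020-0601", "cwe": "CWE-295",
                "summary": "spoofing vulnerability", "Published": "2020-01-14"}]

records = [(e.get("id"), e.get("cwe"), e.get("summary"),
            e.get("Published"), e.get("last-modified")) for e in parsed_json]

# build and export the DataFrame once, after the loop
df = pd.DataFrame(records, columns=["cve", "cwe", "summary", "published", "last_modified"])
df.to_csv("cve_microsoft.csv", index=False, encoding="utf-8")
```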
import backend as K\nimport numpy as np\nimport sys\nsys.path.append('../..')\nfrom src.Sentence2Matrix import Sentence2Matrix\n\nimport tensorflow as tf\nfrom keras.backend import tensorflow_backend\nconfig = tf.ConfigProto(\n gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5),\n device_count={'GPU': 1}\n)\nconfig.gpu_options.allow_growth = True\ntensorflow_backend.set_session(tf.Session(config=config))\n\n\n''' this model is a model implemented https://arxiv.org/pdf/1511.04108.pdf\n with LSTM/CNN with attention to the question and shared the cnn layer'''\nclass AllenAIModel:\n def __init__(self):\n self.sentence_matrix_helper = Sentence2Matrix(\"../../model_new_and_wiki\")\n self.sentence_len = self.sentence_matrix_helper.sentence_len\n self.word_dim = self.sentence_matrix_helper.word_dim\n self.model = None\n self.margin = 0.2\n self.db_file = \"../../BoP2017_DBAQ_dev_train_data/BoP2017-DBQA.pre_train.txt\"\n self.batch_m = 120\n self.output_dim = 128\n\n def compile_model(self):\n def cosine_similarity(x):\n q_vec, a_vec = x\n q_vec = K.l2_normalize(q_vec, axis=1)\n a_vec = K.l2_normalize(a_vec, axis=1)\n return K.sum(q_vec * a_vec, axis=1, keepdims=True)\n\n def cosine_ranking_loss(y_true, y_pred):\n cos_true = y_pred[0::2]\n cos_true = K.reshape(cos_true, shape=(-1,))\n cos_false = y_pred[1::2]\n cos_false = K.reshape(cos_false, shape=(-1,))\n cosine_ = cos_true - cos_false\n maximum_ = K.maximum(K.zeros(shape=(self.batch_m,)), self.margin - cosine_)\n return K.mean(maximum_, axis=-1, keepdims=False)\n\n dropout_rate = 0.05\n q_input = Input(shape=(self.sentence_len, self.word_dim), name=\"question_input_layer\")\n question_answer_shared_biLSTM = Bidirectional(\n LSTM(self.output_dim, dropout=dropout_rate, return_sequences=True)\n )\n # question represent\n q_lstm = question_answer_shared_biLSTM(q_input)\n share_conv_layer = Convolution1D(\n filters=self.output_dim,\n kernel_size=3, name='share_conv_layer',\n activation='tanh')\n q_pooling_layer = GlobalAveragePooling1D(\n name='question_pooling_layer')(Dropout(dropout_rate)(share_conv_layer(q_lstm)))\n q_output_layer = Lambda(lambda x: K.reshape(x, (-1, self.output_dim,)), name='q_output_layer')(q_pooling_layer)\n\n # answer representation in lstm layer\n a_input = Input(shape=(self.sentence_len, self.word_dim), name=\"answer_input_layer\")\n a_lstm = question_answer_shared_biLSTM(a_input)\n\n # the attention of question using the out put of q_lstm\n # add softmax weight\n alpha_tanh = Dense(self.sentence_len, activation='tanh')(concatenate([q_lstm, a_lstm]))\n alpha = Dense(1, activation='softmax')(alpha_tanh)\n softmax_weight = Multiply()([a_lstm, alpha])\n\n\n a_pooling_layer = GlobalAveragePooling1D(\n name='answer_pooling_layer'\n )(Dropout(dropout_rate)(share_conv_layer(softmax_weight)))\n # )(a_conv_layer)\n a_output_layer = Lambda(lambda x: K.reshape(x, (-1, self.output_dim,)), name='a_output_layer')(a_pooling_layer)\n\n cosine_layer = Lambda(function=cosine_similarity, name='cosine_layer')([q_output_layer, a_output_layer])\n # hidden_layer = Dropout(dropout_rate)(Dense(128, activation='tanh')(pooling_layer))\n # out_layer = Dropout(dropout_rate)(Dense(128, activation='tanh')(hidden_layer))\n self.model = Model(inputs=[q_input, a_input], outputs=[cosine_layer])\n self.model.compile(optimizer='rmsprop', loss=cosine_ranking_loss)\n\n # self.model.summary()\n\n ''':return each batch of question + correct answer + wrong answer'''\n def get_batch_generator(self):\n def parse_line(line):\n keys = line.split('\\t')\n 
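A numpy check of the `cosine_ranking_loss` arithmetic above: predictions are interleaved (positive pair at even indices, negative at odd) and a margin hinge is averaged over pairs. The scores are made up.

```python
import numpy as np

margin = 0.2
y_pred = np.array([0.9, 0.3, 0.6, 0.7])          # [pos, neg, pos, neg]
cos_true, cos_false = y_pred[0::2], y_pred[1::2]
loss = np.maximum(0.0, margin - (cos_true - cos_false)).mean()
assert np.isclose(loss, 0.15)   # pair 1 clears the margin; pair 2 contributes 0.3
```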
return keys[0], keys[1], keys[2]\n with open(self.db_file, mode='r', encoding='utf-8') as f:\n line = f.readline()\n input_qs = []\n input_as = []\n output_vectors = []\n batch_index = 0\n while line != -1 and len(line) > 0:\n question, correct, wrong = parse_line(line)\n q = self.sentence_matrix_helper.get_sentence_matrix(question)\n correct_answer = self.sentence_matrix_helper.get_sentence_matrix(correct)\n wrong_answer = self.sentence_matrix_helper.get_sentence_matrix(wrong)\n input_qs.append(q)\n input_qs.append(q)\n input_as.append(correct_answer)\n input_as.append(wrong_answer)\n for i in range(2):\n output_vectors.append(np.zeros(1,))\n batch_index += 1\n if batch_index >= self.batch_m:\n yield ([np.array(input_qs), np.array(input_as)], [np.array(output_vectors)])\n batch_index = 0\n input_qs = []\n input_as = []\n output_vectors = []\n line = f.readline()\n\n def fit(self, sample_num):\n self.model.fit_generator(self.get_batch_generator(), steps_per_epoch=sample_num / self.batch_m, epochs=1)\n\n def save_model(self, model_file):\n self.model.save_weights(model_file)\n\n def load_model(self, model_file):\n try:\n self.compile_model()\n self.model.load_weights(model_file)\n return self.model\n except:\n print(\"warning the model_file is not the model belong this module\")\n return None\n ''':param input_qes and input_ees should be type() as np.array'''\n def predict(self, questions, answers):\n input_qs = []\n input_as = []\n for q, a in zip(questions, answers):\n q = self.sentence_matrix_helper.get_sentence_matrix(q)\n a = self.sentence_matrix_helper.get_sentence_matrix(a)\n input_qs.append(q)\n input_as.append(a)\n input_qs = np.array(input_qs)\n input_as = np.array(input_as)\n return self.model.predict([input_qs, input_as])\n\n def evaluation(self, max_iter=None, write_into_file=True):\n def parse_line(line):\n keys = line.split('\\t')\n try:\n label = int(keys[0])\n except:\n label = int(keys[0][1])\n return keys[1], keys[2], label\n\n def compare_score_and_label(score_list, label_list):\n rank_list = np.ones((len(score_list),), dtype=np.int32)\n label_list = np.array(label_list)\n score_list = np.array(score_list)\n sort_list = np.argsort(score_list)[::-1]\n r = 1\n for arg_index in sort_list:\n rank_list[arg_index] = r\n r += 1\n rank_list_cp = np.array(rank_list)\n rank_list_cp[label_list == 0] = 0\n s = np.sum(rank_list_cp)\n if s == 0:\n score = 0\n else:\n score = 1 / s\n return score, rank_list\n\n cur_iter = 0\n q_buffer = []\n a_buffer = []\n ls = []\n q_label_ls = []\n qs = []\n ans = []\n preds = []\n scores = []\n last_q = None\n total_scores = 0\n q_num = 0\n with open(\"../../BoP2017_DBAQ_dev_train_data/BoP2017-DBQA.dev.txt\", mode='r', encoding='utf-8') as f:\n line = f.readline()\n while line != -1 and len(line) > 0:\n q, a, l = parse_line(line)\n ls.append(l)\n qs.append(q)\n ans.append(a)\n # predict once when question is changed\n if last_q is None:\n last_q = q\n q_num += 1\n elif last_q != q:\n last_q = q\n q_num += 1\n score_list = self.predict(q_buffer, a_buffer)\n score_list = [s[0] for s in score_list]\n for s in score_list:\n scores.append(s)\n score, rank = compare_score_and_label(score_list, q_label_ls)\n total_scores += score\n for r in rank:\n preds.append(r)\n q_buffer = []\n a_buffer = []\n q_label_ls = []\n\n q_label_ls.append(l)\n q_buffer.append(q)\n a_buffer.append(a)\n line = f.readline()\n if max_iter is not None:\n cur_iter += 1\n if cur_iter > max_iter:\n break\n\n # predict the last question\n score_list = self.predict(q_buffer, 
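A standalone check of the scoring idea in `compare_score_and_label` above: candidates are ranked by descending score and a question scores 1 over the summed ranks of its correct answers, which is the reciprocal rank when there is a single correct answer. Toy scores and labels:

```python
import numpy as np

scores = np.array([0.2, 0.9, 0.5])
labels = np.array([0, 1, 0])                 # the second candidate is correct

ranks = np.empty(len(scores), dtype=int)
ranks[np.argsort(scores)[::-1]] = np.arange(1, len(scores) + 1)
assert 1 / ranks[labels == 1].sum() == 1.0   # correct answer ranked first
```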
a_buffer)\n score_list = [s[0] for s in score_list]\n for s in score_list:\n scores.append(s)\n q_correct, rank = compare_score_and_label(score_list, q_label_ls)\n total_scores += score\n for r in rank:\n preds.append(r)\n\n ret = total_scores / q_num\n print(ret)\n if write_into_file:\n print('writing to file')\n detail_lines = []\n scores_lines = []\n for pred, l, q, a, score in zip(preds, ls, qs, ans, scores):\n line = \"\\t\".join([str(pred), str(l), q, a])\n detail_lines.append(line)\n scores_lines.append(str(score))\n with open(\"../../BoP2017_DBAQ_dev_train_data/allenAI5ScoreResult.txt\", mode='w', encoding='utf-8') as f:\n # print(scores_lines)\n f.writelines(\"\\n\".join(scores_lines))\n with open(\"../../BoP2017_DBAQ_dev_train_data/allenAI5Result.txt\", mode='w', encoding='utf-8') as f:\n # print(detail_lines)\n f.writelines(detail_lines)\n return ret\n\n\nif __name__ == '__main__':\n m = AllenAIModel()\n m.compile_model()\n epochs = 20\n for epoch in range(epochs):\n # 224160\n m.fit(224160)\n score = m.evaluation(5000, write_into_file=False)\n model_save_name = \"AllenAI5_aver_attention_epoch\" + str(epoch) + '_' + str(score)\n m.save_model(model_save_name)\n print('AllenAI5 aver_attention epoch', str(epoch), 'finished')\n\n\n\n","sub_path":"src/models/AllenAIModel5.py","file_name":"AllenAIModel5.py","file_ext":"py","file_size_in_byte":10713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"148429347","text":"import numpy as np\nimport argparse\nimport cv2\nimport time\nimport os.path\nfrom util import writePFM\nfrom disp_mgr import dispMgr\n\nparser = argparse.ArgumentParser(description='Disparity Estimation')\nparser.add_argument('--input-left', default='../../data/Synthetic/TL3.png', type=str, help='input left image')\nparser.add_argument('--input-right', default='../../data/Synthetic/TR3.png', type=str, help='input right image')\nparser.add_argument('--output', default='./TL3.pfm', type=str, help='left disparity map')\n\ndef main():\n args = parser.parse_args()\n\n print(args.output)\n print('Compute disparity for %s' % args.input_left)\n img_left = cv2.imread(args.input_left)\n img_right = cv2.imread(args.input_right)\n tic = time.time()\n DM = dispMgr(img_left,img_right)\n disp, outlier = DM.computeDisp()\n #cv2.imwrite('outlier/' + os.path.split(args.output)[1][:-3] + 'png', outlier)\n toc = time.time()\n writePFM(args.output, disp)\n print('Elapsed time: %f sec.' 
% (toc - tic))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"final/src/test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457212640","text":"__all__ = [\"adapted_fastai_learner\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.metrics import *\nfrom mantisshrimp.engines.fastai.imports import *\nfrom mantisshrimp.engines.fastai.adapters import *\n\n\ndef adapted_fastai_learner(\n dls: List[Union[DataLoader, fastai.DataLoader]],\n model,\n metrics=None,\n device=None,\n **kwargs,\n):\n # convert dataloaders to fastai\n fastai_dls = []\n for dl in dls:\n if isinstance(dl, DataLoader):\n fastai_dl = convert_dataloader_to_fastai(dl)\n elif isinstance(dl, fastai.DataLoader):\n fastai_dl = dl\n else:\n raise ValueError(f\"dl type {type(dl)} not supported\")\n\n fastai_dls.append(fastai_dl)\n\n device = device or fastai.default_device()\n fastai_dls = fastai.DataLoaders(*fastai_dls).to(device)\n\n # convert metrics to fastai\n metrics = metrics or []\n fastai_metrics = [\n FastaiMetricAdapter(metric) if isinstance(metric, Metric) else metric\n for metric in metrics\n ]\n\n return fastai.Learner(dls=fastai_dls, model=model, metrics=fastai_metrics, **kwargs)\n","sub_path":"mantisshrimp/engines/fastai/learner/adapted_fastai_learner.py","file_name":"adapted_fastai_learner.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178436731","text":"from __future__ import print_function\n\nimport sys\nsys.path.append('../')\n\nfrom include_lite import *\nimport simple_district\n\nimport pickle, json, fnmatch\nfrom collections import namedtuple\n\ndef json_read_obj(f):\n return json.load(f, object_hook = lambda d: namedtuple('X', d.keys())(*d.values()) )\n\ndef dump_conf(conf, f):\n json.dump(conf.__dict__, f)\n\ndef load_conf(f):\n return json_read_obj(f)\n\ndef districts_to_json(dist_arr):\n dd = {}\n dd['code'] = [d.get_code() for d in dist_arr]\n dd['vote'] = [d.get_dem_ratio() for d in dist_arr]\n\n return dd\n\ndef districts_from_json(js):\n assert 'code' in js\n assert 'vote' in js\n assert len(js['code']) == len(js['vote'])\n\n ret = []\n \n for i in range(len(js['vote'])):\n ret.append(simple_district.SimpleDistrict(js['code'][i], js['vote'][i]))\n\n return ret\n\ndef dump_districts(dist_arr, f):\n json.dump(districts_to_json(dist_arr), f)\n\ndef load_districts(f):\n return districts_from_json(json.load(f))\n\ndef recursive_files(path):\n for root, directories, filenames in os.walk(path):\n for filename in filenames:\n yield root, filename\n\ndef repickle_samples(path):\n for d, f in recursive_files(path):\n \n if fnmatch.fnmatch(f, \"z*.pickle\"):\n full_in = os.path.join(d, f)\n full_out = os.path.join(d, f.replace('pickle', 'json'))\n \n with open(full_in, 'rb') as fh:\n dist_arr = pickle.load(fh)\n \n with open(full_out, 'w') as fh:\n my_serializer.dump_districts(dist_arr, fh)\n \n if f == 'setup.pickle':\n full_in = os.path.join(d, f)\n full_out = os.path.join(d, f.replace('pickle', 'json'))\n\n print(full_out)\n \n with open(full_in, 'rb') as fh:\n conf = pickle.load(fh)\n \n with open(full_out, 'w') as fh:\n my_serializer.dump_conf(conf, fh)\n\nif __name__ == '__main__':\n 
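A round-trip demo of the `object_hook` trick in my_serializer's `json_read_obj` above: every JSON object becomes an anonymous namedtuple, so config fields read as attributes. Keys must be valid Python identifiers for this to work.

```python
import json
from collections import namedtuple

def read_obj(text):
    return json.loads(text, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))

conf = read_obj('{"code": "abc", "vote": 0.42}')
assert conf.code == "abc" and conf.vote == 0.42
```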
repickle_samples('simulations')\n","sub_path":"anton-code/gerrymander-to/old_code/src/my_serializer.py","file_name":"my_serializer.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560894447","text":"# -*- coding: utf-8 -*-\nfrom zope.interface import implements\nfrom zope.interface import alsoProvides\n\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom plone.app.portlets.portlets import base\n\nfrom zope import schema\nfrom zope.component import getUtility\n\nfrom zope.formlib import form\nfrom zope.schema.vocabulary import SimpleVocabulary, SimpleTerm\n\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\nfrom plone.registry.interfaces import IRegistry\nfrom zope.schema.interfaces import IContextSourceBinder\nfrom collective.prettydate.interfaces import IPrettyDate\n\nfrom collective.facebook.portlets import _\nfrom collective.facebook.portlets.config import GRAPH_URL\nfrom collective.facebook.portlets.config import PROJECTNAME\n\nfrom zope.security import checkPermission\n\nfrom plone.memoize import ram\nfrom time import time\n\nfrom DateTime import DateTime\n\nimport json\nimport urllib\n\nimport logging\n\nlogger = logging.getLogger(PROJECTNAME)\n\n\ndef FacebookAccounts(context):\n registry = getUtility(IRegistry)\n accounts = registry['collective.facebook.accounts']\n if accounts:\n keys = accounts.keys()\n else:\n keys = []\n\n if keys:\n for i in keys:\n vocab = SimpleVocabulary(\n [SimpleTerm(value=id, title=accounts[id]['name']) for id in keys])\n else:\n vocab = SimpleVocabulary.fromValues(keys)\n\n return vocab\n\n\nalsoProvides(FacebookAccounts, IContextSourceBinder)\n\n\ndef cache_key_simple(func, var):\n #let's memoize for 20 minutes or if any value of the portlet is modified\n timeout = time() // (60 * 20)\n return (timeout,\n var.data.wall_id,\n var.data.only_self,\n var.data.max_results)\n\n\nclass IFacebookWallPortlet(IPortletDataProvider):\n \"\"\"A portlet\n\n It inherits from IPortletDataProvider because for this portlet, the\n data that is being rendered and the portlet assignment itself are the\n same.\n \"\"\"\n\n header = schema.TextLine(title=_(u'Header'),\n description=_(u\"The header for the portlet. \"\n \"Leave empty for none.\"),\n required=False)\n\n fb_account = schema.Choice(title=_(u'Facebook account'),\n description=_(u\"Which Facebook account to \"\n \"use.\"),\n required=True,\n source=FacebookAccounts)\n\n wall_id = schema.TextLine(title=_(u'Wall ID'),\n description=_(u\"ID for the wall you are trying \"\n \"to fetch from. More info in: \"\n \"https://developers.facebook.com/\"\n \"docs/reference/api/\"),\n required=True)\n\n max_results = schema.Int(title=_(u'Maximum results'),\n description=_(u\"The maximum results number.\"),\n required=True,\n default=5)\n\n only_self = schema.Bool(title=_(u'Show only from owner'),\n description=_(u\"Only show posts made by the \"\n \"wall owner.\"),\n required=False)\n\n pretty_date = schema.Bool(title=_(u'Pretty dates'),\n description=_(u\"Show dates in a pretty format \"\n \"(ie. 
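`cache_key_simple` above folds `time() // (60 * 20)` into the memoization key, so `ram.cache` reuses a result for roughly twenty minutes: the key only changes when the integer division ticks over. The bucketing arithmetic on a fixed timestamp (1_200_000_000 is an arbitrary bucket-aligned epoch second):

```python
t0 = 1200 * 1_000_000
assert t0 // 1200 == (t0 + 600) // 1200      # 10 minutes later: same cache key
assert t0 // 1200 != (t0 + 1200) // 1200     # 20 minutes later: key rolls over
```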
'4 hours ago').\"),\n default=True,\n required=False)\n\n\nclass Assignment(base.Assignment):\n \"\"\"Portlet assignment.\n\n This is what is actually managed through the portlets UI and associated\n with columns.\n \"\"\"\n\n implements(IFacebookWallPortlet)\n\n header = u\"\"\n fb_account = u\"\"\n wall_id = u\"\"\n max_results = 20\n only_self = False\n pretty_date = True\n\n def __init__(self,\n fb_account,\n wall_id,\n max_results,\n header=u\"\",\n only_self=False,\n pretty_date=True):\n\n self.header = header\n self.fb_account = fb_account\n self.wall_id = wall_id\n self.max_results = max_results\n self.only_self = only_self\n self.pretty_date = pretty_date\n\n @property\n def title(self):\n \"\"\"This property is used to give the title of the portlet in the\n \"manage portlets\" screen.\n \"\"\"\n return _(u\"Facebook wall Portlet\")\n\n\nMAX_FETCHES = 15\n\n\nclass Renderer(base.Renderer):\n \"\"\"Portlet renderer.\n\n This is registered in configure.zcml. The referenced page template is\n rendered, and the implicit variable 'view' will refer to an instance\n of this class. Other methods can be added and referenced in the template.\n \"\"\"\n\n render = ViewPageTemplateFile('fbwall.pt')\n\n def getHeader(self):\n \"\"\"\n Returns the header for the portlet\n \"\"\"\n return self.data.header\n\n def canEdit(self):\n return checkPermission('cmf.ModifyPortalContent', self.context)\n\n def isValidAccount(self):\n registry = getUtility(IRegistry)\n accounts = registry.get('collective.facebook.accounts', None)\n\n if self.data.fb_account not in accounts:\n logger.debug(\"The account '%s' is invalid.\" % self.data.fb_account)\n return False\n else:\n logger.debug(\"'%s' is a valid account.\" % self.data.fb_account)\n if accounts[self.data.fb_account]['expires']:\n expires = DateTime(accounts[self.data.fb_account]['expires'])\n if expires and expires < DateTime():\n logger.debug(\"But it already expired...\")\n return False\n\n return True\n\n @ram.cache(cache_key_simple)\n def getSearchResults(self):\n logger.debug(\"Going to Facebook to fetch results.\")\n registry = getUtility(IRegistry)\n accounts = registry.get('collective.facebook.accounts', None)\n\n result = []\n if self.data.fb_account in accounts:\n logger.debug(\"Using account '%s'\" % self.data.fb_account)\n access_token = accounts[self.data.fb_account]['access_token']\n\n wall = self.data.wall_id + '/feed'\n params = access_token + '&limit=%s' % self.data.max_results\n url = GRAPH_URL % (wall, params)\n\n logger.debug(\"URL: %s\" % url)\n query_result = json.load(urllib.urlopen(url))\n\n # I wanted to do this using fql, but i couldn't\n # Specificaly, i couldn't find a way to obtain links titles\n # I managed to get this:\n # /fql?q=SELECT+created_time,message,comments,likes,action_links,\n # message_tags+FROM+stream+\n # WHERE+filter_key+=+'owner'+\n # AND+source_id+=+[uid]&access_token=\n if self.data.only_self:\n logger.debug(\"Only get posts from self.\")\n # Let's get the ID for the wall owner\n uurl = GRAPH_URL % (self.data.wall_id, access_token)\n logger.debug(\"URL to get ID: %s\" % uurl)\n account_data = json.load(urllib.urlopen(uurl))\n uid = None\n if 'id' in account_data.keys():\n uid = account_data['id']\n\n # Now, let's iterate on each result until we have the amount\n # we wanted\n logger.debug(\"About to start getting results...\")\n #we need to give a max number of fetches.. 
or we may have a big\n #and long loop\n fetch_number = 0\n while ('paging' in query_result and\n len(result) < self.data.max_results and\n fetch_number < MAX_FETCHES):\n try:\n post = query_result['data'].pop(0)\n except IndexError:\n logger.debug(\"%s results so far. Need to fetch \"\n \"some more...\" % len(result))\n # If we are here, it means we need to query for the\n # next page of results\n fetch_number += 1\n url = query_result['paging']['next']\n logger.debug(\"Next URL: %s\" % url)\n query_result = json.load(urllib.urlopen(url))\n continue # otherwise the loop would reuse the previous 'post' instead of popping from the new page\n post['avatar'] = \"http://graph.facebook.com/%s/picture\" % \\\n post['from']['id']\n post['username'] = post['from']['name']\n post['user_url'] = \"http://www.facebook.com/%s\" % \\\n post['from']['id']\n if 'object_id' in post.keys():\n post['post_url'] = \"http://www.facebook.com/%s\" % \\\n post['object_id']\n if self.data.only_self:\n if post['from']['id'] == uid:\n result.append(post)\n else:\n result.append(post)\n logger.debug(\"Done. returning %s results\" % len(result))\n return result\n\n def getFacebookLink(self):\n\n return \"https://www.facebook.com/%s\" % self.data.wall_id\n\n def getDate(self, str_date):\n if self.data.pretty_date:\n # Returns human readable date for the wall post\n date_utility = getUtility(IPrettyDate)\n date = date_utility.date(str_date)\n else:\n date = DateTime(str_date)\n\n return date\n\n\nclass AddForm(base.AddForm):\n \"\"\"Portlet add form.\n\n This is registered in configure.zcml. The form_fields variable tells\n zope.formlib which fields to display. The create() method actually\n constructs the assignment that is being added.\n \"\"\"\n form_fields = form.Fields(IFacebookWallPortlet)\n\n def create(self, data):\n return Assignment(**data)\n\n\nclass EditForm(base.EditForm):\n \"\"\"Portlet edit form.\n\n This is registered with configure.zcml. 
The form_fields variable tells\n zope.formlib which fields to display.\n \"\"\"\n form_fields = form.Fields(IFacebookWallPortlet)\n","sub_path":"src/collective/facebook/portlets/fbwall.py","file_name":"fbwall.py","file_ext":"py","file_size_in_byte":10199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184437823","text":"from __future__ import print_function\nfrom sys import argv\nimport argparse\nimport os\nimport shutil\n\n# GOOGLE API libraries\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\n\n# DATA MUNGING & FORMATTING libraries\nimport pandas as pd\nimport json\nimport datetime\nimport re # regex\nimport numpy as np\nimport time\n\n# OTHER libraries\nfrom zipfile import ZipFile\nimport warnings\n\n\n# _________________________________________________________________________________________________________________________\n\n# GLOBAL VARIABLES | Set global variables and print display formats\n\nwarnings.filterwarnings('ignore') # FILTER out warnings that are not critical\n\npd.set_option('display.max_columns', 100) # or 1000 or None\npd.set_option('display.max_rows', 1000) # or 1000 or None\nPRINT_OUTPUT_WIDTH = 100 # SET print output length\nPRINT_CENTER = int(PRINT_OUTPUT_WIDTH/2)\nnp.set_printoptions(linewidth=PRINT_OUTPUT_WIDTH)\nPRINT_TUPLE_WIDTH = 2\nPRINT_ARRAY_WIDTH = 11 \n\nELECTION_YEAR = 2020\n\nSTATES_WITH_WARNINGS = [] # STORE states that trigger warnings\n\n# _________________________________________________________________________________________________________________________\n\n# GOOGLE API | Set scopes & Google Spreadsheet IDs (1 scope, 2 IDs)\n\n# PRO-TIP: if modifying these scopes, delete the file token.json\n# NOTE: Scope url should not change year to year unless Google alters syntax\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'\n\n# individual states & STATE_FEED tabs in a single Google Sheet (multiple tabs)\n# https://docs.google.com/spreadsheets/d/1mccKKi7u8DZ5hl5-BPykJyIwj-J4N-fUrTwwp0XSZxI/edit#gid=536611544\nSPREADSHEET_ID = '1mccKKi7u8DZ5hl5-BPykJyIwj-J4N-fUrTwwp0XSZxI' \n\n# ELECTION_AUTHORITIES entire Google Sheet (1 tab) \n# https://docs.google.com/spreadsheets/d/1XxY1pkoiKNAM8nUjrDJ4MMm6x_rGWRHbreW9XfJtdjE/edit#gid=1945124005\nSPREADSHEET_EA_ID = '1XxY1pkoiKNAM8nUjrDJ4MMm6x_rGWRHbreW9XfJtdjE' \n\n# _________________________________________________________________________________________________________________________\n\n\ndef vip_build(state_abbrv, state_feed, state_data, election_authorities):\n \"\"\"\n PURPOSE: transforms state_data and state_feed data into .txt files, exports zip of 9 .txt files\n (election.txt, polling_location.txt, schedule.txt, source.txt, state.txt, locality.txt, \n election_administration.txt, department.txt, person.txt)\n INPUT: state_abbrv, state_data, state_feed, election_authorities\n RETURN: None\n \"\"\"\n\n # PREP | Identify data issues, create/format features, and standardize dataframes\n\n # GENERATE warnings in state_data (6 types of warnings)\n multi_directions_rows, multi_address_rows, cross_street_rows, \\\n missing_data_rows, semi_colon_rows, date_year_rows, \\\n missing_zipcode_rows, missing_state_abbrvs_rows, \\\n timezone_mismatch_rows, ocd_id_rows = generate_warnings(state_data, state_abbrv, state_feed['official_name'].tolist()[0])\n\n # CREATE/FORMAT feature(s) (1 created, 1 formatted)\n state_feed['state_fips'] = 
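# ---- illustrative aside (sample values invented, not from the live sheet) ----
# The two statements around this point zero-pad the FIPS code and build
# the lowercase state_id, e.g.:
import pandas as pd
_demo = pd.DataFrame({'state_abbrv': ['VT'], 'state_fips': ['5']})
_demo['state_fips'] = _demo['state_fips'].str.pad(2, side='left', fillchar='0')
assert (_demo['state_abbrv'].str.lower() + _demo['state_fips']).tolist() == ['vt05']
# ---- end of aside ----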
state_feed['state_fips'].str.pad(2, side='left', fillchar='0') # first make sure there are leading zeros\n state_feed['state_id'] = state_abbrv.lower() + state_feed['state_fips']\n state_feed['external_identifier_type'] = 'ocd-id' \n # CLEAN/FORMAT state_feed, state_data, and election_authorities (3 dataframes)\n state_feed, state_data, election_authorities = clean_data(state_abbrv, state_feed, state_data, election_authorities)\n # _____________________________________________________________________________________________________________________\n\n # CREATE IDS | Create IDs on dataframes\n\n if election_authorities.empty:\n # CREATE empty election_authorities DataFrame if state not in election administration sheet\n election_authorities = pd.DataFrame(columns=['ocd_division','election_administration_id','homepage_url',\n 'am_i_registered_uri','registration_uri','where_do_i_vote_uri'\n 'official_title','election_official_person_id'])\n\n else:\n # # SELECT desired cols from election_authorities\n election_authorities = election_authorities[['ocd_division','homepage_url', 'official_title', \n 'am_i_registered_uri','registration_uri','where_do_i_vote_uri'\n ]]\n\n # CREATE 'election_adminstration_id'\n temp = election_authorities[['ocd_division']]\n temp.drop_duplicates(['ocd_division'], inplace=True)\n temp.reset_index(drop=True, inplace=True) # RESET index prior to creating id\n temp['election_administration_id'] = 'ea' + (temp.index + 1).astype(str).str.zfill(4)\n election_authorities = pd.merge(election_authorities, temp, on =['ocd_division'])\n election_authorities.drop_duplicates(subset=['election_administration_id'], inplace=True) #REMOVE all except first election administration entry for each ocd-id\n\n # CREATE 'election_official_person_id'\n temp = election_authorities[['ocd_division', 'official_title']]\n temp.drop_duplicates(['ocd_division', 'official_title'], keep='first',inplace=True)\n temp.reset_index(drop=True, inplace=True) # RESET index prior to creating id\n temp['election_official_person_id'] = 'per' + (temp.index + 1).astype(str).str.zfill(4)\n election_authorities = pd.merge(election_authorities, temp, on =['ocd_division', 'official_title'])\n \n # CREATE 'hours_only_id'\n temp_cols = ['location_name', 'structured_line_1', 'structured_line_2', 'structured_city', 'structured_state', 'structured_zip', 'directions', 'is_drop_box', 'is_early_voting']\n temp = state_data[temp_cols]\n #for col in temp_cols:\n # temp['{0}'.format(col)] = temp['{0}'.format(col)].str.strip()\n temp.drop_duplicates(temp_cols, inplace=True)\n temp.reset_index(drop=True, inplace=True) # RESET index prior to creating id\n temp['hours_open_id'] = 'hours' + (temp.index + 1).astype(str).str.zfill(4)\n state_data = pd.merge(state_data, temp, on = temp_cols)\n\n # CREATE 'polling_location_ids'\n temp = state_data[temp_cols]\n #for col in temp_cols:\n # temp['{0}'.format(col)] = temp['{0}'.format(col)].str.strip()\n temp.drop_duplicates(temp_cols, inplace=True)\n temp.reset_index(drop=True, inplace=True) # RESET index prior to creating id\n temp['polling_location_ids'] = 'pol' + (temp.index + 1).astype(str).str.zfill(4)\n state_data = pd.merge(state_data, temp, on = temp_cols)\n \n # _____________________________________________________________________________________________________________________\n\n # FILE CREATION | Generate files for dashboard zip\n\n # GENERATE 9 .txt files\n election = generate_election(state_feed)\n polling_location = generate_polling_location(state_data)\n schedule = 
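# ---- illustrative aside (names are placeholders, not from the file) ----
# Every id block above repeats one recipe: de-duplicate the key columns,
# number the unique rows, then merge the new id back onto the data.
import pandas as pd
def assign_sequential_ids(df, key_cols, prefix, id_col):
    uniq = df[key_cols].drop_duplicates().reset_index(drop=True)
    uniq[id_col] = prefix + (uniq.index + 1).astype(str).str.zfill(4)
    return pd.merge(df, uniq, on=key_cols)
# ---- end of aside ----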
generate_schedule(state_data, state_feed)\n source = generate_source(state_feed)\n state = generate_state(state_feed, election_authorities)\n locality = generate_locality(state_feed, state_data, election_authorities)\n election_administration = generate_election_administration(election_authorities)\n department = generate_department(election_authorities)\n person = generate_person(election_authorities)\n\n\n # GENERATE zip file\n generate_zip(state_abbrv, state_feed, {'state':state, \n 'source':source,\n 'election': election,\n 'election_administration':election_administration,\n 'department': department,\n 'person': person,\n 'locality':locality,\n 'polling_location': polling_location,\n 'schedule': schedule})\n \n # _____________________________________________________________________________________________________________________\n\n # REPORT | Print zip file sizes, dataframe descriptions, and data warnings\n\n # PRINT state report\n state_report(multi_directions_rows, multi_address_rows, cross_street_rows, # WARNINGS for state data\n missing_data_rows, semi_colon_rows, date_year_rows, # FATAL ERRORS for state_data\n missing_zipcode_rows, missing_state_abbrvs_rows, \n timezone_mismatch_rows, ocd_id_rows,\n state_abbrv, state_feed, state_data, election_authorities,\n {'state':state, \n 'source':source,\n 'election': election,\n 'election_administration':election_administration,\n 'department': department,\n 'person': person,\n 'locality':locality,\n 'polling_location': polling_location,\n 'schedule': schedule})\n\n\n return\n\n \n\ndef clean_data(state_abbrv, state_feed, state_data, election_authorities):\n \"\"\"\n PURPOSE: cleans and formats state_feed, state_data, & election_authorities to output standards\n INPUT: state_abbrv, state_feed, state_data, election_authorities\n RETURN: state_feed, state_data, election_authorities dataframes\n \"\"\"\n\n # CREATE/FORMAT | Adjust variables to desired standards shared across relevant .txt files\n\n # RESET indexes\n\n state_data.reset_index(drop=True, inplace=True)\n\n # REPLACE empty strings with NaNs\n state_data = state_data.replace('^\\\\s*$', np.nan, regex=True)\n\n # FORMAT OCD IDs (2 formatted)\n state_data['ocd-division'] = state_data['ocd-division'].str.strip()\n\n election_authorities['ocd_division'] = election_authorities['ocd_division'].str.strip().str.lower()\n\n # FORMAT dates (3 formatted)\n state_feed['election_date'] = pd.to_datetime(state_feed['election_date'])\n state_data['start_date'] = pd.to_datetime(state_data['start_date'])\n state_data['end_date'] = pd.to_datetime(state_data['end_date'])\n\n # FORMAT hours (2 formatted)\n state_data['start_time'] = state_data['start_time'].str.replace(' ', '')\n state_data['start_time'] = state_data['start_time'].str.replace(';', ':')\n\n #state_data['start_time'] = state_data['start_time'].str.replace('-',':00-')\n temp = state_data['start_time'].str.split('-', n=1, expand=True)\n temp[0] = temp[0].str.pad(8, side='left', fillchar='0')\n temp[1] = temp[1].str.pad(5, side='left', fillchar='0')\n state_data['start_time'] = temp[0] + '-' + temp[1]\n\n state_data['end_time'] = state_data['end_time'].str.replace(' ', '')\n state_data['end_time'] = state_data['end_time'].str.replace(';', ':')\n #state_data['end_time'] = state_data['end_time'].str.replace('-',':00-')\n temp = state_data['end_time'].str.split('-', n=1, expand=True)\n temp[0] = temp[0].str.pad(8, side='left', fillchar='0')\n temp[1] = temp[1].str.pad(5, side='left', fillchar='0')\n state_data['end_time'] = temp[0] + '-' + 
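# ---- illustrative aside (times invented) ----
# The split/pad steps around this point normalise 'H:MM:SS-Z:ZZ' strings:
import pandas as pd
_t = pd.Series(['7:00:00-05:00', '19:30:00-5:00']).str.split('-', n=1, expand=True)
_t[0] = _t[0].str.pad(8, side='left', fillchar='0')
_t[1] = _t[1].str.pad(5, side='left', fillchar='0')
assert (_t[0] + '-' + _t[1]).tolist() == ['07:00:00-05:00', '19:30:00-05:00']
# ---- end of aside ----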
temp[1]\n\n # FORMAT booleans (4 formatted)\n true_chars = [char for char in 'true' if char not in 'false'] # SET unique chars in 'true' and not in 'false'\n false_chars = [char for char in 'false' if char not in 'true'] # SET unique chars in 'true' and not in 'false'\n lambda_funct = (lambda x: None if not x else ( \\\n 'true' if any(char in x for char in true_chars) == True else ('false' if any(char in x for char in false_chars) == True else np.nan)))\n if state_data['is_drop_box'].isnull().all(): # NOTE: is_drop_box is often left empty, which should be treated as false\n state_data['is_drop_box'] = 'false'\n else:\n state_data['is_drop_box'] = state_data['is_drop_box'].str.lower().apply(lambda_funct)\n state_data['is_early_voting'] = state_data['is_early_voting'].str.lower().apply(lambda_funct)\n state_data['is_only_by_appointment'] = state_data['is_only_by_appointment'].str.lower().apply(lambda_funct)\n state_data['is_or_by_appointment'] = state_data['is_or_by_appointment'].str.lower().apply(lambda_funct)\n\n\n # FORMAT ocd division ids (2 formatted)\n state_data['ocd-division'] = state_data['ocd-division'].str.strip()\n if not election_authorities.empty:\n election_authorities['ocd_division'] = election_authorities['ocd_division'].str.upper().str.strip()\n\n #state_data['address_line'] = state_data['address_line'].str.strip().str.replace('\\\\s{2,}', ' ')\n state_data['location_name'] = state_data['location_name'].str.strip().str.replace('\\'S', '\\'s')\n \n # _____________________________________________________________________________________________________________________\n\n # ERROR HANDLING | Interventionist adjustments to account for state eccentricities and common hand collection mistakes, etc\n\n # FORMAT address line (1 formatted)\n # NOTE: A common outreach error was missing state abbreviations in the address line\n state_abbrv_insert = state_feed['state_abbrv'].tolist()[0].center(4, ' ')\n \n # FORMAT address line \n # NOTE: There is a ton of random non-standard punctuation. The following regex clears everything except periods in digits\n if not state_data.structured_state.isnull().all():\n state_data['structured_state'] = state_data['structured_state'].str.strip() \\\n .str.strip('.,;:)(') \\\n .str.replace('(? 30: # IF there are more than 30 rows with missing data then simply notify user\n missing_data_rows = ['More than 30 rows with missing data']\n\n \n return missing_data_rows\n\n\n\ndef warning_cross_street(state_data):\n \"\"\"\n PURPOSE: isolate which rows, if any, have invalid cross street (e.g. 
1st & Main St)\n INPUT: state_data\n RETURN: cross_street_rows\n \"\"\"\n\n # NOTE: Invalid cross streets sometimes do not map well on Google's end \n cross_street_addresses = state_data[state_data['structured_line_1'].str.contains(' & | and | between', regex = True, na = False)]\n cross_street_rows = sorted(list(cross_street_addresses.index + 1))\n\n\n return cross_street_rows\n\n\n\ndef warning_multi_addresses(state_data):\n \"\"\"\n PURPOSE: isolate which polling locations (OCD_ID, location name), if any, have multiple addresses\n (warning: each unique set of addresses is considered a polling location)\n INPUT: state_data\n RETURN: multi_address_rows\n \"\"\"\n\n # SELECT feature(s) (3 selected)\n addresses = state_data[['ocd-division','location_name', 'structured_line_1', 'structured_line_2', 'structured_city', 'structured_state', 'structured_zip']].drop_duplicates()\n multi_addresses = addresses[addresses.duplicated(subset=['ocd-division','location_name'], keep=False)]\n multi_addresses.index = multi_addresses.index + 1 # INCREASE INDEX to correspond with google sheets index\n\n multi_address_rows = []\n if not multi_addresses.empty: # IF the dataframe is not empty\n multi_address_rows = sorted([tuple(x) for x in multi_addresses.groupby(['ocd-division', 'location_name']).groups.values()])\n\n\n return multi_address_rows\n\n\ndef warning_multi_directions(state_data):\n \"\"\"\n PURPOSE: isolate which polling locations, if any, have multiple directions \n (warning: each unique set of directions is considered a polling location)\n INPUT: state_data\n RETURN: multi_directions_rows\n \"\"\"\n\n # SELECT feature(s) (4 selected)\n unique_rows = state_data[['ocd-division', 'location_name', 'structured_line_1', 'structured_line_2', 'structured_city', 'structured_state', 'structured_zip', 'directions']].drop_duplicates()\n duplicate_locations = unique_rows[unique_rows.duplicated(subset=['ocd-division', 'location_name', 'structured_line_1', 'structured_line_2', 'structured_city', 'structured_state', 'structured_zip'],keep=False)]\n duplicate_locations.index = duplicate_locations.index + 1 # INCREASE INDEX to correspond with google sheets index\n\n multi_directions_rows = []\n if not duplicate_locations.empty: # IF there are polling locations with multiple locations\n multi_directions_rows = sorted([tuple(x) for x in \\\n duplicate_locations.groupby(['ocd-division', 'location_name', 'structured_line_1', 'structured_line_2', 'structured_city', 'structured_state', 'structured_zip']).groups.values()])\n\n\n return multi_directions_rows\n\n\n\ndef warning_ocd_id(state_data, state_abbrv):\n \"\"\"\n PURPOSE: isolate which issues with ocd_ids\n INPUT: state_data\n RETURN: ocd_id_rows\n \"\"\"\n\n ocd_id_rows = []\n\n # ISOLATE if ocd-division is incorrect\n ocd_id_issue = state_data[~state_data['ocd-division'].str.contains('ocd-division', na = False)]\n if not ocd_id_issue.empty:\n ocd_id_rows.append(('ocd-id', str(set(ocd_id_issue.index+1)).strip('{}')))\n\n # ISOLATE if country is incorrect\n country_issue = state_data[~state_data['ocd-division'].str.contains('country:us', na = False)]\n if not country_issue.empty:\n ocd_id_rows.append(('country', str(set(country_issue.index+1)).strip('{}')))\n\n # ISOLATE if state is incorrect \n state_string = 'state:' + state_abbrv.lower()\n state_issue = state_data[~state_data['ocd-division'].str.contains(state_string, na = False)]\n if not state_issue.empty:\n ocd_id_rows.append(('state', str(set(state_issue.index+1)).strip('{}')))\n\n # ISOLATE if country is 
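# ---- illustrative aside: one way to bundle the ocd-id checks below into
# ---- a single predicate; the regex is an assumption about the expected
# ---- shape, not an official VIP rule.
import re
_OCD_RE = re.compile(r'^ocd-division/country:us/state:[a-z]{2}(/|$)')
def looks_like_us_ocd_id(value):
    return bool(_OCD_RE.match(str(value).strip().lower()))
assert looks_like_us_ocd_id('ocd-division/country:us/state:vt/county:essex')
# ---- end of aside ----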
incorrect\n if state_abbrv != 'AK': # Alaska ocd-ids does not include county/place \n county_place_issue = state_data[~state_data['ocd-division'].str.contains(r'county|place|sldl|parish', na = False)]\n if not county_place_issue.empty:\n ocd_id_rows.append(('county|place|sldl|parish', str(set(county_place_issue.index+1)).strip('{}')))\n\n # ISOLATE if the number of slashes is incorrect\n if state_abbrv != 'AK': # Alaska ocd-ids have 2 and 3 slashes, depending\n slash_number = 3\n slash_issue = state_data[state_data['ocd-division'].str.count('/') != slash_number]\n if not slash_issue.empty:\n ocd_id_rows.append(('slashes', str(set(slash_issue.index+1)).strip('{}')))\n\n\n return ocd_id_rows\n\n\n\ndef warning_date_year(state_data): \n \"\"\"\n PURPOSE: isolate which rows, if any, have a start_date or end_date outside of the election year\n INPUT: state_data\n RETURN: date_year_rows\n \"\"\"\n\n # FORMAT features (2 formatted)\n state_data['start_date'] = pd.to_datetime(state_data['start_date'])\n state_data['end_date'] = pd.to_datetime(state_data['end_date'])\n \n # ISOLATE data errors in 2 features\n incorrect_start_dates = state_data[state_data['start_date'].dt.year != ELECTION_YEAR]\n incorrect_end_dates = state_data[state_data['end_date'].dt.year != ELECTION_YEAR]\n incorrect_dates = incorrect_start_dates.append(incorrect_end_dates)\n\n date_year_rows = sorted(list(set(incorrect_dates.index + 1)))\n\n\n return date_year_rows\n\n\n\ndef warning_semi_colon(state_data):\n \"\"\"\n PURPOSE: isolate which rows, if any, have ;'s instead of :'s in hours\n INPUT: state_data\n RETURN: semi_colon_rows\n \"\"\"\n\n # ISOLATE data errors in 2 features\n semi_colon_start_time = state_data[state_data['start_time'].str.contains(';', na = False)]\n semi_colon_end_time = state_data[state_data['end_time'].str.contains(';', na = False)]\n semi_colon_times = semi_colon_start_time.append(semi_colon_end_time)\n\n semi_colon_rows = sorted(list(set(semi_colon_times.index + 1)))\n\n\n return semi_colon_rows\n\n\n\ndef warning_missing_zipcodes(state_data):\n \"\"\"\n PURPOSE: isolate which rows, if any, are missing zip codes in the `structured_zip` col in state data \n INPUT: state_data\n RETURN: missing_zipcode_rows\n \"\"\"\n\n # SELECT feature(s) (1 feature)\n missing_zipcodes = state_data[['structured_zip']]\n\n # FORMAT feature(s) (1 formatted)\n \"\"\"\n missing_zipcodes['structured_zip'] = missing_zipcodes['structured_zip'].str.strip() \\\n .str.strip('.,;:)(') \\\n .str.replace('\\t', ' ') \\\n .str.replace('(?{PRINT_CENTER-2}} | {len(df.index)} row(s)')\n\n # PRINT count of unique OCD IDs\n sd = state_data[['ocd-division']] # CREATE count of unique OCD IDs in state_data\n sd.drop_duplicates(inplace=True)\n print('\\n'*2) \n print('# of Unique Counties/Places '.center(PRINT_OUTPUT_WIDTH, ' ')) \n print()\n print(f\"{'State Data |':>{PRINT_CENTER}} {len(sd)} counties/places\")\n if not election_authorities.empty:\n ea = election_authorities[['ocd_division']]\n ea.drop_duplicates(inplace=True)\n print(f\"{'Election Authorities |':>{PRINT_CENTER}} {len(ea)} counties/places\")\n else:\n print(f\"{'Election Authorities |':>{PRINT_CENTER}} 0 counties/places\")\n\n\n # _____________________________________________________________________________________________________________________\n\n if multi_directions_rows or multi_address_rows or cross_street_rows or \\\n missing_data_rows or semi_colon_rows or date_year_rows or \\\n missing_zipcode_rows or missing_state_abbrvs_rows or \\\n timezone_mismatch_rows or 
ocd_id_rows:\n \n STATES_WITH_WARNINGS.append(state_abbrv) # RECORD states with warnings\n\n\n if multi_directions_rows or multi_address_rows or cross_street_rows: \n print('\\n'*2)\n print('----------------------- STATE DATA WARNINGS -----------------------'.center(PRINT_OUTPUT_WIDTH, ' '))\n\n if multi_directions_rows:\n print('\\n')\n print('Rows w/ Multiple Directions for the Same Polling Location'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(multi_directions_rows), PRINT_TUPLE_WIDTH):\n print(str(multi_directions_rows[i:i+PRINT_TUPLE_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n\n if multi_address_rows:\n print('\\n')\n print('Rows w/ Multiple Addresses for the Same Polling Location'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(multi_address_rows), PRINT_TUPLE_WIDTH):\n print(str(multi_address_rows[i:i+PRINT_TUPLE_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n \n if cross_street_rows:\n print('\\n')\n print('Rows w/ Problematic Cross-Street Formats'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(cross_street_rows), PRINT_ARRAY_WIDTH):\n print(str(cross_street_rows[i:i+PRINT_ARRAY_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n \n\n if missing_data_rows or semi_colon_rows or date_year_rows or \\\n missing_zipcode_rows or missing_state_abbrvs_rows or \\\n timezone_mismatch_rows or ocd_id_rows:\n print('\\n')\n print('--------------------- STATE DATA FATAL ERRORS ---------------------'.center(PRINT_OUTPUT_WIDTH, ' '))\n\n if missing_data_rows:\n print('\\n')\n print('Rows w/ Missing Data'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(missing_data_rows), PRINT_ARRAY_WIDTH):\n print(str(missing_data_rows[i:i+PRINT_ARRAY_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n \n if semi_colon_rows:\n print('\\n')\n print('Rows w/ ;\\'s Instead of :\\'s in Start and/or End Hours'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(semi_colon_rows), PRINT_ARRAY_WIDTH):\n print(str(semi_colon_rows[i:i+PRINT_ARRAY_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n \n if date_year_rows:\n print('\\n')\n print('Rows w/ Invalid Years in Start and/or End Dates'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(date_year_rows), PRINT_ARRAY_WIDTH):\n print(str(date_year_rows[i:i+PRINT_ARRAY_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n \n if missing_zipcode_rows:\n print('\\n')\n print('Rows w/ Missing or Invalid Zip Codes from Location Addresses'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(missing_zipcode_rows), PRINT_ARRAY_WIDTH):\n print(str(missing_zipcode_rows[i:i+PRINT_ARRAY_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n\n if missing_state_abbrvs_rows:\n print('\\n')\n print('Rows w/ Missing State Abbreviations from Location Addresses'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(missing_state_abbrvs_rows),PRINT_ARRAY_WIDTH):\n print(str(missing_state_abbrvs_rows[i:i+PRINT_ARRAY_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n\n if timezone_mismatch_rows:\n print('\\n')\n print('Rows w/ Mismatched Timezones between Start and End Times'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(timezone_mismatch_rows),PRINT_ARRAY_WIDTH):\n print(str(timezone_mismatch_rows[i:i+PRINT_ARRAY_WIDTH]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n \n if ocd_id_rows:\n print('\\n')\n print('Rows w/ (Possibly) Incorrect OCD ID Formats'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i 
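# ---- illustrative aside: every report block here slices a list into
# ---- fixed-width chunks before printing; the pattern as a helper
# ---- (names are placeholders):
def print_in_chunks(rows, width=11, line_width=100):
    for start in range(0, len(rows), width):
        print(str(rows[start:start + width]).strip('[]').center(line_width, ' '))
# ---- end of aside ----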
in range(0, len(ocd_id_rows),1):\n print(str(ocd_id_rows[i:i+1]).strip('[]').center(PRINT_OUTPUT_WIDTH, ' '))\n \n\n print('\\n'*1)\n print('_'*PRINT_OUTPUT_WIDTH)\n print('\\n'*2)\n\n\n return \n\n\n\ndef summary_report(num_input_states, increment_httperror, increment_processingerror, increment_success,\n states_failed_to_load, states_failed_to_process, states_successfully_processed):\n \"\"\"\n PURPOSE: print summary report\n INPUT: increment_httperror, increment_processingerror, increment_success,\n states_failed_to_load, states_failed_to_process, states_successfully_processed\n RETURN: \n \"\"\"\n\n # PRINT final report\n print('\\n'*1)\n print('SUMMARY REPORT'.center(PRINT_OUTPUT_WIDTH, ' '))\n print('\\n'*1)\n print('Final Status for All Requested States'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n print(f\"{'Failed to load state data |':>{PRINT_CENTER}} {increment_httperror} state(s) out of {num_input_states}\")\n print(f\"{'Failed to process |':>{PRINT_CENTER}} {increment_processingerror} state(s) out of {num_input_states}\")\n print(f\"{'Successfully processed |':>{PRINT_CENTER}} {increment_success} state(s) out of {num_input_states}\")\n\n if states_failed_to_load:\n print('\\n'*1)\n print('States that failed to load state data'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(states_failed_to_load),PRINT_ARRAY_WIDTH):\n print(str(states_failed_to_load[i:i+PRINT_ARRAY_WIDTH]).strip('[]').replace('\\'', '').center(PRINT_OUTPUT_WIDTH, ' '))\n \n if states_failed_to_process:\n print('\\n'*1)\n print('States that failed to process & why'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(states_failed_to_process),PRINT_ARRAY_WIDTH):\n print(str(states_failed_to_process[i:i+PRINT_ARRAY_WIDTH]).strip('[]').replace('\\'', '').center(PRINT_OUTPUT_WIDTH, ' '))\n\n if STATES_WITH_WARNINGS: \n print('\\n'*1)\n print('States that processed with warnings'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(STATES_WITH_WARNINGS),PRINT_ARRAY_WIDTH):\n print(str(STATES_WITH_WARNINGS[i:i+PRINT_ARRAY_WIDTH]).strip('[]').replace('\\'', '').center(PRINT_OUTPUT_WIDTH, ' '))\n \n if states_successfully_processed:\n print('\\n'*1)\n print('States that successfully processed'.center(PRINT_OUTPUT_WIDTH, ' '))\n print()\n for i in range(0, len(states_successfully_processed),PRINT_ARRAY_WIDTH):\n print(str(states_successfully_processed[i:i+PRINT_ARRAY_WIDTH]).strip('[]').replace('\\'', '').center(PRINT_OUTPUT_WIDTH, ' '))\n \n print('\\n'*3)\n\n\n return\n\n\n###########################################################################################################################\n# END OF REPORT RELATED DEFINITIONS #######################################################################################\n###########################################################################################################################\n\n\nif __name__ == '__main__':\n \n\n # SET UP command line inputs\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-states', nargs='+', required=True)\n \"\"\"\n input_message = \"Input state abbreviations with spaces between each (or use the special argument all). 
No quotes or commas needed:\\n\\n\"\n input_states = [a.upper().strip() for a in str(input(input_message)).split(\" \")]\n print(\"Parsing:\", \", \".join(input_states))\n print('Timestamp:', datetime.datetime.now().replace(microsecond=0))\n start = time.time()\n\n # _____________________________________________________________________________________________________________________\n\n # SET UP Google API credentials\n # REQUIRES a local 'token.json' file & 'credentials.json' file\n # https://developers.google.com/sheets/api/quickstart/python\n \n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n \n try: \n # LOAD state feed data\n state_feed_result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID, \n range='STATE_FEED').execute()\n state_feed_values = state_feed_result.get('values', [])\n except:\n print('ERROR | STATE_FEED Google Sheet is either missing from the workbook or there is data reading error.')\n raise\n\n try: \n # LOAD election authorities data\n election_authorities_result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_EA_ID, \n range='Authorities').execute()\n election_authorities_values = election_authorities_result.get('values', [])\n except:\n print('ERROR | Authorities Google Sheet is either missing from the workbook or there is data reading error.')\n raise\n\n try: \n # LOAD election authorities data\n state_ea_result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_EA_ID, \n range='States').execute()\n state_ea_values = state_ea_result.get('values', [])\n except:\n print('ERROR | States Google Sheet is either missing from the workbook or there is data reading error.')\n raise\n\n\n # _____________________________________________________________________________________________________________________\n\n # PROCESS all user requested states \n\n # STORE states with errors\n states_successfully_processed = [] # STORE states that successfully create zip files\n states_failed_to_load = [] # STORE states whose data failed to load\n states_failed_to_process = [] # STORE states that failed to process\n increment_success = 0 # STORE count of states successfully processed\n increment_httperror = 0 # STORE count of states that could not be retrieved or found in Google Sheets\n increment_processingerror = 0 # STORE count of states that could not be processed\n \n # PROCESS each state individually (input_states are requested states listed as state abbreviations)\n #for input_states in parser:#parser.parse_args()._get_kwargs(): # ITERATE through input arguments\n\n\n #input_states = [state.upper() for state in parser] # FORMAT all inputs as uppercase\n \n # GENERATE state_feed & election_authorities dataframe\n state_feed_all = pd.DataFrame(state_feed_values[1:], columns=state_feed_values[0])\n election_authorities_all = pd.DataFrame(election_authorities_values[1:], columns=election_authorities_values[1])\n election_authorities_all.drop([0], inplace=True)\n election_authorities_all.columns = [col.lower().strip() for col in election_authorities_all.columns]\n state_ea_all = pd.DataFrame(state_ea_values[1:], columns = state_ea_values[0])\n state_ea_all.drop([0], inplace=True)\n state_ea_all.columns = [col.lower().strip() for col in state_ea_all.columns]\n state_ea_all['state'] = state_ea_all['ocd division'].apply(lambda x: 
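# ---- illustrative aside: the lambda applied here, as a named function,
# ---- checked against an invented sample value ----
def state_from_ocd(ocd_division):
    return str(ocd_division).split(':')[-1].upper()
assert state_from_ocd('ocd-division/country:us/state:vt') == 'VT'
# ---- end of aside ----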
str(x).split(\":\")[-1].upper())\n\n if 'ALL' in input_states: # IF user requests all states to be processed\n \n input_states = state_feed_all['state_abbrv'].unique().tolist() # FORMAT unique list of 50 state abbreviations\n\n\n for state_abbrv in input_states:\n \n try:\n \n # LOAD state data\n state_data_result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID, range=state_abbrv).execute()\n state_data_values = state_data_result.get('values', [])\n state_data = pd.DataFrame(state_data_values[0:],columns=state_data_values[0])\n print(state_data.columns)\n state_data.rename(columns = {\"Location Name\": \"location_name\", \"address_line1\": \"structured_line_1\", \"address_line2\": \"structured_line_2\", \"address_city\": \"structured_city\", \"address_state\": \"structured_state\", \"address_zip\": \"structured_zip\"}, inplace = True)\n state_data.drop([0], inplace=True) \n # FILTER state_feed, state_data, and election_authorities\n state_feed = state_feed_all[state_feed_all['state_abbrv'] == state_abbrv] # FILTER state_feed_all for selected state\n state_data = state_data[state_data['Outreach status'].str.upper().str.contains('^COMPLETE', regex = True, na = False)]#.reset_index(drop=True) #drop any rows that are not yet complete\n election_authorities = election_authorities_all.loc[election_authorities_all['state'] == state_abbrv, :] # FILTER election_authorities_all for selected state\n state_ea = state_ea_all.loc[state_ea_all['state'] == state_abbrv, :]\n\n election_authorities = pd.concat([state_ea, election_authorities], sort = False)\n election_authorities.rename(columns = {\"ocd division\": \"ocd_division\", \"homepage url\": \"homepage_url\", \"official title\": \"official_title\", \"polling place url\": \"where_do_i_vote_uri\", \"voter registration status url\": \"am_i_registered_uri\", \"ovr url\": \"registration_uri\"}, inplace = True)\n # GENERATE zip file and print state report\n vip_build(state_abbrv, state_feed, state_data, election_authorities)\n\n\n states_successfully_processed.append(state_abbrv)\n increment_success +=1\n \n except HttpError:\n increment_httperror += 1\n states_failed_to_load.append(state_abbrv)\n\n except Exception as e:\n increment_processingerror += 1\n exception_string = state_abbrv + ' | ' + str(type(e).__name__) + ' ' + str(e) + '\\n'\n states_failed_to_process.append(exception_string)\n\n\n summary_report(len(input_states), increment_httperror, increment_processingerror, increment_success,\n states_failed_to_load, states_failed_to_process, states_successfully_processed)\n\n\n print('Timestamp:', datetime.datetime.now().replace(microsecond=0))\n print(f'Run time: {float((time.time()-start)):.2f} second(s)')\n","sub_path":"2020/vip_earlyvoting.py","file_name":"vip_earlyvoting.py","file_ext":"py","file_size_in_byte":54034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"452981242","text":"# coding: utf-8\n'''\n2^15 = 32768 であり, 各位の数字の和は 3 + 2 + 7 + 6 + 8 = 26 となる.\n同様にして, 2^1000 の各位の数字の和を求めよ.\n'''\n\n\ndef main():\n num = 1\n for i in range(1000):\n num = num * 2\n s = 0\n while num > 0:\n s += num % 10\n num = num // 10\n print(s)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Practice/ProjectEuler/Problem16.py","file_name":"Problem16.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"122881511","text":"from __future__ import annotations\n\nfrom solver.base import 
BaseSolver\nfrom graph.node import Node\nfrom graph.grid import Map\n\nclass AStar(BaseSolver):\n \n def __init__(\n self,\n h_func,\n *args,\n ) -> AStar:\n \n super().__init__(h_func)\n \n def getSuccessors(\n self: AStar,\n state: Node,\n goal: Node,\n grid: Map,\n k: int,\n ) -> list:\n \n nodes = [\n Node(\n i = state.i + dx,\n j = state.j + dy,\n h = self.h_func(state.i + dx, state.j + dy, goal.i, goal.j),\n parent = state,\n k = k,\n )\n for (dx, dy) in grid.getAllowedMovements(state.i, state.j)\n ]\n \n return nodes","sub_path":"solver/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191132624","text":"#-*- coding:utf-8 -*-\n\nfrom numpy import *\nimport jieba\n\n#伯努利朴素贝叶斯模型中,每个特征的取值是布尔型,或以0和1表示,\n#所以伯努利模型中,每个特征值为0或者1。\n#思想核心:\n#假设有A,B两类,有x待分类数据,x有(n)个特征\n# p(A|x) = P(x|A)*P(A)/P(x)\n#由于p(x)是固定一致的,所以忽略此参数\n#在识别是否是侮辱性词语时,分别对侮辱性词语,和非侮辱性词语中的每个单词出现的权重。\n#在其中最重要的是,计算不同类别中的,每个单词占的比率,也就是说明这个单词发生,使得这个句子为这个类别\n#的语句的发生的概率。\n#并以此计算P(x|A)就是x中在A中包含的单词的权重的乘积之和,就代表着这段词语在A的发生的概率为\n#P(x|A),同理,计算P(x|B),然后比较这段词语在A发生的概率和,再B发生概率大小,然后判定事件是发生\n#再A类,还是B类\n\n\ndef load_dataset():\n \"\"\"init dataset base Web information.\"\"\"\n posting_list = [['my', 'dog', 'has', 'flea', \\\n 'problem', 'help', 'please'],\n ['maybe', 'not', 'take', 'him', \\\n 'to', 'dog', 'park', 'stupid'],\n ['my', 'dalmation', 'is', 'so', 'cute', \\\n 'I', 'love', 'him'],\n ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n ['mr', 'licks', 'ate', 'my', 'steak', 'how', \\\n 'to', 'stop', 'him'],\n ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]\n class_vec = [0,1,0,1,0,1] # 1 is insult[zh:侮辱] comment, 0 is common comment\n return posting_list,class_vec\n\n\ndef get_qqmessage_dataset(ownername, targetname):\n \"\"\"get data from qq message\n\n :param ownername:owner name\n :param targetname:target name\n :returns datalist[0],[1]\n \"\"\"\n data_list = [[],[]]\n with open('qqmessage/qqmessage.txt','r',encoding=\"utf-8\",errors='ignore') as message:\n index = None\n line_string = \"\"\n for line in message:\n #find name in [2] value\n line_list = line[:-1].split(\" \")\n if line_list[-1] == ownername:\n index = 0\n continue\n elif line_list[-1] == targetname:\n index = 1\n continue\n elif line == \"\\n\":\n #memory the data into data list\n if line_string != \"\":\n if index == 0:\n data_list[0].append(line_string)\n elif index == 1:\n data_list[1].append(line_string)\n index = None\n line_string = \"\"\n\n #string splice\n if index is not None:\n line_string += line\n return data_list[0],data_list[1]\n\n\n\ndef create_vocab_list(dataset):\n \"\"\"create a vocalulary[zh:词汇] set than all element is no repeating.\n\n :param dataset:source data from function load_dataset.\n :returns vocab_list:a list has all not repeating element base on dataset\n \"\"\"\n vocab_set = set([])\n for document in dataset:\n vocab_set = vocab_set | set(document) #create a union of two collection\n return sorted(list(vocab_set))\n\n\ndef set_of_words_to_vector(vocab_list,input_set):\n \"\"\"deal with input_set to a vector[zh:向量]\n\n :param vocab_list:data from create_vocab_list function\n :param input_set:user input a vocalulary set\n :returns return_vec:a vector if words exists in vocab_list than values 1\n else values 0\n \"\"\"\n return_vec = [0] * len(vocab_list)\n for word in input_set:\n if word in vocab_list:\n return_vec[vocab_list.index(word)] = 1\n else:\n print(\"the word {0} is not in my 
vocabulary!\".format(word))\n return return_vec\n\n\ndef bag_of_words_to_vector(vocab_list,input_set):\n \"\"\"deal with input_set to a vector[zh:向量] bag\n\n :param vocab_list:data from create_vocab_list function\n :param input_set:user input a vocalulary set\n :returns return_vec:a vector the words's number in vocab_list,default 0\n \"\"\"\n return_vec = [0] * len(vocab_list)\n for word in input_set:\n if word in vocab_list:\n return_vec[vocab_list.index(word)] += 1\n else:\n print(\"the word {0} is not in my vocabulary!\".format(word))\n return return_vec\n\n\ndef create_trainmatrix(vocab_list, post_list):\n \"\"\"create a matrix base on post_list\n\n :param vocab_list:data from create_vocab_list function\n :param post_list:data from load_dataset\n :returns\n \"\"\"\n train_matrix = []\n for post in post_list:\n train_matrix.append(set_of_words_to_vector(vocab_list, post))\n return train_matrix\n\ndef train_native_bayes(train_matrix, train_category):\n \"\"\"train native bayes and count the category[zh:类别] probability.\n\n :param train_matrix:train data's vector matrix\n :param train_category:train data's class list\n :returns p0_vector,p1_vector,pa_insult:p1_vector is the ratio[zh:比例] of\n the numbers of the word to the numbers of all words\n \"\"\"\n num_traindocs = len(train_matrix) #calc matirx's line\n num_words = len(train_matrix[0]) #matrix[0] is rows element\n pa_insult = sum(train_category) / float(num_traindocs)\n p0_num = ones(num_words) #prevent probability value 0\n p1_num = ones(num_words)\n p0_denom = 2.0 #denom[zh:分母]\n p1_denom = 2.0\n for i in range(num_traindocs):\n if i % 100 == 0:\n print(i)\n if train_category[i] == 1:\n p1_num += train_matrix[i]\n p1_denom += sum(train_matrix)\n else:\n p0_num += train_matrix[i]\n p0_denom += sum(train_matrix)\n p1_vector = log(p1_num / p1_denom) #natural logaritm[zh:对数] function avoid underflow[zh:下溢出]\n p0_vector = log(p0_num / p0_denom)\n return p0_vector,p1_vector,pa_insult\n\ndef train_native_bayes(train_matrix, train_category):\n \"\"\"train native bayes and count the category[zh:类别] probability.\n\n :param train_matrix:train data's vector matrix\n :param train_category:train data's class list\n :returns p0_vector,p1_vector,pa_insult:p1_vector is the ratio[zh:比例] of\n the numbers of the word to the numbers of all words\n \"\"\"\n num_traindocs = len(train_matrix) #calc matirx's line\n num_words = len(train_matrix[0]) #matrix[0] is rows element\n pa_insult = sum(train_category) / float(num_traindocs)\n p0_num = ones(num_words) #prevent probability value 0\n p1_num = ones(num_words)\n p0_denom = 2.0 #denom[zh:分母]\n p1_denom = 2.0\n for i in range(num_traindocs):\n if i % 100 == 0:\n print(i)\n if train_category[i] == 1:\n p1_num += train_matrix[i]\n p1_denom += sum(train_matrix)\n else:\n p0_num += train_matrix[i]\n p0_denom += sum(train_matrix)\n p1_vector = log(p1_num / p1_denom) #natural logaritm[zh:对数] function avoid underflow[zh:下溢出]\n p0_vector = log(p0_num / p0_denom)\n return p0_vector,p1_vector,pa_insult\n\n\ndef train_native_bayes_icon(train_matrix, train_category):\n \"\"\"train native bayes and count the category[zh:类别] probability.\n\n :param train_matrix:train data's vector matrix\n :param train_category:train data's class list\n :returns p0_vector,p1_vector,pa_insult:p1_vector is the ratio[zh:比例] of\n the numbers of the word to the numbers of all words\n \"\"\"\n num_traindocs = len(train_matrix) #calc matirx's line\n num_words = len(train_matrix[0]) #matrix[0] is rows element\n p0_num = ones(num_words) #prevent 
probability value 0\n p1_num = ones(num_words)\n p0_denom = 2.0 #denom[zh:分母]\n p1_denom = 2.0\n for i in range(num_traindocs):\n if i % 100 == 0:\n print(i)\n if train_category[i] == 1:\n p1_num += train_matrix[i]\n p1_denom += sum(train_matrix[i])\n else:\n p0_num += train_matrix[i]\n p0_denom += sum(train_matrix[i])\n p1_vector = log(p1_num / p1_denom) #natural logaritm[zh:对数] function avoid underflow[zh:下溢出]\n p0_vector = log(p0_num / p0_denom)\n return p0_vector,p1_vector,pa_insult\n\n\ndef classify_native_bayes(vector_to_classify, p0_vector, p1_vector,\n pa_insult):\n \"\"\"native bayes classify the vector vector_to_classify\n\n :param vector_to_classify:vector has to be classify\n :param p0_vector:common comment's vector\n :param p1_vector:insult comment's vector\n :param pa_insult:the probability of insult\n :returns whether the comment is insult or common\n \"\"\"\n p1 = sum(vector_to_classify * p1_vector) + log(pa_insult) #base log(m*n) = log(m) + log(n)\n p0 = sum(vector_to_classify * p0_vector) + log(1 - pa_insult)\n if p1 > p0:\n return 1 #the comment is insult\n else:\n return 0 #the comment is common\n\n\ndef slice_text(big_string):\n \"\"\"clicing[zh:切分] text remove null and character string,and lower\n\n :param big_string:input string\n :returns slice text list\n \"\"\"\n import re\n list_of_words = re.split(r'\\W*', big_string) #regular expression[zh:正则表达式]\n return [word.lower() for word in list_of_words if len(word) > 0]\n\ndef slice_chinese_text(big_string):\n \"\"\"clicing[zh:切分] chinese text remove null and character string,and lower\n\n :param big_string:input string\n :returns slice text list\n \"\"\"\n import re\n big_string = \"\".join(re.findall(u'[\\u4e00-\\u9fa5]+', big_string)) #find chinese words\n word_list = '/'.join(jieba.cut(big_string)) #cut chinese words\n return [word for word in word_list.split('/') if len(word) >= 2]\n\n\ndef calc_most_frequently_word(vocab_list, full_text_list, wordnum=30):\n \"\"\"calc the most frequently word\n\n :param vocab_list:data from create_vocab_list function\n :param full_text_list:full text\n :returns\n \"\"\"\n import operator\n fre_dict = {}\n for word in vocab_list:\n fre_dict[word] = full_text_list.count(word)\n sort_dict = sorted(fre_dict.items(), key=operator.itemgetter(1), reverse=True)\n return sort_dict[:wordnum]\n\n\ndef show_top_word(vocab_list,p0_vector,p1_vector):\n \"\"\"show top use word\"\"\"\n rss1_list = []\n rss0_list = []\n for i in range(len(p0_vector)):\n if p0_vector[i] > -6.0:\n rss0_list.append((vocab_list[i],p0_vector[i]))\n if p1_vector[i] > -6.0:\n rss1_list.append((vocab_list[i],p1_vector[i]))\n\n print(\"**Rss1\"*10)\n for item in rss1_list:\n print(item[0])\n\n print(\"**Rss0\"*10)\n for item in rss0_list:\n print(item[0])\n","sub_path":"algorithm/learning/bayes/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":10751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138133301","text":"'''\nЗадание 1\nПользователь вводит с клавиатуры строку. Проверьте\nявляется ли введенная строка палиндромом. Палиндром — слово или текст, которое читается одинаково\nслева направо и справа налево. 
Например, кок; А роза\nупала на лапу Азора; доход; А буду я у дуба.\n'''\n\ndef palindrome(messag_e: int) -> None:\n\n message_1 = messag_e.replace(\" \" , \"\").lower() \n if message_1 == message_1[::-1]:\n print(messag_e, ' - palindrome\\n')\n input('Press to continue...')\n else:\n print(messag_e, ' - not palindrome\\n')\n input('Press to continue...')\n\n\nif __name__ == \"__main__\":\n\n TEXT = 't'\n EXIT = 'q' \n\n print('''\n Пользователь вводит с клавиатуры строку. Проверьте\n является ли введенная строка палиндромом. Палиндром — \n слово или текст, которое читается одинаково слева направо \n и справа налево. Например, кок; А роза упала на лапу Азора; \n доход; А буду я у дуба.''')\n\n while True:\n\n print(\"\"\"\n To exit press -> q\n Press to enter text -> t\n \"\"\")\n \n choice = input('Input: ')\n print()\n\n if choice == EXIT:\n break\n\n elif choice == TEXT:\n messag_e = input(\"Enter the string: \")\n print(\"-\" * 50)\n palindrome(messag_e)","sub_path":"Lesson_17_DZ_Nichipurenko_A.V/Lessin_17_DZ_1_Nichipurenko_A.V.py","file_name":"Lessin_17_DZ_1_Nichipurenko_A.V.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"316124709","text":"_xWon = ['XXXX'] + ['X' * i + 'T' + 'X' * (3 - i) for i in range(4)]\r\n_oWon = ['OOOO'] + ['O' * i + 'T' + 'O' * (3 - i) for i in range(4)]\r\ndef checkState(board):\r\n allRows = board + [''.join(board[x][y] for x in range(4)) for y in range(4)]\r\n allRows.append(''.join(board[x][x] for x in range(4)))\r\n allRows.append(''.join(board[x][3 - x] for x in range(4)))\r\n for i in _xWon:\r\n if i in allRows:\r\n print(i, allRows)\r\n return 'X won'\r\n for i in _oWon:\r\n if i in allRows:\r\n return 'O won'\r\n for i in board:\r\n if '.' 
in i:\r\n return 'Game has not completed'\r\n return 'Draw'\r\n\r\n\r\n\r\nfin = open('A-small-attempt0.in', 'r') \r\nT = int(fin.readline().split()[0])\r\ncaseNo = 0\r\nfout = open('A-small.out', 'w')\r\nfor i in range(T):\r\n board = [fin.readline().rstrip() for j in range(4)]\r\n fin.readline()\r\n caseNo += 1\r\n fout.write('Case #' + str(caseNo) + ': ' + checkState(board) + '\\n')\r\n\r\nfin.close()\r\nfout.close()\r\n\r\n","sub_path":"solutions_2453486_0/Python/wmLiu/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"642059174","text":"\"\"\"\nCP1404/CP5632 - Practical\nPseudocode for temperature conversion\n\"\"\"\nMENU = \"\"\"C - Convert Celsius to Fahrenheit\n F - Convert Fahrenheit to Celsius\n Q - Quit\"\"\"\n\n\ndef main():\n\n print(MENU)\n choice = input(\">>> \").upper()\n while choice != \"Q\":\n if choice == \"F\":\n fahrenheit_input = float(input(\"Fahrenheit: \"))\n celsius_output = fahrenheit_to_celsius(fahrenheit_input)\n print(\"Result: {:.2f} C\".format(celsius_output))\n elif choice == \"C\":\n celsius_input = float(input(\"Celsius: \"))\n fahrenheit_output = celsius_to_fahrenheit(celsius_input)\n print(\"Result: {:.2f} F\".format(fahrenheit_output))\n else:\n print(\"Invalid option\")\n print(MENU)\n choice = input(\">>> \").upper()\n print(\"Thank you.\")\n\n\ndef celsius_to_fahrenheit(celsius_input):\n fahrenheit_output = celsius_input * 9.0 / 5 + 32\n return fahrenheit_output\n\n\ndef fahrenheit_to_celsius(fahrenheit_input):\n celsius_output = 5 / 9 * (fahrenheit_input - 32)\n return celsius_output\n\n\nmain()\n","sub_path":"prac_03/termperature.py","file_name":"termperature.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561806450","text":"\"\"\"\nFirst module of Algorithmic Thinking\n\"\"\"\n\n\n\nEX_GRAPH0 = {\n 0: set([1, 2]),\n 1: set([]),\n 2: set([]),\n}\nEX_GRAPH1 = {\n 0: set([1, 4, 5]),\n 1: set([2, 6]),\n 2: set([3]),\n 3: set([0]),\n 4: set([1]),\n 5: set([2]),\n 6: set([]),\n}\nEX_GRAPH2 = {\n 0: set([1, 4, 5]),\n 1: set([2, 6]),\n 2: set([3, 7]),\n 3: set([7]),\n 4: set([1]),\n 5: set([2]),\n 6: set([]),\n 7: set([3]),\n 8: set([1, 2]),\n 9: set([0, 4, 3, 5, 6, 7]),\n}\n\n\ndef make_complete_graph(num_nodes, no_self = False):\n \"\"\"\n Takes the number of nodes num_nodes and returns a dictionary corresponding\n to a complete directed graph with the specified number of nodes.\n \"\"\"\n result_graph = {}\n if num_nodes <= 0:\n return result_graph\n else:\n for node_i in range(num_nodes):\n if no_self == False:\n result_graph[node_i] = set([node_j for node_j \\\n in range(num_nodes) if node_j != node_i])\n else:\n result_graph[node_i] = set([node_j for node_j \\\n in range(num_nodes)])\n return result_graph\n\ndef compute_in_degrees(digraph):\n \"\"\"\n Takes a directed graph digraph (represented as a dictionary) and\n computes the in-degrees for the nodes in the graph.\n \"\"\"\n result = {}\n for dummy_i in digraph:\n result[dummy_i] = 0\n for node_i in digraph:\n for node_j in digraph[node_i]:\n result[node_j] += 1\n return result\n\ndef compute_out_degrees(digraph):\n return [len(value) for key, value in digraph.iteritems()]\n\ndef in_degree_distribution(digraph):\n \"\"\"\n Takes a directed graph digraph (represented as a dictionary) and\n computes the unnormalized distribution of the in-degrees of the graph.\n \"\"\"\n 
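    # Worked example with EX_GRAPH0 defined above ({0: {1, 2}, 1: set(), 2: set()}):
    #   compute_in_degrees(EX_GRAPH0)     -> {0: 0, 1: 1, 2: 1}
    #   in_degree_distribution(EX_GRAPH0) -> {0: 1, 1: 2}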
result = {}\n in_degrees = compute_in_degrees(digraph)\n for node in in_degrees:\n result[in_degrees[node]] = result.get(in_degrees[node], 0) + 1\n return result\n\n\n# def run_test(test_func, test_cases):\n# \"\"\"\n# As it says\n# \"\"\"\n# for ipt in test_cases:\n# print test_func(ipt)\n\n\n\n# if __name__ == \"__main__\":\n# test_cases = [EX_GRAPH0, EX_GRAPH1, EX_GRAPH2]\n\n# run_test(compute_in_degrees, test_cases)\n\n# run_test(in_degree_distribution, test_cases)\n\n\n","sub_path":"first/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"72611892","text":"from xlrd import open_workbook, xldate_as_tuple\nfrom xlwt import Workbook\nfrom datetime import date\n\ninput_file = 'input/sales_2017.xlsx'\noutput_file = 'output/output03.xls' # xlsx\n\noutput_workbook = Workbook()\noutput_worksheet = output_workbook.add_sheet('jan_2017_output')\n\nfilter_column = 3\nwith open_workbook(input_file) as workbook:\n worksheet = workbook.sheet_by_name('january_2017')\n data = []\n header = worksheet.row_values(0)\n data.append(header)\n for row_index in range(1, worksheet.nrows):\n row_info = []\n sale_amount = worksheet.cell_value(row_index, filter_column)\n if sale_amount > 1400.0:\n # 조건에 맞는 경우에만 row 데이터 취득 \n for col_index in range(worksheet.ncols):\n cell_value = worksheet.cell_value(row_index, col_index)\n cell_type = worksheet.cell_type(row_index, col_index)\n\n if cell_type == 3: # 날짜데이터형식\n date_cell = xldate_as_tuple(cell_value, workbook.datemode) # (2017, 1, 1, 0, 0, 0)\n date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')\n row_info.append(date_cell)\n else:\n row_info.append(cell_value)\n data.append(row_info) # 조건에 맞는 row의 내용을 추가 \n # data에 있는 내용을 write\n \n for row_index, row_info in enumerate(data):\n for col_index, value in enumerate(row_info):\n output_worksheet.write(row_index, col_index, value)\n\noutput_workbook.save(output_file)\nprint('basic03.py executed')","sub_path":"액셀파일관리/basic03.py","file_name":"basic03.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1885146","text":"import pytz\nimport traceback\n\nfrom redminelib import Redmine\n\nfrom django.core.management.base import BaseCommand\nfrom django.core.management.base import CommandError\n\nfrom redmine.models import Connection\nfrom redmine.models import Project\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n ret = self.redmine_acquire_projects()\n if ret:\n return 'Succeess:redmine_acquire_projects'\n else:\n raise CommandError('Error:redmine_acquire_projects')\n\n def redmine_acquire_projects(self):\n status = True\n for connection in Connection.objects.order_by('id'):\n if not connection.enabled:\n continue\n redmine = Redmine(\n connection.home_url,\n key=connection.api_access_key)\n try:\n for project in redmine.project.all():\n if Project.objects.filter(\n redmine_id=project.id,\n connection__id=connection.id).exists():\n self.update_project(project, connection)\n else:\n self.create_project(project, connection)\n except Exception as e:\n traceback.print_exc()\n status = False\n continue\n return status\n\n def update_project(self, project, connection):\n pj = Project.objects.get(\n redmine_id=project.id, connection__id=connection.id)\n pj.redmine_id = project.id\n pj.name = project.name\n pj.identifier = project.identifier\n pj.description = 
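# ---- illustrative aside: update_project/create_project form a manual
# ---- upsert; Django's built-in equivalent would look roughly like this
# ---- (field list abridged, shown only as a sketch):
#   Project.objects.update_or_create(
#       redmine_id=project.id, connection=connection,
#       defaults={'name': project.name, 'identifier': project.identifier})
# ---- end of aside ----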
project.description\n pj.created_on = pytz.utc.localize(project.created_on)\n pj.updated_on = pytz.utc.localize(project.updated_on)\n pj.is_public = project.is_public\n pj.parent_id = self.get_parent_id(project)\n pj.save()\n\n def create_project(self, project, connection):\n Project.objects.create(\n connection=connection,\n redmine_id=project.id,\n name=project.name,\n identifier=project.identifier,\n description=project.description,\n created_on=pytz.utc.localize(project.created_on),\n updated_on=pytz.utc.localize(project.updated_on),\n is_public=project.is_public,\n parent_id=self.get_parent_id(project))\n\n def get_parent_id(self, project):\n if 'parent' in dir(project):\n return project.parent['id']\n else:\n return None\n","sub_path":"src/django/redmine/management/commands/redmine_acquire_projects.py","file_name":"redmine_acquire_projects.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"233408951","text":"\nclass RedditTracker():\n\n def __init__(self, subreddit):\n self.subreddit = subreddit\n \n \n def get_posts(self, praw, _limit):\n print(\"getting posts from\", self.subreddit)\n if self.subreddit == \"front\":\n subreddit = praw.front\n submissions = subreddit.hot(limit=_limit)\n else:\n subreddit = praw.subreddit(self.subreddit)\n submissions = subreddit.hot(limit=_limit)\n\n \n _submissions = []\n\n for submission in submissions:\n _submissions.append(self.submission_to_dict(submission))\n \n return _submissions\n \n def submission_to_dict(self,submission):\n submissionDict = {}\n \n for key,val in submission.__dict__.items():\n if type(val) is str or type(val) is int or type(val) is float:\n submissionDict[key] = val\n \n \n submissionDict['num_comments'] = submission.num_comments\n \n submissionDict['all_rank'] = []\n submissionDict['all_upvotes'] = []\n submissionDict['all_comments'] = []\n submissionDict['all_time'] = []\n\n submissionDict['sub_rank'] = []\n submissionDict['sub_upvotes'] = []\n submissionDict['sub_comments'] = []\n submissionDict['sub_time'] = []\n\n submissionDict['front_rank'] = []\n submissionDict['front_upvotes'] = []\n submissionDict['front_comments'] = []\n submissionDict['front_time'] = []\n \n return submissionDict","sub_path":"tracker/reddit_tracker.py","file_name":"reddit_tracker.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585100858","text":"import sys\nimport logging\n\n# configure loggers\nlog_formatter = logging.Formatter('%(message)s')\n\nstdout = logging.getLogger('echo.stdout')\nstdout.setLevel(logging.DEBUG)\nlog_handler = logging.StreamHandler(stream=sys.stdout)\nlog_handler.setFormatter(log_formatter)\nstdout.addHandler(log_handler)\n\nstderr = logging.getLogger('echo.stderr')\nstderr.setLevel(logging.DEBUG)\nlog_handler = logging.StreamHandler(stream=sys.stderr)\nlog_handler.setFormatter(log_formatter)\nstderr.addHandler(log_handler)\n\nquiet = True\n\ndef exception(e):\n '''\n print an exception message to stderr\n '''\n global quiet\n if quiet: return\n\n stderr.exception(e)\n\ndef err(format_msg, *args, **kwargs):\n '''print format_msg to stderr'''\n global quiet\n if quiet: return\n\n stderr.info(format_msg.format(*args, **kwargs))\n\ndef out(format_msg, *args, **kwargs):\n '''\n print format_msg to stdout, taking into account verbosity level\n '''\n global quiet\n if quiet: return\n\n stdout.info(format_msg.format(*args, 
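# ---- illustrative aside: how the two module-level loggers above are
# ---- meant to be used (the import name `echo` is assumed from sub_path;
# ---- quiet defaults to True, so flip it first):
#   import echo
#   echo.quiet = False
#   echo.out('processed {} of {}', 3, 10)  # -> stdout logger
#   echo.err('failed on {}', 'item-7')     # -> stderr logger
# ---- end of aside ----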
**kwargs))\n\n","sub_path":"bang/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293530557","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ny = np.load('Results/NPY/Sun_Eu.npy')\n\nplt.close()\nfig, ax = plt.subplots(1, 2, figsize = (12,6))\nax[0].set_title('Space')\nax[0].plot(y[:,1,0].T, y[:,1,1].T, '--')\nax[0].set_xlabel('x [kpc]')\nax[0].set_ylabel('y [kpc]')\nax[0].axis('equal')\n\nax[1].set_title('Velocity')\nax[1].plot(y[:,0,0].T, y[:,0,1].T, '--')\nax[1].set_xlabel(r'$v_x$ [km/s]')\nax[1].set_ylabel(r'$v_y$ [km/s]')\nax[1].axis('equal')\n\nplt.savefig('Results/Figures/Sun_Eu.png')","sub_path":"Plot1.py","file_name":"Plot1.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"450788038","text":"import os\nfrom comm import getPathInfo\nimport configparser\npath=getPathInfo.get_path()#实例化,先获取文件所在的路径\nconfig_path=os.path.join(path, '../config/config.ini')#然后把文件路径和文件名拼接起来,获得文件的绝对路径\nconfig=configparser.ConfigParser()#调用外部的读取配置文件的方法\nconfig.read(config_path,encoding=\"utf-8\")#读取文件中的内容\n\nclass getConfig():\n def get_http(self,name):\n value = config.get(\"HTTP\",name)\n return value\n def get_EMAIL(self,name):\n value = config.get(\"EMAIL\",name)\n return value\nif __name__==\"__main__\":\n print(\"HTTP的baseUrl配置为:\",getConfig().get_http(\"baseUrl\"))\n print(\"发送邮件消息的开关为:\",getConfig().get_EMAIL(\"on_off\"))","sub_path":"UnittestAndRequests/comm/readConfig.py","file_name":"readConfig.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"470132816","text":"\n\nfrom xai.brain.wordbase.nouns._digit import _DIGIT\n\n#calss header\nclass _DIGITS(_DIGIT, ):\n\tdef __init__(self,): \n\t\t_DIGIT.__init__(self)\n\t\tself.name = \"DIGITS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"digit\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_digits.py","file_name":"_digits.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230871014","text":"import threading\nimport time\n\ncon = threading.Condition()\nclass Producer(threading.Thread):\n def __init__(self):\n super(Producer, self).__init__()\n def run(self):\n global x\n while True:\n\n time.sleep(1)\n print('p', x)\n with con:\n if x>0:\n print('p', '阻塞')\n con.wait()\n print('produce')\n for i in range(2):\n x+=1\n print('producing ...'+str(x))\n print('p', '唤醒')\n con.notify()\nclass Consumer(threading.Thread):\n def __init__(self):\n super(Consumer, self).__init__()\n def run(self):\n global x\n while True:\n print('c',x)\n with con:\n\n if x<1:\n print('c', '阻塞')\n con.wait()\n\n\n x-=1\n print('consuming...'+str(x))\n time.sleep(3)\n con.notify()\n\nx=0\nprint('start consumer')\nc=Consumer()\nprint('start producer')\np=Producer()\nc.start()\np.start()\nc.join()\np.join()\nprint('main thread over!')","sub_path":"exec/条件同步.py","file_name":"条件同步.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"588077855","text":"import sendgrid\nimport os\nimport sys\nimport traceback\n\nfrom string import Template\n\nSENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')\n\n\ndef send_email(mail_from, recipient, 
subject, body):\n\n if not SENDGRID_API_KEY:\n return\n\n try:\n to = [{\"email\": e} for e in list(set(recipient))]\n\n sg = sendgrid.SendGridAPIClient(apikey=SENDGRID_API_KEY)\n data = {\n \"personalizations\": [\n {\n \"to\": to,\n \"subject\": subject\n }\n ],\n \"from\": {\n \"email\": mail_from\n },\n \"content\": [\n {\n \"type\": \"text/html\",\n \"value\": body\n }\n ]\n }\n\n response = sg.client.mail.send.post(request_body=data)\n\n print(response)\n except Exception as e:\n print(\"Error on sent email:\")\n print('-' * 60)\n traceback.print_exc(file=sys.stdout)\n print('-' * 60)\n\n\ndef build_email_body(date, hour, elderly_name, nursing_home_name):\n with open('mail/template.html') as f:\n template = Template(f.read())\n\n return template.substitute(\n date=date,\n hour=hour,\n elderly_name=elderly_name,\n nursing_home_name=nursing_home_name\n )\n","sub_path":"mail/mail_helper.py","file_name":"mail_helper.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321196565","text":"__author__ = 'charlesztt'\n\nimport os\nimport shutil\n\nf=open(\"data/video_list.txt\")\n\ncleaned_video_list=list()\n\nfor one_line in f:\n one_line=one_line.replace(\"\\n\",\"\")\n cleaned_video_list.append(one_line.replace(\".mp4\",\"\"))\n\nf.close()\n\nfor one_file in os.listdir('data/ie_data/isis_8000'):\n if one_file.replace(\".sgm.apf.xml\",\"\") in cleaned_video_list:\n shutil.copy(os.path.join('data/ie_data/isis_8000',one_file),'data/ie_data/isis_jan_feb')","sub_path":"python/basic_stat/clean_up_moving.py","file_name":"clean_up_moving.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572027057","text":"import requests\n\nurl = \"http://codeforces.com/api\"\noptions = '/contest.list'\nlink = 'http://codeforces.com/contest/'\n\ndata = requests.get(url+options)\ndata = data.json()\n\nedu = []\nif data['status'] == 'OK':\n data = data['result']\n for i in data:\n if 'Educational' in i['name']:\n edu.append(int(i['id']))\n edu.sort()\n\n f = open(\"educational.txt\",\"w+\")\n cnt = 1\n for i in edu:\n temp = 'Educational Round '\n temp += str(cnt)\n temp += ': '\n temp += link+str(i)\n temp += '\\n'\n f.write(temp)\n cnt+=1\n f.close()\nelse:\n print('There may be some issues on Codeforces or the servers or not responding');\n print('Please Wait and try it after few minutes');","sub_path":"edu.py","file_name":"edu.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"48567322","text":"'''\nPolicy class for computing action from weights and observation vector. 
\nHoria Mania --- hmania@berkeley.edu\nAurelia Guy\nBenjamin Recht \n'''\n\n\nimport numpy as np\nfrom filter import get_filter\n\nclass Policy(object):\n\n def __init__(self, policy_params):\n\n self.ob_dim = policy_params['ob_dim']\n self.ac_dim = policy_params['ac_dim']\n self.weights = np.empty(0)\n\n # a filter for updating statistics of the observations and normalizing inputs to the policies\n self.observation_filter = get_filter(policy_params['ob_filter'], shape = (self.ob_dim,))\n self.update_filter = True\n \n def update_weights(self, new_weights):\n self.weights[:] = new_weights[:]\n return\n\n def get_weights(self):\n return self.weights\n\n def get_observation_filter(self):\n return self.observation_filter\n\n def act(self, ob):\n raise NotImplementedError\n\n def copy(self):\n raise NotImplementedError\n\nclass LinearPolicy(Policy):\n \"\"\"\n Linear policy class that computes action as . \n \"\"\"\n\n def __init__(self, policy_params):\n Policy.__init__(self, policy_params)\n self.weights = np.zeros((self.ac_dim, self.ob_dim), dtype = np.float64)\n\n def act(self, ob):\n ob = self.observation_filter(ob, update=self.update_filter)\n return np.dot(self.weights, ob)\n\n def get_weights_plus_stats(self):\n \n mu, std = self.observation_filter.get_stats()\n aux = np.asarray([self.weights, mu, std])\n return aux\n \nclass MLPPolicy(Policy):\n \"\"\"\n MLP policy class that computes action.\n \"\"\"\n\n def __init__(self, policy_params):\n Policy.__init__(self, policy_params)\n self.hidden1=64\n self.hidden2=64\n self.weights = np.zeros((self.ob_dim+1)*self.hidden1 +\n (self.hidden1+1)*self.hidden2 +\n (self.hidden2+1)*self.ac_dim, dtype=np.float64)\n\n self.end_w1 = self.ob_dim*self.hidden1\n self.end_w1_with_bias = (self.ob_dim+1)*self.hidden1\n\n self.end_w2 = self.end_w1_with_bias + self.hidden1*self.hidden2\n self.end_w2_with_bias = self.end_w1_with_bias + (self.hidden1+1)*self.hidden2\n\n self.end_w3 = self.end_w2_with_bias + self.hidden2*self.ac_dim\n self.end_w3_with_bias = self.end_w2_with_bias + (self.hidden2+1)*self.ac_dim\n\n assert self.end_w3_with_bias == self.weights.shape[0]\n\n def act(self, ob):\n ob = self.observation_filter(ob, update=self.update_filter)\n\n w1 = self.weights[0:self.end_w1].reshape(self.ob_dim, self.hidden1)\n b1 = self.weights[self.end_w1:self.end_w1_with_bias]\n\n w2 = self.weights[self.end_w1_with_bias:self.end_w2].reshape(self.hidden1, self.hidden2)\n b2 = self.weights[self.end_w2:self.end_w2_with_bias]\n\n w3 = self.weights[self.end_w2_with_bias:self.end_w3].reshape(self.hidden2, self.ac_dim)\n b3 = self.weights[self.end_w3:self.end_w3_with_bias]\n\n layer1 = np.dot(ob, w1) + b1\n tanh_layer1 = np.tanh(layer1)\n layer2 = np.dot(tanh_layer1, w2) + b2\n tanh_layer2 = np.tanh(layer2)\n layer3 = np.dot(tanh_layer2, w3) + b3\n\n return np.tanh(layer3)\n\n def get_weights_plus_stats(self):\n mu, std = self.observation_filter.get_stats()\n aux = np.asarray([self.weights, mu, std])\n return aux\n\n","sub_path":"code/policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548255930","text":"#!/usr/bin/python -tt\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nfrom itertools import islice\nfrom csv import reader\nfrom re import match\n\n__all__ = ['analyze_file']\n\n\ndef head_suggests_address(header):\n lower = header.lower()\n\n for kw in ['adresa', 'addr']:\n if kw in lower:\n return True\n\n return 
False\n\ndef row_suggests_address(row):\n return match(r'\\w+ +\\d+/\\d+', row) and True\n\n\ndef shorten(text):\n text = text.decode('utf-8')\n\n if len(text) > 30:\n return text[:27] + '...'\n else:\n return text\n\n\ndef analyze_file(fp):\n \"\"\"Extract headers and a few example rows from supplied CSV file.\"\"\"\n\n r = reader(fp)\n\n head = r.next()\n rows = list(islice(r, 0, 5, 1))\n\n candidate = None\n\n for i, col in enumerate(head):\n if candidate is None and head_suggests_address(col):\n candidate = i\n\n head[i] = shorten(col)\n\n for row in rows:\n for i, col in enumerate(row):\n if candidate is None and row_suggests_address(col):\n candidate = i\n\n row[i] = shorten(col)\n\n return head, rows, candidate\n\n\n# vim:set sw=4 ts=4 et:\n","sub_path":"mhmp/lokalizator/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27672088","text":"#!/usr/bin/env python\r\n\r\nimport csv\r\nimport os\r\nimport sys\r\n#import datetime\r\nfrom tweepy import API, Cursor, OAuthHandler, TweepError\r\n\r\nconsumer_key = ''\r\nconsumer_secret = ''\r\naccess_token = ''\r\naccess_token_secret = ''\r\n\r\nauth = OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_token, access_token_secret)\r\napi = API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify = True)\r\n\r\ndef myTest():\r\n print(api.get_user(screen_name='BarackObama').screen_name)\r\n\r\ndef myRun(usernames):\r\n os.mkdir('tweet_data')\r\n error_usernames = []\r\n lastID = ''\r\n lastName = ''\r\n for username in usernames:\r\n print('Downloading tweet data for @{}'.format(username))\r\n with open('tweet_data/{}.csv'.format(username), 'wb') as output_file:\r\n writer = csv.writer(output_file, dialect='excel')\r\n try:\r\n user = api.get_user(username)\r\n #tweet_cursor = Cursor(api.user_timeline, id=username, since_id='242517296311062529', max_id='266087615860731904')#242517296311062529 266087615860731904\r\n page = 1\r\n while True:\r\n myStats = api.user_timeline(page=page,screen_name=username, since_id='551528342285856768', max_id='702499568550854657')#258559418692669441\r\n page +=1\r\n if myStats:\r\n for myStat in myStats:\r\n if not hasattr(myStat, 'retweeted_status'):\r\n writer.writerow([myStat.id_str.encode('utf-8'),\r\n str(myStat.created_at).encode('utf-8'),\r\n myStat.text.encode('utf-8'),\r\n myStat.retweet_count,\r\n myStat.favorite_count])\r\n lastID = myStat.id\r\n lastName = username\r\n else:\r\n break\r\n\t\t\t\t\t\r\n #for tweet in tweet_cursor.items():\r\n #writer.writerow([tweet.id_str.encode('utf-8'),\r\n # str(tweet.created_at).encode('utf-8'),\r\n # tweet.text.encode('utf-8')])\r\n except TweepError as e:\r\n error_usernames.append(username)\r\n error_usernames.append(lastID)\r\n error_usernames.append(lastName)\r\n print(e)\r\n continue\r\n if error_usernames:\r\n with open('tweets_error_usernames.csv', 'wb') as error_file:\r\n writer = csv.writer(error_file, dialect='excel')\r\n for error_username in error_usernames:\r\n writer.writerow([error_username])\r\n print('Some usernames caused errors; they have been saved to tweets_error_usernames.csv')\r\n\r\n \r\ndef get_tweets(usernames):\r\n os.mkdir('tweet_data')\r\n error_usernames = []\r\n for username in usernames:\r\n print('Downloading tweet data for @{}'.format(username))\r\n with open('tweet_data/{}.csv'.format(username), 'wb') as output_file:\r\n writer = csv.writer(output_file, dialect='excel')\r\n 
try:\r\n user = api.get_user(username)\r\n tweet_cursor = Cursor(api.user_timeline, id=username, since_id='242517296311062529', max_id='266087615860731904')#242517296311062529 266087615860731904\r\n for tweet in tweet_cursor.items():\r\n writer.writerow([tweet.id_str.encode('utf-8'),\r\n str(tweet.created_at).encode('utf-8'),\r\n tweet.text.encode('utf-8'),\r\n tweet.retweet_count.encode('utf-8')])\r\n except TweepError as e:\r\n error_usernames.append(username)\r\n print(e)\r\n continue\r\n if error_usernames:\r\n with open('tweets_error_usernames.csv', 'wb') as error_file:\r\n writer = csv.writer(error_file, dialect='excel')\r\n for error_username in error_usernames:\r\n writer.writerow([error_username])\r\n print('Some usernames caused errors; they have been saved to tweets_error_usernames.csv')\r\n\r\ndef get_followers(usernames):\r\n os.mkdir('follower_data')\r\n error_usernames = []\r\n for username in usernames:\r\n print('Downloading follower data for @{}'.format(username))\r\n with open('follower_data/{}.csv'.format(username), 'wb') as output_file:\r\n writer = csv.writer(output_file, dialect='excel')\r\n try:\r\n user = api.get_user(username)\r\n follower_cursor = Cursor(api.followers, id=username)\r\n for follower in follower_cursor.items():\r\n writer.writerow([follower.id_str.encode('utf-8'),\r\n follower.screen_name.encode('utf-8'),\r\n follower.description.encode('utf-8') if follower.description is not None else '',\r\n follower.location.encode('utf-8') if follower.location is not None else ''])\r\n except TweepError as e:\r\n error_usernames.append(username)\r\n print(e)\r\n continue\r\n if error_usernames:\r\n with open('followers_error_usernames.csv', 'wb') as error_file:\r\n writer = csv.writer(error_file, dialect='excel')\r\n for error_username in error_usernames:\r\n writer.writerow([error_username])\r\n print('Some usernames caused errors; they have been saved to followers_error_usernames.csv')\r\n\r\ndef usage_and_exit():\r\n print('Usage: scraper.py followers|tweets ~/path/to/usernames.csv')\r\n sys.exit(0)\r\n\r\ndef main():\r\n if len(sys.argv) != 3:\r\n usage_and_exit()\r\n\r\n\r\n with open(sys.argv[2], 'rU') as accounts_file:\r\n\r\n reader = csv.reader(accounts_file, dialect='excel')\r\n usernames = [row[5] for row in reader]\r\n if sys.argv[1] == 'followers':\r\n get_followers(usernames)\r\n elif sys.argv[1] == 'tweets':\r\n\r\n #get_tweets(usernames)\r\n myRun(usernames)\r\n else:\r\n print(sys.argv[1])\r\n usage_and_exit()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Data Collection/TwitterQuerynortsversion.py","file_name":"TwitterQuerynortsversion.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"381817923","text":"\"\"\"\n5099. [파이썬 S/W 문제해결 기본] 6일차 - 피자 굽기\n\nN개의 피자를 동시에 구울 수 있는 화덕이 있다. 피자는 치즈가 모두 녹으면 화덕에서 꺼내며, 치즈의 양은 피자마다 다르다.\n\n1번부터 M번까지 M개의 피자를 순서대로 화덕에 넣을 때, 치즈의 양에 따라 녹는 시간이 다르기 때문에 꺼내지는 순서는 바뀔 수 있다.\n\n주어진 조건에 따라 피자를 구울 때, 화덕에 가장 마지막까지 남아있는 피자 번호를 알아내는 프로그램을 작성하시오.\n\n- 피자는 1번위치에서 넣거나 뺄 수 있다.\n- 화덕 내부의 피자받침은 천천히 회전해서 1번에서 잠시 꺼내 치즈를 확인하고 다시 같은 자리에 넣을 수 있다.\n- M개의 피자에 처음 뿌려진 치즈의 양이 주어지고, 화덕을 한 바퀴 돌 때 녹지않은 치즈의 양은 반으로 줄어든다. 이전 치즈의 양을 C라고 하면 다시 꺼냈을 때 C//2로 줄어든다.\n- 치즈가 모두 녹아 0이 되면 화덕에서 꺼내고, 바로 그 자리에 남은 피자를 순서대로 넣는다.\n\n[입력]\n\n첫 줄에 테스트 케이스 개수 T가 주어진다. 
1<=T<=50\n\n다음 줄부터 테스트 케이스의 첫 줄에 화덕의 크기 N과 피자 개수 M이 주어지고, 다음 줄에 M개의 피자에 뿌려진 치즈의 양을 나타내는 Ci가 주어진다.\n\n3<=N<=20, N<=M<=100, 1<=Ci<=20\n\n[출력]\n\n각 줄마다 \"#T\" (T는 테스트 케이스 번호)를 출력한 뒤, 번호를 출력한다.\n\n\"\"\"\nimport sys\nsys.stdin = open(\"input.txt\",\"r\")\n\n\nT = int(input())\n\nfor t in range(1,T+1):\n N, M = map(int,input().split())\n data = list(map(int,input().split()))\n myQ = [None] * N\n out = out_count = pizza_count = Exit = 0\n while out_count < M:\n # 회전\n Exit = (Exit + 1) % N\n # 큐가 비어있고\n if myQ[Exit] == None:\n # 남아 있는 피자가 있으면\n if M > pizza_count:\n myQ[Exit] = [pizza_count,data[pizza_count]]\n pizza_count += 1\n else:\n # Fail Point : 0인지 확인하고 녹였는데 문제에서 원하는건 일단 녹이고 0인지 확인하는 거였음.\n # 이렇게 하면 코드 진행 수도 줄어들고 답도 알맞게 나옴\n myQ[Exit][1] = myQ[Exit][1] // 2\n # 큐가 비어있지 않고 치즈가 다 녹았으면\n if myQ[Exit][1] == 0:\n out = myQ[Exit][0]\n out_count += 1\n # 남아 있는 피자가 있으면\n if M > pizza_count:\n myQ[Exit] = [pizza_count,data[pizza_count]]\n pizza_count += 1\n # 없으면\n else:\n myQ[Exit] = None\n \n print(f\"#{t} {out+1}\") \n \n","sub_path":"OnlineJudge/SWExpertAcademy/InterMediate/06/im1902265099.py","file_name":"im1902265099.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78765902","text":"#!/usr/bin/env python\n\n# Weasyl Notification Counter: Graphical (Tkinter) Edition\n# Version 1.2\n# By Sil\n#\n# This application was created by a third-party and uses the official Weasyl application programming interface (API)\n#\n# The creator of this application does not claim to be part of Weasyl or officially associated with Weasyl other than\n# being a member of Weasyl and a frequent user of Weasyl services. Weasyl, the Weasyl icon, the Weasyl name and Wesley,\n# the Weasyl mascot, are property of the Weasyl team.\n#\n# This application is released under a loose open-source license. You may use, copy, edit and distribute this program,\n# and it's source code at no cost and with no prior permission, so long as you do not claim full credit to the code.\n#\n# Special thanks to Syfaro for his introduction to the god awful PEP8 regulations. Seriously, my blue brother, you're\n# a sadistic vulpine. :3\n\ntry:\n from tkinter import *\nexcept:\n from Tkinter import *\ntry:\n import tkMessageBox\nexcept:\n import tkmessagebox\nfrom PIL import ImageTk, Image\nimport os\nfrom os.path import expanduser, join\nimport requests\nimport json\nimport urllib\nimport thread\nimport time\n\n\ndef manualrefresh():\n try:\n print(\"Manual update: Downloading notification information...\")\n result = requests.get(\n 'https://weasyl.com/api/messages/summary', headers={'X-Weasyl-API-Key': key})\n except:\n tkMessageBox.showerror(\"Connection lost\", \"The connection to Weasyl could not be established, and we were unable to download notification information. Check your internet connection and try again.\")\n os._exit(1)\n js = result.json()\n print(\"Notification information downloaded. Parsing...\")\n subm = js[\"submissions\"]\n comm = js[\"comments\"]\n noti = js[\"notifications\"]\n jour = js[\"journals\"]\n unre = js[\"unread_notes\"]\n marked = 0\n if subm > 0:\n marked = marked + subm\n if jour > 0:\n marked = marked + jour\n if noti > 0:\n marked = marked + noti\n if comm > 0:\n marked = marked + comm\n if unre > 0:\n marked = marked + unre\n print(\"Parsed. 
Updating...\")\n if marked == 0 or marked > 1:\n totalnotifications.set(str(marked)+\" notifications\")\n else:\n totalnotifications.set(str(marked)+\" notification\")\n submi.set(\"Submissions (\"+str(subm)+\")\")\n journ.set(\"Journals (\"+str(jour)+\")\")\n comme.set(\"Comments (\"+str(comm)+\")\")\n notif.set(\"Notifications (\"+str(noti)+\")\")\n unrea.set(\"Notes (\"+str(unre)+\")\")\n windowtitle = \"Weasyl (\"+str(marked)+\")\"\n root.title(windowtitle)\n print(\"Updated.\")\n pass\n\n\ndef refresh():\n while True:\n time.sleep(checkdelay)\n try:\n print(\"\\nAutomatic update: Downloading notification information...\")\n result = requests.get(\n 'https://weasyl.com/api/messages/summary', headers={'X-Weasyl-API-Key': key})\n except:\n tkMessageBox.showerror(\"Connection lost\", \"The connection to Weasyl could not be established, and we were unable to download notification information. Check your internet connection and try again.\")\n os._exit(1)\n js = result.json()\n print(\"Notification information downloaded. Parsing...\")\n subm = js[\"submissions\"]\n comm = js[\"comments\"]\n noti = js[\"notifications\"]\n jour = js[\"journals\"]\n unre = js[\"unread_notes\"]\n marked = 0\n if subm > 0:\n marked = marked + subm\n if jour > 0:\n marked = marked + jour\n if noti > 0:\n marked = marked + noti\n if comm > 0:\n marked = marked + comm\n if unre > 0:\n marked = marked + unre\n print(\"Parsed. Updating...\")\n if marked == 0 or marked > 1:\n totalnotifications.set(str(marked)+\" notifications\")\n else:\n totalnotifications.set(str(marked)+\" notification\")\n submi.set(\"Submissions (\"+str(subm)+\")\")\n journ.set(\"Journals (\"+str(jour)+\")\")\n comme.set(\"Comments (\"+str(comm)+\")\")\n notif.set(\"Notifications (\"+str(noti)+\")\")\n unrea.set(\"Notes (\"+str(unre)+\")\")\n windowtitle = \"Weasyl (\"+str(marked)+\")\"\n root.title(windowtitle)\n print(\"Updated.\")\n pass\n\n\nhome = expanduser(\"~\")\nfileloc = join(home, \".weasylkey.api\")\n\ntry:\n with open(fileloc, \"r\") as keyfile:\n keyfilecontents = keyfile.readline()\n key = keyfilecontents[:-1]\n try:\n delaycontents = keyfile.readline()\n delay = delaycontents[:-1]\n checkdelay = int(delay)\n except:\n print(\"Your previous configuration file does not support automatic updating. Remove it and try again. (path is '~/.weasylkey.api')\")\n quit()\nexcept:\n print(\"Go to https://www.weasyl.com/control/apikeys and make a new API key, then copy the key and paste it below.\")\n key = str(raw_input(\"API Key: \"))\n print(\"\")\n print(\"Enter the desired length (in seconds) between each check.\")\n checkdelay = int(raw_input(\"Delay: \"))\n try:\n with open(fileloc, \"w\") as keyfile:\n keyfile.write(key + \"\\n\")\n keyfile.write(str(checkdelay) + \"\\n\")\n except:\n print(\"\\nWe could not save your API key. You will need to enter it next time you open this program.\")\n pass\n print(\"\")\n\nprint(\"Logging into Weasyl...\")\n\nresult = requests.get(\n 'https://weasyl.com/api/whoami', headers={'X-Weasyl-API-Key': key})\n\nif not result.status_code == 200:\n tkMessageBox.showerror(\"Unable to log in\", \"We were unable to log you into Weasyl. Check your API key and try again. 
(you will need to delete .weasylkey.api in your home folder first)\")\n quit()\n\njs = result.json()\nusername = str(js[\"login\"])\n\nprint(\"Logged in as user \"+str(username)+\".\")\n\nprint(\"Downloading avatar information for user \"+str(username)+\"...\")\n\nreq = {'username': username}\nresult = requests.get('http://weasyl.com/api/useravatar', params=req)\njs = result.json()\navatarurl = js[\"avatar\"]\n\nprint(\"Downloading avatar for user \"+str(username)+\"...\")\n\ntry:\n urllib.urlretrieve(avatarurl, \".avatar.png\")\nexcept:\n tkMessageBox.showerror(\"Could not download avatar\", \"Your Weasyl avatar image could not be downloaded. Check your internet connection and try again.\")\n quit()\n\nprint(\"Downloading notification information...\")\n\nresult = requests.get(\n 'https://weasyl.com/api/messages/summary', headers={'X-Weasyl-API-Key': key})\njs = result.json()\nsubm = js[\"submissions\"]\ncomm = js[\"comments\"]\nnoti = js[\"notifications\"]\njour = js[\"journals\"]\nunre = js[\"unread_notes\"]\nmarked = 0\nif subm > 0:\n marked = marked + subm\nif jour > 0:\n marked = marked + jour\nif noti > 0:\n marked = marked + noti\nif comm > 0:\n marked = marked + comm\nif unre > 0:\n marked = marked + unre\n\nroot = Tk()\n\nif marked == 0 or marked > 1:\n totalnotifications = StringVar()\n totalnotifications.set(str(marked)+\" notifications\")\nelse:\n totalnotifications = StringVar()\n totalnotifications.set(str(marked)+\" notification\")\n\nsubmi = StringVar()\nsubmi.set(\"Submissions (\"+str(subm)+\")\")\njourn = StringVar()\njourn.set(\"Journals (\"+str(jour)+\")\")\ncomme = StringVar()\ncomme.set(\"Comments (\"+str(comm)+\")\")\nnotif = StringVar()\nnotif.set(\"Notifications (\"+str(noti)+\")\")\nunrea = StringVar()\nunrea.set(\"Notes (\"+str(unre)+\")\")\n\nwindowtitle = \"Weasyl (\"+str(marked)+\")\"\n\nname = Label(root, text=username, font=\"Arial 20\")\nname.grid(row=0, column=0)\nstatus = Label(root, textvariable=totalnotifications)\nstatus.grid(row=1, column=0)\nimg = ImageTk.PhotoImage(Image.open(\".avatar.png\"))\npanel = Label(root, image=img, relief=RAISED, height=100, width=100)\npanel.grid(row=0, column=1, rowspan=2)\nsubmlabel = Label(root, textvariable=submi)\nsubmlabel.grid(row=2, column=0, columnspan=2)\njourlabel = Label(root, textvariable=journ)\njourlabel.grid(row=3, column=0, columnspan=2)\ncommlabel = Label(root, textvariable=comme)\ncommlabel.grid(row=4, column=0, columnspan=2)\nnotilabel = Label(root, textvariable=notif)\nnotilabel.grid(row=5, column=0, columnspan=2)\nunrelabel = Label(root, textvariable=unrea)\nunrelabel.grid(row=6, column=0, columnspan=2)\nupdatebutton = Button(root, text=\"Refresh\", command=manualrefresh)\nupdatebutton.grid(row=7, column=0, columnspan=2)\nthread.start_new_thread(refresh, ())\nroot.title(windowtitle)\nroot.mainloop()\n","sub_path":"WeasylApp.py","file_name":"WeasylApp.py","file_ext":"py","file_size_in_byte":8396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"371182834","text":"#-*- encoding: utf-8 -*-\n\n'''\n@brief Módulo encargado de gestionar del ratón.\nSimulación de patrón Singleton con el módulo.\nLleva el control de qué botones están pulsados y cuales no.\n'''\n\nimport pygame\n\n#Distintos botones del raton\nLEFT, CENTER, RIGHT = range(3)\n\n#Varianles\n__actual_mouse = None\n__old_mouse = None\n__initialize = False\n\ndef __check_initialize():\n '''\n @brief Función que inicializa el ratón si no lo estaba\n '''\n global __actual_mouse\n global 
__old_mouse\n global __initialize\n \n if not __initialize:\n __actual_mouse = pygame.mouse.get_pressed()\n __old_mouse = pygame.mouse.get_pressed()\n __initialize = True\n\ndef update():\n '''\n @brief Función que actualiza el ratón, llamar en cada iteración del bucle principal\n '''\n global __actual_mouse\n global __old_mouse\n \n __check_initialize()\n __old_mouse = __actual_mouse\n __actual_mouse = pygame.mouse.get_pressed()\n \ndef pressed(button):\n '''\n @brief Consulta si un botón está pulsado\n \n @param button Botón a consultar\n @return True si lo está, False en caso contrario\n '''\n global __actual_mouse\n \n return __actual_mouse[button]\n\ndef release(button):\n '''\n @brief Consulta si un botón acaba de ser soltado\n \n @param button Botón a consultar\n @return True si lo está, False en caso contrario\n '''\n global __actual_mouse\n global __old_mouse\n \n return ((not __actual_mouse[button]) and __old_mouse[button])\n\ndef newpressed(button):\n '''\n @brief Consulta si un botón acaba de ser pulsado\n \n @param button Botón a consultar\n @return True si lo está, False en caso contrario\n '''\n global __actual_mouse\n global __old_mouse\n \n return (__actual_mouse[button] and (not __old_mouse[button]))\n \ndef position():\n '''\n @brief Función que devuelve la posición del puntero del ratón\n \n @return Tupla con la posición x e y\n '''\n return pygame.mouse.get_pos()\n\n","sub_path":"engine/mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1922614","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nhousing_estate = np.array([[-100, 200], [-100, 0], [100, 0], [100, 200]])\navailable_housing_estate = np.array([[-98, 198], [-98, 2], [98, 2], [98, 198]])\nfloors = np.array([\n [[60, 50], [60, 100], [80, 100], [80, 50]],\n [[-80, 29], [-80, 64], [-60, 64], [-60, 29]],\n])\n\n\nx_floors = np.array([[floor[0][0], floor[2][0]] for floor in floors])\ny_floors = np.array([[floor[0][1], floor[1][1]] for floor in floors])\n\nx_min = available_housing_estate[0][0]\nx_max = available_housing_estate[2][0]\n\ny_min = available_housing_estate[1][1]\ny_max = available_housing_estate[0][1]\n\nx_availables = []\ny_availables = []\nfloors_size = floors.shape[0]\n\n\ndef get_y_floors(x1, x2):\n x_interval_floors = np.array([floor for floor in floors if floor[0][0] >= x1 and floor[2][0] <= x2])\n print(x_interval_floors)\n for y_floor in y_floors:\n for i, floor in enumerate(x_interval_floors):\n print(floor[0][0])\n if floor[0][0] >= y_floor[0] and floor[2][0] <= y_floor[1]:\n y_available = [x1, x2, x_floor[1], x_max]\n y_availables.append(y_available)\n if y_max > y_floor[0]:\n y_max = y_floor[0]\n if i + 1 == floors_size:\n y_availables.append([x1, x2, x_min, floor[0][0]])\n print('y_availables', y_availables)\n return y_availables\n\n\nfor x_floor in x_floors:\n for i, floor in enumerate(floors):\n if floor[0][0] >= x_floor[0] and floor[2][0] <= x_floor[1]:\n x_available = [x_floor[1], x_max]\n # get_y_floors(x_floor[0], x_max)\n x_availables.append(x_available)\n if x_max > x_floor[0]:\n x_max = x_floor[0]\n if i + 1 == floors_size:\n x_availables.append([x_min, floor[0][0]])\n\nprint(x_availables)\nprint(y_availables)\n\n\ndef main():\n fig1 = plt.figure(figsize=(14, 7))\n plt.xlim(housing_estate[0][0], housing_estate[2][0])\n plt.ylim(housing_estate[1][1], housing_estate[0][1])\n x_ticks = np.linspace(-100, 100, 1)\n y_ticks = np.linspace(0, 100, 
1)\n plt.xticks(x_ticks)\n plt.yticks(y_ticks)\n axes1 = fig1.add_subplot()\n available_housing = plt.Polygon(xy=available_housing_estate, alpha=0.0)\n axes1.add_patch(available_housing)\n for floor in floors:\n square = plt.Polygon(xy=floor, color='blue', alpha=1)\n axes1.add_patch(square)\n for x_floor in x_availables:\n floor = [[x_floor[0], 198], [x_floor[0], 2], [x_floor[1], 0], [x_floor[1], 198]]\n square = plt.Polygon(xy=floor, color='red', alpha=0.5)\n axes1.add_patch(square)\n plt.show()\n","sub_path":"com/underground/garage/arrangement/application_launcher.py","file_name":"application_launcher.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580940762","text":"import numpy as np\nimport cv2\nimport cv2.aruco as aruco\nimport pickle\nfrom scipy.spatial import distance as dist\nimport process_common\nimport math\n\n\ndef rotationMatrixToEulerAngles(R):\n sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n if not singular :\n x = math.atan2(R[2,1] , R[2,2])\n y = math.atan2(-R[2,0], sy)\n z = math.atan2(R[1,0], R[0,0])\n else :\n x = math.atan2(-R[1,2], R[1,1])\n y = math.atan2(-R[2,0], sy)\n z = 0\n return np.array([x, y, z])\n\n\n\ndef getImageCorners_singleMarker(inputImage, calibrationData):\n\n cameraMatrix, distorsionCoefficients, rvecs, tvecs = calibrationData\n\n # get corners\n aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)\n parameters = aruco.DetectorParameters_create()\n\n parameters.cornerRefinementWinSize = 2\n parameters.cornerRefinementMethod = aruco.CORNER_REFINE_SUBPIX\n parameters.cornerRefinementMaxIterations = 50\n # parameters.minOtsuStdDev = 20\n\n h, w = inputImage.shape[:2]\n cameraMatrixNew, roi = cv2.getOptimalNewCameraMatrix(cameraMatrix, distorsionCoefficients, (w, h), 0, (w, h))\n inputImage = cv2.undistort(inputImage, cameraMatrix, distorsionCoefficients, None, cameraMatrixNew)\n\n corners, ids, rejectedImqgPoints = aruco.detectMarkers(inputImage, aruco_dict, parameters=parameters)\n\n dst = aruco.drawDetectedMarkers(inputImage, corners)\n\n poseRvecs, poseTvecs, trash = aruco.estimatePoseSingleMarkers(corners, 0.04, cameraMatrix, distorsionCoefficients)\n\n if poseTvecs is not None and poseRvecs.shape[2] == 3:\n for i in range(len(poseTvecs)):\n aruco.drawAxis(dst, cameraMatrixNew, distorsionCoefficients, poseRvecs[i], poseTvecs[i], 0.1)\n\n from math import pi, atan2, asin\n\n R = cv2.Rodrigues(poseRvecs[0])[0]\n\n pitch = atan2(-R[2][1], R[2][2])\n yaw = asin(R[2][0])\n roll = atan2(-R[1][0], R[0][0])\n\n roll_angle = 180 * atan2(-R[2][1], R[2][2]) / pi\n pitch_angle = 180 * asin(R[2][0]) / pi\n yaw_angle = 180 * atan2(-R[1][0], R[0][0]) / pi\n\n cv2.imshow('complete', dst)\n\n if (len(corners) > 0):\n tlCorner = corners[0]\n\n ordered = process_common.order_points(tlCorner[0])\n\n pixelDistanceX = np.linalg.norm(ordered[0] - ordered[1])\n pixelDistanceY = np.linalg.norm(ordered[0] - ordered[3])\n\n tl = (ordered[0][0], ordered[0][1])\n\n nominal_size = 19\n d1 = 170 * pixelDistanceX / nominal_size\n d2 = 95 * pixelDistanceY / nominal_size\n\n tr = (int(tl[0] + d1*np.cos(roll)*np.cos(yaw)), int(tl[1] - d1*np.sin(roll)*np.cos(pitch)))\n bl = (int(tl[0] + d2*np.sin(roll)*np.cos(yaw)), int(tl[1] + d2*np.cos(roll)*np.cos(pitch)))\n tl = (int(tl[0]), int(tl[1]))\n\n br = ((bl[0] + tr[0]-tl[0]), (bl[1] + tr[1]-tl[1]))\n\n\n return 
(tl,tr,br,bl)\n\n\n\n","sub_path":"perspectiveReprojection/process_singleMarker.py","file_name":"process_singleMarker.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"344565077","text":"import cv2 as cv\nimport numpy as np\n\n\ndef hsv_trackbars_pos(unused=None, name=None):\n\n \"\"\" Returns the lower and upper hsv range boundaries from the Mask Detection Trackbar \"\"\"\n\n hue_min = cv.getTrackbarPos(\"Hue (Min)\", name)\n hue_max = cv.getTrackbarPos(\"Hue (Max)\", name)\n sat_min = cv.getTrackbarPos(\"Sat (Min)\", name)\n sat_max = cv.getTrackbarPos(\"Sat (Max)\", name)\n val_min = cv.getTrackbarPos(\"Val (Min)\", name)\n val_max = cv.getTrackbarPos(\"Val (Max)\", name)\n done = cv.getTrackbarPos(\"Done\", name)\n\n hsv_lower_bound = np.array([hue_min, sat_min, val_min])\n hsv_upper_bound = np.array([hue_max, sat_max, val_max])\n\n return (hsv_lower_bound, hsv_upper_bound), done\n\n\ndef hsv_trackbars_create(name):\n\n \"\"\" Color Detection Trackbars \"\"\"\n cv.namedWindow(name, cv.WINDOW_FREERATIO)\n cv.resizeWindow(name, 500,500)\n cv.createTrackbar(\"Hue (Min)\", name, 0, 179, hsv_trackbars_pos)\n cv.createTrackbar(\"Hue (Max)\", name, 179, 179, hsv_trackbars_pos)\n cv.createTrackbar(\"Sat (Min)\", name, 0, 255, hsv_trackbars_pos)\n cv.createTrackbar(\"Sat (Max)\", name, 255, 255, hsv_trackbars_pos)\n cv.createTrackbar(\"Val (Min)\", name, 0, 255, hsv_trackbars_pos)\n cv.createTrackbar(\"Val (Max)\", name, 255, 255, hsv_trackbars_pos)\n cv.createTrackbar(\"Done\", name, 0, 1, hsv_trackbars_pos)\n\n\n# The following code is used to tune the color ranges\n\"\"\"\nhsv_trackbars_create()\nwhile True:\n lower_bound, upper_bound = hsv_trackbars_pos()\n mask = cv.inRange(img_hsv, lowerb=lower_bound, upperb=upper_bound)\n img_masked = cv.bitwise_and(img_original, img_original, mask=mask)\n color_extraction_hstack = stack_images(0.8, [[img_original, img_masked], [img_hsv, mask]])\n cv.imshow(\"Color Extraction Stack\", color_extraction_hstack)\n cv.waitKey(1)\n\"\"\" \n","sub_path":"color_filter.py","file_name":"color_filter.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71232250","text":"#!/usr/bin/env python\n\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\"\"\"\nDefine the launcher classes, responsible of running the tested applications.\n\"\"\"\n\nfrom mozlog.structured import get_default_logger\nfrom mozprofile import FirefoxProfile, ThunderbirdProfile, Profile\nfrom mozrunner import Runner\nfrom mozfile import rmtree\nfrom mozdevice import ADBAndroid, ADBHost\nimport mozversion\nimport mozinstall\nimport tempfile\nimport os\n\nfrom mozregression.utils import ClassRegistry, download_url, yes_or_exit\n\n\nclass Launcher(object):\n \"\"\"\n Handle the logic of downloading a build file, installing and\n running an application.\n \"\"\"\n def __init__(self, url, persist=None, persist_prefix=''):\n self._running = False\n self._logger = get_default_logger('Test Runner')\n\n basename = os.path.basename(url)\n if persist:\n dest = os.path.join(persist, '%s%s' % (persist_prefix, basename))\n if not os.path.exists(dest):\n self._download(url, dest)\n else:\n self._logger.info(\"Using local file: %s\" % dest)\n else:\n dest = basename\n self._download(url, dest)\n\n try:\n self._install(dest)\n finally:\n if not persist:\n os.unlink(dest)\n\n def start(self, **kwargs):\n \"\"\"\n Start the application.\n \"\"\"\n if not self._running:\n self._start(**kwargs)\n self._running = True\n\n def stop(self):\n \"\"\"\n Stop the application.\n \"\"\"\n if self._running:\n self._stop()\n self._running = False\n\n def get_app_info(self):\n \"\"\"\n Return information about the application.\n \"\"\"\n if self._running:\n return self._get_app_info()\n\n def __del__(self):\n self.stop()\n\n def _download(self, url, dest):\n self._logger.info(\"Downloading build from: %s\" % url)\n download_url(url, dest)\n\n def _get_app_info(self):\n raise NotImplementedError\n\n def _install(self, dest):\n raise NotImplementedError\n\n def _start(self, **kwargs):\n raise NotImplementedError\n\n def _stop(self):\n raise NotImplementedError\n\n\nclass MozRunnerLauncher(Launcher):\n tempdir = None\n runner = None\n app_name = 'undefined'\n profile_class = Profile\n binary = None\n\n def _install(self, dest):\n self.tempdir = tempfile.mkdtemp()\n self.binary = mozinstall.get_binary(\n mozinstall.install(src=dest, dest=self.tempdir),\n self.app_name)\n\n def _start(self, profile=None, addons=(), cmdargs=()):\n if profile:\n profile = self.profile_class(profile=profile, addons=addons)\n elif len(addons):\n profile = self.profile_class(addons=addons)\n else:\n profile = self.profile_class()\n\n process_args = {'processOutputLine': [self._logger.debug]}\n self.runner = Runner(binary=self.binary,\n cmdargs=cmdargs,\n profile=profile,\n process_args=process_args)\n self.runner.start()\n\n def _stop(self):\n self.runner.stop()\n\n def __del__(self):\n try:\n Launcher.__del__(self)\n finally:\n # always remove tempdir\n if self.tempdir is not None:\n rmtree(self.tempdir)\n\n def _get_app_info(self):\n return mozversion.get_version(binary=self.binary)\n\n\nREGISTRY = ClassRegistry('app_name')\n\n\ndef create_launcher(name, url, persist=None, persist_prefix=''):\n \"\"\"\n Create and returns an instance launcher for the given name.\n \"\"\"\n return REGISTRY.get(name)(url,\n persist=persist,\n persist_prefix=persist_prefix)\n\n\n@REGISTRY.register('firefox')\nclass FirefoxLauncher(MozRunnerLauncher):\n profile_class = FirefoxProfile\n\n\n@REGISTRY.register('thunderbird')\nclass ThunderbirdLauncher(MozRunnerLauncher):\n profile_class = 
ThunderbirdProfile\n\n\n@REGISTRY.register('b2g')\nclass B2GLauncher(MozRunnerLauncher):\n pass\n\n\n@REGISTRY.register('fennec')\nclass FennecLauncher(Launcher):\n app_info = None\n\n def _install(self, dest):\n while not ADBHost().devices():\n yes_or_exit(\"WARNING: no device connected. Connect a device\"\n \" and try again.\\nTry again?\")\n\n self.adb = ADBAndroid()\n yes_or_exit(\"WARNING: bisecting nightly fennec builds will clobber\"\n \" your existing nightly profile. Continue?\")\n\n self.adb.uninstall_app(\"org.mozilla.fennec\")\n self.adb.install_app(dest)\n # get info now, as dest may be removed\n self.app_info = mozversion.get_version(binary=dest)\n\n def _start(self, **kwargs):\n self.adb.launch_fennec(\"org.mozilla.fennec\")\n\n def _stop(self):\n self.adb.stop_application(\"org.mozilla.fennec\")\n\n def _get_app_info(self):\n return self.app_info\n","sub_path":"mozregression/launchers.py","file_name":"launchers.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"634381047","text":"from django import template\nfrom apps.blog.models import Category\n\nregister = template.Library()\n\n\n@register.inclusion_tag(filename='blog/category_sidebar.html',\n name='include_category_sidebar')\ndef show_post_categories():\n context = {\n 'categories': Category.objects.all()\n }\n return context\n","sub_path":"apps/blog/templatetags/blog_template_tags.py","file_name":"blog_template_tags.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"458730894","text":"import os\ntry: \n import sqlite3\nexcept:\n os.system('pip3 install sqlite3')\n import sqlite3\n \ntry:\n from pymongo import MongoClient\nexcept:\n os.system('pip3 install pymongo')\n from pymongo import MongoClient\n\"\"\"\nThis try/except is for if there is no lib, \ndownload it and use it in the code\n\"\"\"\n\nclass MongoDb():\n \"\"\"\n This Connect to Mongo DB\n The functions of this class are\n \n - insert_one (data)\n This function needs a one dict\n example:\n {\n 'name':'Jhon Wesley',\n 'age':19\n }\n \n - find_one (data)\n This function need a query for find one and first element\n example:\n {\n 'age':19\n } \n \n - find_all(query)\n This function is similar to find_one, but return one list with \n all elements to query. If the query is not specified, \n the return will be all items.\n\n example:\n {\n 'name': {\n '$ne' : 'Jhon Wesley'\n }\n }\n \n - update_one(query,set_update)\n This function needs two parameters, one query and the other update.\n The query find one item for update, updates one item at a time\n \n example:\n query = {\n 'name':'Jhon Wesley',\n 'age':19\n }\n update = {\n 'age':25\n }\n \n - find_and_delete(query)\n This function delete one item. 
Needs a query to find one item for remove\n example:\n query = {\n 'name':'Jhon Wesley',\n 'age':25\n }\n \n \n \"\"\"\n \n \n def __init__(self,host:str=\"mongodb://localhost:27017/\",set_dataBase:str=\"defaultDB\",collection:str=\"userCollection\"):\n self.client = MongoClient(host)\n self.db = self.client[set_dataBase]\n self.conector = self.db[collection]\n #db..\n \n def insert_one(self, query):\n self.conector.insert_one(query)\n \n def find_all(self,query=None):\n if query == None:\n return [row for row in self.conector.find()]\n else:\n return [row for row in self.conector.find(query)]\n \n def find_one(self,query:dict):\n \n return self.conector.find_one(query)\n \n def update_one(self,query,set_update):\n self.conector.find_one_and_update(query,{\"$set\":set_update})\n \n def find_and_delete(self,query:dict):\n self.conector.find_one_and_delete(query)\n \nclass SqliteDb():\n def __init__(self,path:str=\"data/dataBase.db\"):\n self.db = path\n \n def _connect(self):\n conector = sqlite3.connect(self.db)\n cursor = conector.cursor()\n return conector,cursor\n \n def _creatTable(self):\n conector,cursor = self._connect()\n \n table=\"\"\"\n CREATE TABLE IF NOT EXISTS candidato (\n cpf NOT NULL PRIMARY KEY,\n nome VARCHAR NOT NULL,\n nota FLOAT NOT NULL,\n id_input VARCHAR NOT NULL\n )\n \"\"\"\n cursor.execute(table)\n \n def insert_one(self,cpf,nome,nota,id_input):\n conector,cursor = self._connect()\n self._creatTable()\n \n sql = f\"\"\"\n INSERT INTO candidato (cpf,nome,nota,id_input)\n VALUES ('{cpf}','{nome}','{nota}','{id_input}')\n \"\"\"\n \n cursor.execute(sql)\n conector.commit()\n conector.close()\n \n def find_all(self,table:str,column:str='*'):\n conector,cursor = self._connect()\n sql=f\"\"\"\n SELECT {column}\n FROM {table}\n \"\"\"\n cursor.execute(sql)\n return cursor.fetchall()\n \n def find_one(self,table:str,column:str='*',field:str='cpf',index:str='58127043680'):\n conector,cursor = self._connect()\n sql=f\"\"\"\n SELECT {column}\n FROM {table}\n WHERE {field} = '{index}'\n \"\"\"\n cursor.execute(sql)\n return cursor.fetchall()\n \n \n def update_one(self,data):\n pass\n \n def find_and_delete(self,data):\n pass\n","sub_path":"saveData.py","file_name":"saveData.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"160329580","text":"# _*_ CODING:UTF-8 _*_\n'''\n@authot:马维畅\n@time:2018/10/15 21:10\n'''\n\ndef DayDayUp(df):\n dayup = 1.0\n\n for i in range(365):\n if i % 7 in [6,0]:\n dayup = dayup*(1 - 0.01)\n else:\n dayup = dayup*(1 + df)\n return dayup\n\ndayfactor = 0.01\nwhile DayDayUp(dayfactor) < 37.78:\n dayfactor += 0.001\n\nprint(\"努力指数:{:.3f}\".format(dayfactor))","sub_path":"expert-func/DayDayUp_2.py","file_name":"DayDayUp_2.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"269325627","text":"#!/usr/bin/env python\nimport rospy\nimport fileinput\nimport sys\nfrom math import sqrt\nfrom turtlesim.msg import Pose\nfrom paveturtle.msg import Point, Plan\n\nclass MissionControl():\n def __init__(self, waypoints):\n rospy.init_node('mission_control')\n rospy.loginfo('launched mission_control')\n self.waypoint = waypoints\n self.pub = rospy.Publisher('goto', Point)\n self.dist = 0\n rospy.Subscriber('turtle1/pose', Pose, self.turtlesays)\n rospy.Subscriber('distance', Plan, self.plannersays)\n def turtlesays(self, pose):\n pt = self.waypoint[0]\n dx = pt[0] - 
pose.x\n dy = pt[1] - pose.y\n self.dist = sqrt(dx * dx + dy * dy)\n def plannersays(self, dist):\n pt = self.waypoint[0]\n if self.dist < 0.1:\n self.waypoint = self.waypoint[1:]\n if len(self.waypoint) == 0:\n rospy.signal_shutdown('reached all waypoints')\n else:\n rospy.loginfo('reached waypoint (%f,%f)' % pt)\n pt = self.waypoint[0]\n else:\n rospy.loginfo('headed toward (%f,%f)' % pt)\n point = Point()\n point.x, point.y = pt\n self.pub.publish(point)\n rospy.sleep(1.0)\n\nif __name__ == '__main__':\n waypoints = []\n waypoints.extend([(float(x[0]), float(x[1])) for x in (line.split() for line in fileinput.input())])\n fileinput.close()\n if len(waypoints) == 0:\n rospy.signal_shutdown('no coordinates provided by input file')\n else:\n mission = MissionControl(waypoints)\n rospy.spin()\n","sub_path":"paveturtle/scripts/mission_control.py","file_name":"mission_control.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337775252","text":"import torch\n\nfrom kotonoha.metrics.accuracy import word_accuracy\n\n\ndef test_word_accuracy():\n # prediction is [[2, 1, 2, 2]] so accuracy should be 2 / 3\n logits = torch.tensor([[[0, 0, 1],\n [0, 1, 0],\n [0, 0, 1],\n [0, 0, 1]]])\n tgt_seq = torch.tensor([[2, 1, 1, 0]])\n\n assert word_accuracy(logits, tgt_seq) == 2 / 3\n\n # this should be same\n logits = torch.tensor([[[0, 0, 1],\n [0, 1, 0],\n [0, 0, 1]]])\n\n assert word_accuracy(logits, tgt_seq) == 2 / 3\n","sub_path":"tests/test_metrics/test_accuracy.py","file_name":"test_accuracy.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173157438","text":"from x64.Release.MikeLearn import NeuralNetwork\nfrom x64.Release.MikeLearn import ClassificationOptimizer\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport time\n\n#=======================================================\n# Training Set\n#=======================================================\n\nmodeldata = pickle.load( open( \"mnist_data.p\", \"rb\" ) )\nX = modeldata['X']\nY = modeldata['Y']\n\nX = np.array(X)\nX = X/255\nX = X.tolist()\n\nnIn = len(X[0])\nnOut = len(Y[0])\n\n#=======================================================\n# Model\n#=======================================================\nverbosity = 1\n\nN = NeuralNetwork([nIn,350,350,nOut],['sigmoid','sigmoid','softmax'])\nN.setLoggerVerbosity(verbosity)\n\nOpt = ClassificationOptimizer(N,X,Y)\n\nOpt.setLoggerVerbosity(verbosity)\nstart_time = time.time()\nE = Opt.fit(2,0.1)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n#plt.plot(E)\n#plt.show()\n#================================\n\nmodel_test_data = pickle.load( open( \"mnist_test_data.p\", \"rb\" ) )\nX2 = model_test_data['X']\nY2 = model_test_data['Y']\nprint(len(X2))\nprint(len(Y2))\nX2 = np.array(X2)\nX2 = X2/255\nX2 = X2.tolist()\n\nXL = Opt.predict(X2)\n\nprint(len(XL))\n\ncorrect = 0\nfor i,x in enumerate(XL):\n if XL[i].index(max(XL[i])) == Y2[i].index(max(Y2[i])):\n correct = correct + 1\n\nprint(\"Correct = \" + str(correct))\nAccuracy = correct/len(XL)*100\nprint(\"Accuracy = \" + str(Accuracy))\n\n\n","sub_path":"RunTest5.py","file_name":"RunTest5.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224104752","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 10 14:52:33 
2016\n\n@author: sichengzhu\n\"\"\"\nimport os\nimport time\nimport cPickle\n\nfrom dpCluster.core.distance.distance import PearsonDistance, ConsineDistance, EuclideanDistance\nfrom dpCluster.api.utils.logger_factory import LoggerFactory\n\nlogger = LoggerFactory.get_logger()\n\nclass DistanceCalculator():\n \n def __init__(self):\n pass\n \n def calculate(self, X, channelId, outputFolderPath, metric='cosine'):\n logger.info('start distance calculation, metric = {0}'.format(\n metric))\n if metric == 'cosine':\n distance_obj = ConsineDistance()\n elif metric == 'euclidean':\n distance_obj = EuclideanDistance()\n elif metric == 'pearson':\n distance_obj = PearsonDistance()\n else:\n raise Exception(\"metric invalid\")\n \n _start = time.time()\n distance_f = []\n for i in xrange(len(X) - 1):\n for j in xrange(i, len(X)):\n distance_f.append((i + 1, j + 1, distance_obj.distance(X[i], X[j])))\n \n logger.info('distance calculation finished in {:.2f} seconds'.format(time.time() - _start))\n# self._saveCache(distance_f, channelId, outputFolderPath)\n return distance_f\n\n def _saveCache(self, distance_f, channelId, outputFolderPath):\n with open(os.path.join(outputFolderPath, \"distance_{0}.txt\".format(channelId)), 'w') as cacheFile:\n cPickle.dump(distance_f, cacheFile)\n\n","sub_path":"cserver/dpCluster/core/distanceCalculator.py","file_name":"distanceCalculator.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"317852774","text":"\"\"\" Name : Neil Barot\nCourse : CMPS 1500\nLab Section : Tuesday 2 -3.15 pm\nAssignment : hw9pr2.py\nDate : 11/11/14\n\"\"\"\n\nclass Node:\n def __init__(self, data): #constructer\n self.data = data\n self.next = None\n \n def __str__(self): #overloading str method\n return str(self.data) \n\ndef bstlist(root):\n lst = []\n postorderTrav(root, lst) #creates a regular list of the postorder traversal\n if len(lst) > 0: #converts list to linked list\n head = Node(lst[0]) #sets head as first element of list\n current = head\n for i in range(1, len(lst)): #goes through list and makes it into linked list\n current.next = Node(lst[i])\n current = current.next\n else: #checks if list is empty, and if so, makes head an empty node\n head = Node(None)\n return head #returns the head\n \n\ndef postorderTrav(subtree, lst): #traverses the list following post order -- recursively\n if subtree != None:\n postorderTrav(subtree.left, lst)\n postorderTrav(subtree.right, lst)\n lst += [subtree.data] #appends values to lst\n\n\n","sub_path":"Homework 9/hw9pr2.py","file_name":"hw9pr2.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341865198","text":"from flask import Flask, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom blue_print import users\n\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:mysql@localhost:3306/my_form_db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.register_blueprint(users)\ndb = SQLAlchemy(app)\n\ntb_student_course = db.Table('tb_student_course',\n db.Column('student_id', db.Integer, db.ForeignKey('student.id')),\n db.Column('course_id', db.Integer, db.ForeignKey('course.id')))\n\nclass Student(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(32))\n courses = db.relationship('Course', backref='students', secondary='tb_student_course')\n def __repr__(self):\n 
return 'Student: %s %s' % (self.name, self.id)\n\n\nclass Course(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64))\n\n def __repr__(self):\n return 'Courses: %s %s ' % (self.name, self.id)\n\n\n@app.route('/')\ndef index():\n\n student_list = Student.query.all()\n\n return render_template('multi2multi.html', student_list=student_list)\n\n\nif __name__ == '__main__':\n db.drop_all()\n db.create_all()\n\n stu1 = Student(name='张三')\n stu2 = Student(name='李四')\n stu3 = Student(name='王五')\n\n cou1 = Course(name='物理')\n cou2 = Course(name='化学')\n cou3 = Course(name='生物')\n\n stu1.courses = [cou2, cou3]\n stu2.courses = [cou2]\n stu3.courses = [cou1, cou2, cou3]\n\n db.session.add_all([stu1, stu2, stu3])\n db.session.add_all([cou1, cou2, cou3])\n\n db.session.commit()\n\n app.run(debug=True)\n","sub_path":"my_flask_blog/multi2multi.py","file_name":"multi2multi.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"476590318","text":"import base64\nimport json\n\nfrom hackajob_phone_book_api import API_AUTH, database\nfrom hackajob_phone_book_api.api import app\nfrom hackajob_phone_book_api.models import Entry, Phone, Email, Address\n\nauth = {'Authorization': 'Basic ' + base64.b64encode(bytes(':'.join(API_AUTH), 'ascii')).decode('ascii')}\n\n\nclass TestAuth:\n def test_it_handles_not_authorised_users(self):\n with app.test_client() as c:\n r = c.get('/entries/404')\n assert r.status_code == 401\n\n\nclass TestGet:\n def setup_method(self, method):\n reset_database()\n\n def test_handles_situation_when_entry_does_not_exist(self):\n with app.test_client() as c:\n r = c.get('/entries/404', headers=auth)\n assert r.status_code == 404\n assert json.loads(r.data) == {'err': 'Entity with id \"404\" does not exist.'}\n\n def test_get_one_entry(self):\n with app.test_client() as c:\n r = c.get('/entries/1', headers=auth)\n assert r.status_code == 200\n assert json.loads(r.data) == {'addresses': [{'id': 1, 'value': 'Room 67 \\n14 Tottenham Court Road \\nLondon '\n '\\nEngland\\\\W1T 1JY'}],\n 'emails': [{'id': 1, 'value': 'john.doe@example.com'}],\n 'first_name': 'John', 'id': 1, 'last_name': 'Doe',\n 'phones': [{'id': 1, 'value': '12345678'},\n {'id': 2, 'value': '87654321'}]}\n\n\nclass TestPost:\n def setup_method(self, method):\n reset_database()\n\n def test_no_payload(self):\n with app.test_client() as c:\n r = c.post('/entries', headers=auth)\n assert r.status_code == 400\n assert json.loads(r.data) == {'err': 'Please provide payload'}\n\n def test_creates_entry(self):\n with app.test_client() as c:\n r = c.post('/entries',\n data=json.dumps(\n {'addresses': [{'value': 'Box 777 \\n91 Western Road \\nBrighton \\nEngland\\\\BN1 2NW'}],\n 'emails': [{'value': 'john.rambo@example.com'}],\n 'first_name': 'John', 'last_name': 'Rambo',\n 'phones': [{'value': '999999999'},\n {'value': '111111111'}]}),\n headers=auth)\n assert r.status_code == 200\n assert json.loads(r.data) == {'addresses': [], 'emails': [{'id': 2, 'value': 'john.rambo@example.com'}],\n 'first_name': 'John', 'id': 2, 'last_name': 'Rambo',\n 'phones': [{'id': 3, 'value': '999999999'}, {'id': 4, 'value': '111111111'}]}\n\n\nclass TestDelete:\n def test_it_deletes_entry(self):\n with app.test_client() as c:\n r = c.delete('/entries/1', headers=auth)\n assert r.status_code == 200\n\n r = c.get('/entries/1', headers=auth)\n assert r.status_code == 404\n\n\ndef reset_database():\n tables = (Entry, Phone, Email, 
Address)\n\n database.drop_tables(tables, safe=True)\n database.create_tables(tables, safe=True)\n\n entry = Entry.create(first_name='John', last_name='Doe')\n Address.create(value='Room 67 \\n14 Tottenham Court Road \\nLondon \\nEngland\\\\W1T 1JY', entry=entry)\n Email.create(value='john.doe@example.com', entry=entry)\n Phone.create(value='12345678', entry=entry)\n Phone.create(value='87654321', entry=entry)\n\n database.close()\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"107558626","text":"from wtg import db, ma\nfrom sqlalchemy.dialects.postgresql import JSON\nfrom sqlalchemy.schema import UniqueConstraint\nfrom sqlalchemy import (\n func,\n Column,\n String,\n Integer,\n Float,\n DateTime,\n Boolean)\n\n\nclass CommonColumns(db.Model):\n __abstract__ = True\n created = Column(DateTime, default=func.now())\n updated = Column(DateTime, default=func.now(), onupdate=func.now())\n etag = Column(String(64))\n\nclass User(CommonColumns):\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True, autoincrement=True)\n uuid = Column(String(128), unique=True, index=True, nullable=False)\n apns_token = Column(String(128), unique=True)\n upgraded = Column(Boolean, nullable=False)\n is_rest = Column(Boolean, nullable=False)\n rest_start = Column(Integer)\n rest_end = Column(Integer)\n latitude = Column(Float)\n longitude = Column(Float)\n town = Column(String(32))\n region = Column(String(32))\n country = Column(String(32))\n tz_name = Column(String(32))\n tz_abbreviation = Column(String(32))\n units = Column(String(64), nullable=False)\n is_24_hour = Column(Boolean, nullable=False)\n send_event_alerts = Column(Boolean, nullable=False)\n send_summary_alerts = Column(Boolean, nullable=False)\n alert_offset = Column(Integer)\n summary_alert_time = Column(Integer)\n precip_thresh = Column(Float)\n low_temp_thresh = Column(Integer)\n high_temp_thresh = Column(Integer)\n events_list_type = Column(String(16), nullable=False)\n\n\n\nclass Event(CommonColumns):\n __tablename__ = 'events'\n id = Column(Integer, primary_key=True, autoincrement=True)\n app_id = Column(String(128), index=True, nullable=False) #unique per user\n user_uuid = Column(String(128), index=True, nullable=False)\n __table_args__ = (UniqueConstraint('user_uuid', 'app_id', name='_user_uuid_app_id'),)\n title = Column(String(64), nullable=False)\n is_multiday = Column(Boolean, nullable=False) #could be true for two-hour event if on two days\n start = Column(DateTime, nullable=False)\n end = Column(DateTime, nullable=False)\n pretty_date_range = Column(String(64), nullable=False)\n rest_segments = Column(JSON)\n latitude = Column(Float)\n longitude = Column(Float)\n town = Column(String(32))\n region = Column(String(32))\n country = Column(String(32))\n tz_name = Column(String(32))\n tz_abbreviation = Column(String(32))\n alert_time = Column(DateTime, index=True)\n alert_checked = Column(Boolean)\n summary_checked = Column(Boolean)\n\nclass UserSchema(ma.ModelSchema):\n class Meta:\n model = User\n\nclass EventSchema(ma.ModelSchema):\n class Meta:\n model = Event\n","sub_path":"wtg/db_tables.py","file_name":"db_tables.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400178577","text":"from huobi.model.constant import *\r\n\r\n\r\nclass CompleteSubAccountInfo:\r\n 
\"\"\"\r\n Sub-account completed info\r\n\r\n :member\r\n id: The sub-id.\r\n account_type: The sub account type.\r\n balances: The balance list, the content is Balance class.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.id = 0\r\n self.account_type = AccountType.INVALID\r\n self.balances = list()\r\n","sub_path":"build/lib/huobi/model/completesubaccountinfo.py","file_name":"completesubaccountinfo.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173070437","text":"# -*- coding: utf-8 -*-\n# © <2016> \n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n# 1 : imports of python lib\nimport locale\nimport logging\nimport sys \nfrom openerp import models, fields, api\nfrom openerp import SUPERUSER_ID\nfrom openerp import tools\nfrom openerp.osv import osv\nfrom openerp.tools.translate import _\n\n\nreload(sys) \nsys.setdefaultencoding('utf8')\n_logger = logging.getLogger(__name__)\n\n## Documentation for a class.\n# Modificaciones al catálogo de empleados para el funcionamiento del módulo de reclutamiento y selección de personal.\n# Sobrescribiendo el modelo hr.employee\nclass hr_employee_gi(models.Model):\n _inherit = 'hr.employee'\n \n job_id = fields.Many2one('hr.job', 'Puesto vacante', ondelete='cascade')\n department_id = fields.Many2one(related='job_id.department_id', string=\"Árbol de jerarquía\", readonly=True, store=True)\n empl_temp = fields.Boolean('Empleado temporal')\n\n\n\n ## Documentation for a function.\n # Esta función comprueba la cantidad de puestos autorizados \n @api.onchange('job_id')\n def onchange_job_id_num_aut_emplo(self):\n num_empl_allowed = self.job_id.x_num_aut_emplo\n num_empl_current = self.job_id.no_of_employee_cur\n x_tem_emplo = self.job_id.x_tem_emplo\n\n if self.active:\n if self.job_id.id != False:\n if num_empl_current == num_empl_allowed:\n\n if not self.empl_temp:\n if x_tem_emplo > 0:\n self.job_id = None\n return {'value':{},'warning':{'title':'Advertencia','message':'Solo hay plazas temporales para este puesto, si se desea agregar un puesto temporal activa la casilla \"Empleado temporal\" y vuelve a intentarlo, si el empleado no es temporal comunícate con el gerente de recursos humanos.'}}\n else:\n self.job_id = None\n return {'value':{},'warning':{'title':'Advertencia','message':'No puedes seleccionar este puesto por que todas las vacantes ya estan ocupadas'}}\n\n\n if self.empl_temp:\n _logger.warning(num_empl_allowed + x_tem_emplo)\n if num_empl_current >= num_empl_allowed + x_tem_emplo:\n self.job_id = None\n return {'value':{},'warning':{'title':'Advertencia','message':'No puedes seleccionar este puesto por que todas las vacantes ya estan ocupadas'}}\n\n\n","sub_path":"hr_recruitment_gi/models/hr_employee_gi.py","file_name":"hr_employee_gi.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"2420619","text":"import discord\r\nfrom discord import client\r\nfrom discord.ext import commands\r\nimport config \r\nfrom config import token\r\nimport asyncio\r\nimport os\r\n\r\nclient = commands.Bot(command_prefix= '?')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n for file in os.listdir(\"./commands\"):\r\n if file.endswith(\".py\"):\r\n extension = file[:-3]\r\n try:\r\n client.load_extension(f\"commands.{extension}\")\r\n print(f\"Loaded extension '{extension}'\")\r\n except Exception as e:\r\n exception = f\"{type(e).__name__}: 
{e}\"\r\n print(f\"Failed to load extension {extension}\\n{exception}\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f'{len(client.guilds)} servers | ?help'))\r\n print(\"bot is online\")\r\n\r\n\r\n\r\n@client.event\r\nasync def on_guild_join(guild):\r\n await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f'{len(client.guilds)} servers | ?help'))\r\n@client.event\r\nasync def on_guild_remove(guild):\r\n await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f'{len(client.guilds)} servers | ?help'))\r\n\r\n\r\n\r\nclient.run(token)\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"148024269","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright @ 2014 OPS,\n#\n# Author: tingfang.bao\n# DateTime: 15-3-5 下午4:50\nimport StringIO\nimport mimetypes\n\nfrom flask import Flask, Response\nfrom werkzeug.datastructures import Headers\nimport xlwt\n\napp = Flask(__name__)\n\n\n@app.route(\"/export/\")\ndef export_view():\n \"\"\"\n 显示了动态生成excel文件,返回给response流\n\n :return:\n \"\"\"\n response = Response()\n response.status_code = 200\n\n workbook = xlwt.Workbook(encoding='utf-8')\n sheet = workbook.add_sheet(\"sheet1\", cell_overwrite_ok=True)\n sheet.write(1, 1, u'测试')\n\n # 关键是下面这两行\n output = StringIO.StringIO()\n workbook.save(output)\n\n response.data = output.getvalue()\n\n file_name = 'data.xls'\n mimetype_tuple = mimetypes.guess_type(file_name)\n\n response_headers = Headers({\n 'Pragma': \"public\", # required,\n 'Expires': '0',\n 'Cache-Control': 'must-revalidate, post-check=0, pre-check=0,private',\n # 'Cache-Control': 'private', # required for certain browsers,\n 'Content-Type': mimetype_tuple[0],\n 'Content-Disposition': 'attachment; filename=\\\"%s\\\";' % file_name,\n 'Content-Transfer-Encoding': 'binary',\n 'Content-Length': len(response.data)\n })\n\n if not mimetype_tuple[1] is None:\n response_headers.update({\n 'Content-Encoding': mimetype_tuple[1]\n })\n\n response.headers = response_headers\n response.set_cookie('fileDownload', 'true', path='/')\n\n return response\n\n\n@app.route(\"/binary/\")\ndef export_binary_file():\n \"\"\"\n 显示了读取一个二进制的文件,然后重定向到response流\n\n :return:\n \"\"\"\n response = Response()\n response.status_code = 200\n\n f = open(\"app.py\", 'rb')\n\n file_name = 'app.py'\n response.data = f.read()\n f.close()\n mimetype_tuple = mimetypes.guess_type(file_name)\n\n response_headers = Headers({\n 'Pragma': \"public\", # required,\n 'Expires': '0',\n 'Cache-Control': 'must-revalidate, post-check=0, pre-check=0,private',\n # 'Cache-Control': 'private', # required for certain browsers,\n 'Content-Type': mimetype_tuple[0],\n 'Content-Disposition': 'attachment; filename=\\\"%s\\\";' % file_name,\n 'Content-Transfer-Encoding': 'binary',\n 'Content-Length': len(response.data)\n })\n\n if not mimetype_tuple[1] is None:\n response_headers.update({\n 'Content-Encoding': mimetype_tuple[1]\n })\n\n response.headers = response_headers\n response.set_cookie('fileDownload', 'true', path='/')\n\n return response\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host=\"0.0.0.0\", 
port=8080)","sub_path":"basic/download_file/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276030665","text":"# Copyright (c) 2021 OpenKS Authors, DCD Research Lab, Zhejiang University.\n# All Rights Reserved.\n\nfrom openks.loaders import loader_config, SourceType, FileType, GraphLoader\nfrom openks.models import OpenKSModel\n# from py2neo import Graph\n\n''' 图谱载入与图谱数据结构生成 '''\n# 载入参数配置与数据集载入\nloader_config.source_type = SourceType.LOCAL_FILE\nloader_config.file_type = FileType.OPENKS\n# loader_config.source_type = SourceType.NEO4J\n# graph_db = Graph(host='127.0.0.1', http_port=7474, user='neo4j', password='123456')\n# loader_config.graph_db = graph_db\nloader_config.source_uris = 'openks/data/company-kg'\n# loader_config.source_uris = 'openks/data/FB15k-237'\n# loader_config.source_uris = 'openks/data/medical-kg'\nloader_config.data_name = 'my-data-set'\n# 图谱数据结构载入\ngraph_loader = GraphLoader(loader_config)\ngraph = graph_loader.graph\ngraph.info_display()\n''' 图谱表示学习模型训练 '''\n# 列出已加载模型\nOpenKSModel.list_modules()\n# 算法模型选择配置\nargs = {\n\t'gpu': True,\n\t'learning_rate': 0.001,\n\t'epoch': 500,\n\t'batch_size': 1024,\n\t'optimizer': 'adam',\n\t'hidden_size': 500,\n\t'margin': 4.0,\n\t'model_dir': './',\n\t'eval_freq': 20,\n\t'gamma': 12.0,\n\t'epsilon': 2.0\n}\nplatform = 'PyTorch'\nexecutor = 'KGLearn_Dy'\nmodel = 'DyE'\nargs['model_dir'] = model+'.pt'\nprint(\"根据配置,使用 {} 框架,{} 执行器训练 {} 模型。\".format(platform, executor, model))\nprint(\"-----------------------------------------------\")\n# 模型训练\nexecutor = OpenKSModel.get_module(platform, executor)\nprint('--')\nprint(OpenKSModel.get_module(platform, model))\nkglearn = executor(graph=graph, model=OpenKSModel.get_module(platform, model), args=args)\nkglearn.run()\nprint(\"-----------------------------------------------\")\n","sub_path":"examples/example_kg_dynamic_train.py","file_name":"example_kg_dynamic_train.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"15801617","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\nimport glob, os\n\nAUTHOR = 'serge-sans-paille and other pythraners'\nSITENAME = 'Pythran stories'\nSITEURL = 'http://serge-sans-paille.github.io/pythran-stories'\n\nPATH = 'content'\nSTATIC_PATHS = ['notebooks', 'images'] + [os.path.basename(p) for p in glob.glob(os.path.join(PATH, \"*_files\"))]\n\nTIMEZONE = 'Europe/Paris'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Pythran Doc', 'https://pythran.readthedocs.io'),\n ('Pythran on PyPI', 'https://pypi.python.org/pypi/pythran'),\n )\n\n# Social widget\nSOCIAL = (('github', 'https://github.com/serge-sans-paille/pythran'),\n )\n\nDEFAULT_PAGINATION = 10\nRELATIVE_URLS = True\nDELETE_OUTPUT_DIRECTORY = True\n\nTHEME= 'bootstrap2'\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77121006","text":"import csv\n\n# Чтение необработанных данных из csv\nrawData = csv.reader(open('sample.csv', 'rb'), dialect='excel')\n\n# шаблон. 
where the csv data will be formatted to geojson\ntemplate = \\\n ''' \\\n { \"type\" : \"Feature\",\n \"id\" : %s,\n \"geometry\" : {\n \"type\" : \"Point\",\n \"coordinates\" : [\"%s\",\"%s\"]},\n \"properties\" : { \"name\" : \"%s\", \"value\" : \"%s\"}\n },\n '''\n\n# head of the geojson file\noutput = \\\n ''' \\\n{ \"type\" : \"FeatureCollection\",\n \"features\" : [\n '''\n\n# loop through the csv rows, skipping the first one\niter = 0\nfor row in rawData:\n iter += 1\n if iter >= 2:\n id = row[0]\n lat = row[1]\n lon = row[2]\n name = row[3]\n pop = row[4]\n output += template % (row[0], row[1], row[2], row[3], row[4])\n\n# tail of the geojson file\noutput += \\\n ''' \\\n ]\n}\n '''\n\n# opens the geoJSON file to write the output into output.geojson\noutFileHandle = open(\"output.geojson\", \"w\")\noutFileHandle.write(output)\noutFileHandle.close()\n","sub_path":"csvToGeoJSON.py","file_name":"csvToGeoJSON.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"504290001","text":"from com.lufaxin.stock.mapper.property.DbColumn import DbColumn\n\n\nclass StockKnnDailyCrossValidationLogCfg():\n __cols = [\n DbColumn(\"ID\", int.__name__, True),\n DbColumn(\"MID\"),\n DbColumn(\"KNNMID\"),\n DbColumn(\"KNNITEMID\"),\n DbColumn(\"YBTS\"),\n DbColumn(\"YZSJ\"),\n DbColumn(\"YZSJ_INT\"),\n DbColumn(\"YBSL\"),\n DbColumn(\"YZSL\"),\n DbColumn(\"CGSL\"),\n DbColumn(\"SBSL\"),\n DbColumn(\"ZZSL\"),\n DbColumn(\"ZFSL\"),\n DbColumn(\"FFSL\"),\n DbColumn(\"FZSL\")\n ]\n\n __name = \"t_stock_knn_daily_cross_validation_log\"\n\n def get_name(self):\n return self.__name\n\n def get_cols(self):\n return self.__cols\n","sub_path":"com/lufaxin/stock/mapper/cfg/StockKnnDailyCrossValidationLogCfg.py","file_name":"StockKnnDailyCrossValidationLogCfg.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"530813007","text":"from queue import Queue\n\n\nROOT = \"root\"\n# Implementing a Binary Tree: https://youtu.be/6E169kShoNU\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n def __str__(self):\n return str(self.data)\n\nclass BinaryTree:\n def __init__(self, data=None, node=None):\n if node:\n self.root = node\n elif data:\n node = Node(data)\n self.root = node\n else:\n self.root = None\n\n # Symmetric-order traversal (properly called \"inorder\" in English)\n def simetric_traversal(self, node=None):\n if node is None:\n node = self.root\n if node.left:\n # the parentheses are specific to our example;\n # an inorder traversal does not need them\n print('(', end='') \n self.simetric_traversal(node.left)\n print(node, end='')\n if node.right:\n self.simetric_traversal(node.right)\n print(')', end='')\n \n # POST-ORDER traversal of a BINARY TREE: https://youtu.be/QC8oiQnlYos\n def postorder_traversal(self, node=None):\n if node is None:\n node = self.root\n if node.left:\n self.postorder_traversal(node.left)\n if node.right:\n self.postorder_traversal(node.right)\n print(node)\n \n def height(self, node=None):\n if node is None:\n node = self.root\n hleft = 0\n hright = 0\n if node.left:\n hleft = self.height(node.left)\n if node.right:\n hright = self.height(node.right)\n if hright > hleft:\n return hright + 1\n return hleft + 1\n\n def inorder_traversal(self, node=None):\n if node is None:\n node = self.root\n if node.left:\n 
self.inorder_traversal(node.left)\n print(node, end=' ')\n if node.right:\n self.inorder_traversal(node.right)\n\n # Level-order traversal of a Binary Tree: https://youtu.be/UOK7nS2E9xM\n def levelorder_traversal(self, node=ROOT):\n if node == ROOT:\n node = self.root\n\n queue = Queue()\n queue.put(node)\n while not queue.empty():\n node = queue.get()\n if node.left:\n queue.put(node.left)\n if node.right:\n queue.put(node.right)\n print(node, end=\" \")\n\n# Binary Search Tree: https://youtu.be/rviJVdt_icw\nclass BinarySearchTree(BinaryTree):\n \n def insert(self, value):\n parent = None\n x = self.root\n while(x):\n parent = x\n if value < x.data:\n x = x.left\n else:\n x = x.right\n if parent is None:\n self.root = Node(value)\n elif value < parent.data:\n parent.left = Node(value)\n else:\n parent.right = Node(value)\n\n def search(self, value):\n return self._search(value, self.root)\n\n def _search(self, value, node):\n if node is None:\n return node\n if node.data == value:\n return BinarySearchTree(node=node)\n if value < node.data:\n return self._search(value, node.left)\n return self._search(value, node.right)\n\nif __name__ == \"__main__\":\n # tree = BinaryTree(7)\n # tree.root.left = Node(18)\n # tree.root.right = Node(14)\n\n # print(tree.root)\n # print(tree.root.right)\n # print(tree.root.left)\n\n tree = BinaryTree()\n n1 = Node('a')\n n2 = Node('+')\n n3 = Node('*')\n n4 = Node('b')\n n5 = Node('-')\n n6 = Node('/')\n n7 = Node('c')\n n8 = Node('d')\n n9 = Node('e')\n\n n6.left = n7\n n6.right = n8\n n5.left = n6\n n5.right = n9\n n3.left = n4\n n3.right = n5\n n2.left = n1\n n2.right = n3\n \n tree.root = n2\n\n tree.inorder_traversal()\n","sub_path":"test/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"483018056","text":"import numpy as np\n\n\ndef solver(matrix):\n if (matrix.shape[0] > 1) and (matrix.shape[1] > 1):\n if abs(matrix[0, 0]) < 0.000001:\n for i in range(1, matrix.shape[0]):\n if abs(matrix[i, 0]) > 0.000001:\n temp = matrix[0, :]\n temp = list(temp)\n matrix[0, :] = matrix[i, :]\n matrix[i, :] = np.array(temp)\n return solver(matrix)\n ans, roots = solver(matrix[1:, 1:])\n if ans == 'NO':\n return ans, []\n else:\n return 'INF', []\n else:\n for i in range(1, matrix.shape[0]):\n matrix[i, :] -= matrix[0, :]*(matrix[i, 0]/matrix[0, 0])\n ans, roots = solver(matrix[1:, 1:])\n if ans == 'NO':\n return ans, []\n elif ans == 'INF':\n return ans, []\n else:\n matrix[0, :] = matrix[0, :]/matrix[0, 0]\n root1 = matrix[0, matrix.shape[1]-1]\n for i in range(1, matrix.shape[1]-1):\n root1 -= roots[i-1]*matrix[0, i]\n return ans, [root1] + roots\n else:\n if matrix.shape[1] == 1:\n for i in range(0, matrix.shape[0]):\n if abs(matrix[i, 0]) > 0.0000001:\n return 'NO', []\n\n return 'YES', []\n elif matrix.shape[0] == 1:\n if matrix.shape[1] == 2:\n if abs(matrix[0, 0]) > 0.000001:\n return 'YES', [matrix[0, 1]/matrix[0, 0], ]\n elif abs(matrix[0, 1]) > 0.000001:\n return 'NO', []\n else:\n return 'INF', []\n else:\n for i in range(0, matrix.shape[1]-1):\n if abs(matrix[0, i]) > 0.0000001:\n return 'INF', []\n if abs(matrix[0, matrix.shape[1]-1]) > 0.000001:\n return 'NO', []\n else:\n return 'INF', []\n return ans, matrix\n\n\ndef estimator(matrix):\n f = np.copy(matrix[:, -1])\n\n X = np.copy(matrix[:, :-1])\n mt1 = X.T.dot(X)\n f1 = X.T.dot(f)\n ans = solver(np.hstack((mt1, f1.reshape(-1,1))))\n return ans[1]\n\n\ndef data_handler():\n data = 
input()\n n = int(data.split(' ')[0].strip())\n m = int(data.split(' ')[1].strip())\n matrix = []\n for i in range(n):\n data1 = input()\n try:\n data_spl = list(map(lambda x: float(x.strip()), data1.strip().split(' ')))\n except ValueError:\n raise ValueError(data1.split(' '))\n matrix.append(data_spl)\n matrix1 = np.array(matrix)\n return estimator(matrix1)\n\nresult = data_handler()\nprint(*result)\n","sub_path":"Math/least_squares.py","file_name":"least_squares.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"496391647","text":"import logging\nimport logging.handlers\n\n\n__all__ = 'logger', 'handler'\n\nlogging.basicConfig(\n format='[%(asctime)s|%(levelname)s]'\n '%(name)s:%(filename)s:%(module)s:%(lineno)d - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n)\nlogger = logging.getLogger('hikikomori')\nlogger.setLevel(logging.DEBUG)\nhandler = logging.handlers.RotatingFileHandler('hikikomori.log')\nlogger.addHandler(handler)\n","sub_path":"hikikomori/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"136599631","text":"import os\nimport sys\nimport spacy\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LogisticRegression\n\n# ROOT FOLDER : Make things easier setting the root folder as the origin\n#root_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\n#sys.path.insert(0, root_path)\n\nclass NLUCore:\n \"\"\"\n Core NLU for intent predictions.\n \"\"\"\n\n def __init__(self, db_name='data.csv'):\n self.root_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\n self.db_path = f'{self.root_path}/NLI_Project/data/{db_name}'\n self.classifier = None\n self.classes = None\n self.label_encoder = None\n self.spacy_nlp = spacy.load('en_core_web_lg')\n self.extracted_entities = None\n self.extracted_pos_dobj = None\n\n X, y = self.__pre_process_data()\n self.__initialize_classifier(X, y)\n\n def __pre_process_data(self):\n \"\"\"\n Obtains X and y data from a data frame. 
Encodes label and data as numpy values.\n :return:\n \"\"\"\n df = pd.read_csv(self.db_path)\n\n X_as_text = df['text']\n y_as_text = df['intent']\n\n # Tokenize the words for TRAIN\n n_queries = len(X_as_text)\n dim_embedding = self.spacy_nlp.vocab.vectors_length\n X = np.zeros((n_queries, dim_embedding))\n\n for idx, sentence in enumerate(X_as_text):\n doc = self.spacy_nlp(str(sentence))\n X[idx, :] = doc.vector\n\n # Encode labels\n le = LabelEncoder()\n y = le.fit_transform(y_as_text)\n\n self.classes = le.classes_\n self.label_encoder = le\n\n return X, y\n\n def __initialize_classifier(self, X, y):\n \"\"\"\n Initializes and trains a multi-class logistic classifier to fit any of the intents in the training data.\n :param X: Vector embeddings representing the sentences belonging to a particular class intent.\n For example: \"Hi!\" belongs to \"greeting\" intent.\n :param y: Labels encoded for the different class intents.\n :return: Initializes internally the logistic classifier.\n \"\"\"\n classifier = LogisticRegression(random_state=0).fit(X, y)\n self.classifier = classifier\n\n def __extract_entities(self, text, prediction):\n \"\"\"\n Entity extraction for the user utterances, needed to complete the slot details\n :param text: The user utterance that needs to be parsed\n :param prediction: Prediction of the utterance type, in case we need to perform extra steps to properly detect\n entities for a particular utterance type\n :return: A list containing all the entities recognized, set to the class properties\n \"\"\"\n doc = self.spacy_nlp(text)\n #for ent in doc.ents:\n #print(ent, ent.label_)\n\n pos_tags_or_dobj = []\n for token in doc:\n #print(token.pos_, token.dep_)\n if token.pos_ == 'ADJ' or token.dep_ == 'dobj' or token.pos_ == 'NOUN':\n pos_tags_or_dobj.append(token.text)\n\n self.extracted_entities = doc.ents\n self.extracted_pos_dobj = pos_tags_or_dobj\n\n def predict_intent(self, text):\n \"\"\"\n Prediction made over a particular sentence provided by the user. 
For instance: 'I'm looking for a restaurant'\n would predict a 'restaurant_search' intent, so the DM module would know how to react properly.\n :param text: Sentence provided by the user.\n :return: Best class label prediction and a vector with probabilities for all classes.\n \"\"\"\n word_vector = self.spacy_nlp(text).vector\n\n enc_prediction = self.classifier.predict(np.array(word_vector).reshape(1, -1))\n probabilities = self.classifier.predict_proba(np.array(word_vector).reshape(1, -1))\n\n prediction = self.label_encoder.inverse_transform([enc_prediction])\n self.__extract_entities(text, prediction[0])\n\n return prediction[0], probabilities[0]\n","sub_path":"nlu_core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"638937772","text":"\"\"\"\nCreated on Mon Apr 22 15:31:18 2019\n\n@author: zijun\n\"\"\"\n\nimport os\nimport numpy as np\nfrom d3m import container\nfrom collections import OrderedDict\nfrom d3m import container, utils\nfrom common_primitives import utils as comUtils\nfrom d3m.metadata import base as metadata_base\nfrom d3m import metrics\nfrom common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive\nfrom common_primitives import ndarray_to_dataframe\nfrom common_primitives.extract_columns_semantic_types import ExtractColumnsBySemanticTypesPrimitive\nfrom common_primitives.column_parser import ColumnParserPrimitive\nfrom common_primitives.unseen_label_encoder import UnseenLabelEncoderPrimitive\nfrom common_primitives.unseen_label_decoder import UnseenLabelDecoderPrimitive\nfrom common_primitives import dataset_remove_columns\nfrom common_primitives import construct_predictions\nfrom common_primitives import compute_scores\nfrom common_primitives import extract_columns_semantic_types, random_forest, column_parser, utils\nimport pandas as pd\nfrom sklearn.metrics import f1_score\n\nfrom rpi_d3m_primitives.JMIplus_auto import JMIplus_auto\n\nimport d3m.primitives.data_cleaning.imputer as Imputer\nimport d3m.primitives.classification.random_forest as RF\n\n# Classification\ndataset_name = '57_hypothyroid'#remove column 15,19, uniform only\n\n\nprint('\\nLoad Dataset') \npath = os.path.join('/home/zijun/Dropbox/Project/DARPA-D3M-project/D3Mdatasets-phase1/', dataset_name,'TRAIN/dataset_TRAIN/datasetDoc.json')\ndataset = container.Dataset.load('file://{uri}'.format(uri=path))\n\ntarget_index = dataset.metadata.query(('learningData', metadata_base.ALL_ELEMENTS))['dimension']['length']-1\ndataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Target')\ndataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')\ndataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Attribute')\n\nprint('\\nRemove Columns')\nremove_columns_hyperparams_class = dataset_remove_columns.RemoveColumnsPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nhp = remove_columns_hyperparams_class({'columns': [15,19], 'resource_id': 'learningData'})\nremove_columns_primitive = dataset_remove_columns.RemoveColumnsPrimitive(hyperparams=hp)\ndataset = 
remove_columns_primitive.produce(inputs=dataset).value\n\nprint('\\nDataset to Dataframe')\nhyperparams_class = DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nprimitive = DatasetToDataFramePrimitive(hyperparams=hyperparams_class.defaults())\ncall_metadata = primitive.produce(inputs=dataset)\ndataframe = call_metadata.value\n\nprint('\\nColumn Parser')\nhyperparams_class = column_parser.ColumnParserPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nprimitive = column_parser.ColumnParserPrimitive(hyperparams=hyperparams_class.defaults())\ndataframe = primitive.produce(inputs=dataframe).value\n\nprint('\\nExtract Attributes')\nhyperparams_class = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nprimitive = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive(hyperparams=hyperparams_class.defaults().replace({'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute']}))\ncall_metadata = primitive.produce(inputs=dataframe)\ntrainD = call_metadata.value\n\n\nprint('\\nExtract Targets')\nhyperparams_class = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nprimitive = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive(hyperparams=hyperparams_class.defaults().replace({'semantic_types':['https://metadata.datadrivendiscovery.org/types/SuggestedTarget']}))\ncall_metadata = primitive.produce(inputs=dataframe)\ntrainL = call_metadata.value\n\nprint ('\\nLoad testing dataset') \npath = os.path.join('/home/zijun/Dropbox/Project/DARPA-D3M-project/D3Mdatasets-phase1/', dataset_name,'TEST/dataset_TEST/datasetDoc.json')\ndataset = container.Dataset.load('file://{uri}'.format(uri=path))\n\ndataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Target')\ndataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')\ndataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Attribute')\n\n##*************************\nprint('\\nRemove Column')\ndataset = remove_columns_primitive.produce(inputs=dataset).value\n##***************************\n\nprint('\\nDataset to Dataframe')\nhyperparams_class = DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nprimitive = DatasetToDataFramePrimitive(hyperparams=hyperparams_class.defaults())\ncall_metadata = primitive.produce(inputs=dataset)\ndataframe = call_metadata.value\n\nprint('\\nColumn Parser')\nhyperparams_class = column_parser.ColumnParserPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nprimitive = column_parser.ColumnParserPrimitive(hyperparams=hyperparams_class.defaults())\ndataframe = primitive.produce(inputs=dataframe).value\n\nprint('\\nExtract Attributes')\nhyperparams_class = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nprimitive = 
extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive(hyperparams=hyperparams_class.defaults().replace({'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute']}))\ncall_metadata = primitive.produce(inputs=dataframe)\ntestD = call_metadata.value\n\nprint('\\nExtract Suggested Target')\nhyperparams_class = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nprimitive = extract_columns_semantic_types.ExtractColumnsBySemanticTypesPrimitive(hyperparams=hyperparams_class.defaults().replace({'semantic_types': ['https://metadata.datadrivendiscovery.org/types/SuggestedTarget']}))\ncall_metadata = primitive.produce(inputs=dataframe)\ntestL = call_metadata.value\n\nprint('\\nGet Target Name')\ncolumn_metadata = testL.metadata.query((metadata_base.ALL_ELEMENTS, 0))\nTargetName = column_metadata.get('name',[])\n\nprint('JMI feature selection')\nnbins = 2 #nbins\nhyperparams_class = JMIplus_auto.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nFSmodel = JMIplus_auto(hyperparams=hyperparams_class.defaults().replace({'nbins':nbins}))\nFSmodel.set_training_data(inputs=trainD, outputs=trainL) \nFSmodel.fit()\nprint('\\nSelected Feature Index')\nprint(FSmodel._index)\nprint('\\n')\ntrainD = FSmodel.produce(inputs=trainD) \ntrainD = trainD.value\n\nprint('\\nSubset of testing data')\ntestD = FSmodel.produce(inputs=testD)\ntestD = testD.value\n\n##================================================================================\nprint('\\nImpute trainD')\nhyperparams_class = Imputer.SKlearn.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nImputer_primitive = Imputer.SKlearn(hyperparams=hyperparams_class.defaults().replace({'strategy':'most_frequent'}))\nImputer_primitive.set_training_data(inputs=trainD)\nImputer_primitive.fit()\ntrainD = Imputer_primitive.produce(inputs=trainD).value\n\nprint('\\nImpute testD')\ntestD = Imputer_primitive.produce(inputs=testD).value\n \n##===============================================================================================\nprint('\\nRandom Forest')\nn_estimators = 24\nhyperparams_class = RF.SKlearn.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nRF_primitive = RF.SKlearn(hyperparams=hyperparams_class.defaults().replace({'n_estimators':n_estimators}))\nRF_primitive.set_training_data(inputs=trainD, outputs=trainL)\nRF_primitive.fit()\npredictedTargets = RF_primitive.produce(inputs=testD)\npredictedTargets = predictedTargets.value\n\n\n##================================================================================================\nprint('\\nConstruct Predictions')\nhyperparams_class = construct_predictions.ConstructPredictionsPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nconstruct_primitive = construct_predictions.ConstructPredictionsPrimitive(hyperparams=hyperparams_class.defaults())\ncall_metadata = construct_primitive.produce(inputs=predictedTargets, reference=dataframe)\ndataframe = call_metadata.value\n\nprint('\\ncompute scores')\npath = os.path.join('/home/zijun/Dropbox/Project/DARPA-D3M-project/D3Mdatasets-phase1/', dataset_name, 'SCORE/dataset_TEST/datasetDoc.json')\ndataset = container.Dataset.load('file://{uri}'.format(uri=path))\n\ndataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 
'https://metadata.datadrivendiscovery.org/types/Target')\ndataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')\n\nhyperparams_class = compute_scores.ComputeScoresPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\nmetrics_class = hyperparams_class.configuration['metrics'].elements\nprimitive = compute_scores.ComputeScoresPrimitive(hyperparams=hyperparams_class.defaults().replace({\n 'metrics': [metrics_class({\n 'metric': 'F1_MACRO',\n 'pos_label': None,\n 'k': None,\n })],\n }))\nscores = primitive.produce(inputs=dataframe, score_dataset=dataset).value\n\n\nprint(scores)\n#F1_MACRO 0.986754\n\n#print('\\nSave file')\n#os.mkdir('/output/predictions/e7239570-bb9d-464b-aa5b-a0f7be958dc0')\n#output_path = os.path.join('/output','predictions','e7239570-bb9d-464b-aa5b-a0f7be958dc0','predictions.csv')\n#with open(output_path, 'w') as outputFile:\n# dataframe.to_csv(outputFile, index=False,columns=['d3mIndex', TargetName])\n","sub_path":"executable-examples/exec-JMI-RF.py","file_name":"exec-JMI-RF.py","file_ext":"py","file_size_in_byte":10518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142248564","text":"\"\"\"\nThe prime factors of 13195 are 5, 7, 13 and 29.\n\nWhat is the largest prime factor of the number 600851475143 ?\n\"\"\"\n\n\ndef prime_numbers(number):\n\tprimes = []\n\ti=2\n\twhile number>i:\n\t\tif number%i:\n\t\t\ti+=1\n\n\t\telse:\n\t\t\tnumber//=i #Floor division -> Lower number \n\t\t\tprimes.append(i)\n\n\tif number>1:\n\t\tprimes.append(number)\n\n\n\tprint(\"El número primo factor más grande es\", number)\n\n\n\nprime_numbers(600851475143)\n\n\n\n\n\n\n\n\n","sub_path":"euler3.py","file_name":"euler3.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"416640442","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 8 16:10:04 2019\n\n@author: yonic\n\"\"\"\nimport glob\nimport os\nimport pickle\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nfrom speaker_embed import networks\nfrom database import tedlium\nfrom tacotron2.layers import TacotronSTFT\n\n\nSFT_CONFIG={ \n \"sampling_rate\": 16000,\n \"filter_length\": 400,\n \"hop_length\": 160,\n \"win_length\": 400,\n \"mel_fmin\": 0.0,\n \"mel_fmax\": 8000.0}\n\nMAX_WAV_VALUE = 32768.0\nEMBEDDING_SIZE = 512\n#TEDIUM_NUM_OF_CLASSES = 774\nTEDIUM_NUM_OF_CLASSES = 140\nMAX_DATA_CHUNK = 11\nMAX_TRAIN_EPOCH = 1000\n\nclass EmbeddingNetClassifier(networks.EmbeddingNet):\n def __init__(self,num_of_clusses):\n super(EmbeddingNetClassifier, self).__init__()\n self.logits = networks.EmbeddingNet() \n self.fc = nn.Linear(EMBEDDING_SIZE,num_of_clusses)\n \n def forward(self, x): \n x = self.logits(x)\n x = self.fc(x)\n return F.log_softmax(x, dim=1) \n \n\ndef data_generator(data_base,chunk_length_in_sec,label_order_list): \n stft = TacotronSTFT(**SFT_CONFIG)\n keys = data_base.get_db_keys() \n batch_train_x = []\n batch_train_y = [] \n \n while True: \n for k in keys: \n label = np.array([label_order_list.index(k)]) \n for t in data_base.get_db_wv_times(k): \n sampling_rate, speech = data_base.get_wav(k,*t)\n chunks = int(len(speech)/sampling_rate/chunk_length_in_sec)\n audio_length = sampling_rate*chunk_length_in_sec \n for 
chunk in range(chunks): \n audio = speech[chunk*audio_length:(chunk+1)*audio_length]\n audio_norm = audio / MAX_WAV_VALUE\n audio_norm = torch.from_numpy(audio_norm).float()\n audio_norm = audio_norm.unsqueeze(0)\n audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)\n melspec = stft.mel_spectrogram(audio_norm)\n mel_np = melspec.detach().numpy()\n for i in range(mel_np.shape[1]):\n channel_mean = np.mean(mel_np[0,i,:]) \n mel_np[0,i,:] = mel_np[0,i,:] - channel_mean \n \n batch_train_x.append(mel_np)\n batch_train_y.append(label)\n if len(batch_train_x) >0 and len(batch_train_y)>0:\n yield np.array(batch_train_x),np.concatenate(np.array(batch_train_y))\n else:\n yield np.array([]),np.array([])\n \n batch_train_x = []\n batch_train_y = [] \n \n yield None,None\n \n \n'''Generate train matrix, take labels that have samples more than threshold\n'''\ndef merger_train(threshold):\n labels = []\n H = pickle.load(open('./data/meta.bin','rb')) \n for k in H.keys():\n if H[k]>threshold:\n labels.append(k)\n \n X_train = []\n Y_train = []\n X_val = []\n Y_val = []\n \n index = 0\n for l in labels: \n fname = './data/data_x_{}.npy'.format(l)\n x = np.load(fname) \n \n fname = './data/data_y_{}.npy'.format(l)\n y = np.load(fname)\n \n index = list(range(len(x)))\n np.random.shuffle(index)\n x = x[index]\n y = y[index]\n \n X_train.append(x[0:-1])\n Y_train.append(y[0:-1])\n X_val.append(x[-1])\n Y_val.append(y[-1])\n print('Append label:{}'.format(l)) \n \n \n X_train = np.concatenate(X_train)\n X_val = np.concatenate(X_val)\n Y_train = np.concatenate(Y_train)\n Y_val = np.array(Y_val)\n print('X_train shape:',X_train.shape)\n print('X_val shape:',X_val.shape)\n \n index = list(range(len(X_train)))\n np.random.shuffle(index)\n \n X_train = X_train[index]\n Y_train = Y_train[index]\n \n fname = './data/x_train_merged'\n np.save(fname,X_train)\n \n fname = './data/x_val_merged'\n np.save(fname,X_val) \n \n fname = './data/y_train_merged'\n np.save(fname,Y_train)\n \n fname = './data/y_val_merged'\n np.save(fname,Y_val)\n \n \n'''Generate train matrix, take labels that have samples more than threshold\n'''\ndef merger_embed_train(threshold1,threshold2,prefix):\n labels = []\n H = pickle.load(open('./data/meta.bin','rb')) \n for k in H.keys():\n if H[k]>=threshold1 and H[k]<=threshold2:\n labels.append(k)\n \n X_test = []\n Y_test = [] \n \n index = 0\n for l in labels: \n fname = './data/data_x_{}.npy'.format(l)\n x = np.load(fname) \n \n fname = './data/data_y_{}.npy'.format(l)\n y = np.load(fname)\n \n index = list(range(len(x)))\n np.random.shuffle(index)\n x = x[index]\n y = y[index]\n \n X_test.append(x[0:-1])\n Y_test.append(y[0:-1]) \n print('Append label:{}'.format(l)) \n \n \n X_test = np.concatenate(X_test)\n Y_test = np.concatenate(Y_test) \n print('X_test shape:',X_test.shape)\n print('Y_test shape:',Y_test.shape)\n \n index = list(range(len(X_test)))\n np.random.shuffle(index)\n \n X_test = X_test[index]\n Y_test = Y_test[index]\n \n fname = './data/x_embed_merged_{}'.format(prefix)\n np.save(fname,X_test) \n \n fname = './data/y_embed_merged_{}'.format(prefix)\n np.save(fname,Y_test) \n \n \n \n \ndef train_matrix_generator(): \n chunk_length_in_sec=3\n \n data_base = tedlium.TedLium(mode='train')\n label_order_list = sorted(data_base.get_db_keys())\n num_of_clusses = len(set(label_order_list))\n print('label_order_list',label_order_list)\n \n print('Tedium DB num of classes is:',num_of_clusses) \n G_data = data_generator(data_base=data_base,\n 
chunk_length_in_sec=chunk_length_in_sec,\n label_order_list=label_order_list)\n\n H = {}\n while True:\n data,target = next(G_data)\n if data is None: \n break \n \n if len(target)>0:\n label = target[0]\n fname = './data/data_x_{}'.format(label) \n np.save(fname,data)\n \n fname = './data/data_y_{}'.format(label) \n np.save(fname,target) \n print('Label:{}, size:{}'.format(label,len(target)))\n H[label] = len(target) \n \n pickle.dump(H,open('./data/meta.bin','wb'))\n \n \ndef get_ckpt(folder='./ckpt/'):\n \n def parse_ckpt_name(ckpt_name): \n epoch = ckpt_name.split('/')[-1].split('epoch')[1].split('_')[1] \n return int(epoch) \n H = {}\n for f in glob.glob(os.path.join(folder,'model_epoch*.pt')):\n epoch = parse_ckpt_name(f) \n H[epoch] = f\n \n return H\n \n\n'''Train from scratch if epoch == 0, else try to load from checkpoint\n'''\ndef train_model(start_epoch,learning_rate): \n torch.cuda.init() \n \n chunk = 0\n train_batch_size = 128\n validation_batch_size = 128 \n \n valid_data = []\n valid_target = []\n fname = './data/x_train_merged.npy'\n data = np.load(fname) \n fname = './data/x_val_merged.npy'\n valid_data = np.load(fname)\n valid_data = np.expand_dims(valid_data,axis=1)\n\n fname = './data/y_train_merged.npy'\n target = np.load(fname)\n fname = './data/y_val_merged.npy'\n valid_target = np.load(fname)\n \n unique_label = np.unique(valid_target)\n I = {}\n index = 0\n for l in unique_label:\n if not l in I:\n I[l] = index\n index += 1\n \n print('Train fix') \n for i in range(len(valid_target)): \n valid_target[i] = I[valid_target[i]]\n \n print('Val fix') \n for i in range(len(target)): \n target[i] = I[target[i]] \n \n num_of_batches_in_validation = int(np.ceil(len(valid_target)/validation_batch_size))\n validation_index = list(range(len(valid_data)))\n num_of_batches_in_chunk = int(np.ceil(len(target)/train_batch_size))\n print('num_of_batches_in_validation:',num_of_batches_in_validation,', num_of_batches_in_chunk:',num_of_batches_in_chunk)\n print('Validation set size:{}, unique labels:{}'.format(len(valid_target),len(np.unique(valid_target))))\n TRAIN_LOSS = []\n VAL_LOSS = []\n \n device = torch.cuda.current_device()\n torch.cuda.set_device(device)\n learning_rate = learning_rate \n \n embedding_net = EmbeddingNetClassifier(num_of_clusses=len(unique_label))\n if start_epoch > 0:\n ckpt = get_ckpt()\n fname = ckpt[start_epoch]\n print('Load model from:',fname)\n embedding_net.load_state_dict(torch.load(fname))\n \n embedding_net.cuda() \n optimizer = torch.optim.Adam(embedding_net.parameters(), \n lr=learning_rate) \n for epoch in range(start_epoch,MAX_TRAIN_EPOCH): \n\n random_index = list(range(len(target)))\n np.random.shuffle(random_index) \n \n batch_index = 0\n \n ######################## TRAIN ####################################\n embedding_net.train()\n train_loss = 0\n for batch_index in range(num_of_batches_in_chunk-1): \n train_data_batch_index = random_index[batch_index*train_batch_size:batch_index*train_batch_size+train_batch_size] \n batch_data = data[train_data_batch_index,:,:,:]\n batch_target = target[train_data_batch_index] \n x = torch.tensor(batch_data,device='cuda') \n y = Variable(torch.tensor(batch_target,device='cuda').long())\n optimizer.zero_grad()\n net_out = embedding_net(x) \n loss = F.nll_loss(net_out, y)\n loss.backward() \n optimizer.step() \n train_loss += loss.item()\n del x\n del y \n del batch_data\n del batch_target \n \n ######################## VALIDATION ###############################\n embedding_net.eval() \n val_loss = 0\n 
correct = 0 \n \n with torch.no_grad(): \n for batch_index in range(num_of_batches_in_validation):\n validation_batch_index = validation_index[batch_index*validation_batch_size:batch_index*validation_batch_size+validation_batch_size] \n batch_data = valid_data[validation_batch_index,:,:,:]\n batch_target = valid_target[validation_batch_index] \n x_val = torch.tensor(batch_data,device='cuda')\n y_val = Variable(torch.tensor(batch_target,device='cuda').long())\n net_out = embedding_net(x_val)\n val_loss += F.nll_loss(net_out, y_val)\n pred = net_out.argmax(dim=1, keepdim=True) # get the index of the max log-probability \n correct += pred.eq(y_val.view_as(pred)).sum().item()\n del x_val\n del y_val\n \n train_loss/= num_of_batches_in_chunk \n val_loss /= num_of_batches_in_validation \n TRAIN_LOSS.append(train_loss)\n VAL_LOSS.append(val_loss)\n print('\\nTest set: epoch:{},chunk:{},train loss: {:.4f},test loss:{:.4f},Accuracy: {}/{} ({:.0f}%)\\n'.format(\n epoch,chunk,train_loss, val_loss,correct, \n len(valid_target),100. * correct / len(valid_target)))\n fname = './stat/stat_epoch_{}_chunk_{}.bin'.format(epoch,chunk)\n pickle.dump([TRAIN_LOSS,VAL_LOSS],open(fname,'wb'))\n \n fname = './ckpt/model_epoch_{}_chunk_{}_val_loss_{:.4f}_acc_{:.0f}.pt'.format(epoch,chunk,val_loss,100. * correct / len(valid_target))\n torch.save(embedding_net.state_dict(),fname) \n\n \nif __name__ == '__main__':\n #train_matrix_generator()\n #merger_train(threshold=200)\n merger_embed_train(threshold1=0,threshold2=100,prefix='0to100')\n #train_model(start_epoch=14,learning_rate=1e-3)\n \n\n \n \n ","sub_path":"speaker_class_trainer.py","file_name":"speaker_class_trainer.py","file_ext":"py","file_size_in_byte":12607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"206172813","text":"#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n\r\ndef palindrome(string):\r\n str_dict = {} # initialize an empty dict\r\n num = 0 # length of the palindrome string\r\n count = 0 # number of characters whose count is odd\r\n\r\n # count the occurrences of each character in the string\r\n for i in string:\r\n str_dict[i] = string.count(i)\r\n # iterate over the dict\r\n for i in str_dict.values():\r\n if i%2 == 0:\r\n num += i\r\n else:\r\n count += 1\r\n num += i - 1\r\n # if any odd count exists, then num += 1\r\n if count >= 1:\r\n return num + 1\r\n else:\r\n return num\r\n\r\nif __name__ == \"__main__\":\r\n str = 'abccccdd'\r\n print(palindrome(str))","sub_path":"LintCode/1_627.py","file_name":"1_627.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"387151992","text":"# query sql\n# encoding: utf-8\n\nimport MySQLdb\n\nMYSQL_HOST = '192.168.31.102'\nMYSQL_PORT = 3306\nMYSQL_USER = 'root'\nMYSQL_PASSWORD = 'No3fJQn66DZx'\nMYSQL_DB = 'thorough_esd'\nMYSQL_CHARSET = 'utf8'\n\n\n# number of uploads today\nSELECT_SQL_1 = '''\n select count(*) from upload_image where to_days(create_date) = to_days(now());\n '''\n\n# total number of uploads\nSELECT_SQL_2 = '''\n select count(*) from upload_image;\n '''\n\n# number of successful predictions today\nSELECT_SQL_3 = '''\n select count(*) from upload_image where status = 6 and to_days(create_date) = to_days(now());\n '''\n\n# total number of successful predictions\nSELECT_SQL_4 = '''\n select count(*) from upload_image where status = 6;\n '''\n\n# number of successful restorations today\nSELECT_SQL_5 = '''\n select count(*) from pathology_image where ai_predict_recover = 4 and to_days(create_date) = to_days(now());\n '''\n\n# total number of successful restorations\nSELECT_SQL_6 = '''\n select count(*) from pathology_image where ai_predict_recover = 4;\n '''\n\n\ndef select():\n conn = MySQLdb.connect(host=MYSQL_HOST, port=MYSQL_PORT, user=MYSQL_USER, 
passwd=MYSQL_PASSWORD, db=MYSQL_DB, charset=MYSQL_CHARSET)\n cur = conn.cursor()\n\n result_1 = False\n result_2 = False\n result_3 = False\n result_4 = False\n result_5 = False\n result_6 = False\n\n try:\n cur.execute(SELECT_SQL_1)\n result_1 = cur.fetchall()[0][0]\n except BaseException as e:\n print(e)\n\n try:\n cur.execute(SELECT_SQL_2)\n result_2 = cur.fetchall()[0][0]\n except BaseException as e:\n print(e)\n\n try:\n cur.execute(SELECT_SQL_3)\n result_3 = cur.fetchall()[0][0]\n except BaseException as e:\n print(e)\n\n try:\n cur.execute(SELECT_SQL_4)\n result_4 = cur.fetchall()[0][0]\n except BaseException as e:\n print(e)\n\n try:\n cur.execute(SELECT_SQL_5)\n result_5 = cur.fetchall()[0][0]\n except BaseException as e:\n print(e)\n\n try:\n cur.execute(SELECT_SQL_6)\n result_6 = cur.fetchall()[0][0]\n except BaseException as e:\n print(e)\n\n cur.close()\n conn.close()\n\n return result_1, result_2, result_3, result_4, result_5, result_6","sub_path":"item/3/cmdb/asset/select_sql.py","file_name":"select_sql.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"621710220","text":"import os\n# import cv2\ntry:\n from PIL import Image\nexcept ImportError:\n import Image\nimport pytesseract\nimport datetime\n\nfrom flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for, send_from_directory\n)\nfrom werkzeug.utils import secure_filename\nfrom werkzeug.exceptions import abort\nfrom os.path import join, dirname, realpath\n\nfrom digestai.utils import get_summary, allowed_file\nfrom digestai.db import get_db\n\nbp = Blueprint('images', __name__)\n# pytesseract.pytesseract.tesseract_cmd = r'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract'\n\ndef get_text_from_image(image_path):\n # img = cv2.imread(image_path)\n # Image.open(image)\n # img = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)\n return pytesseract.image_to_string(Image.open(image_path))\n\nUPLOADS_PATH = 'static/image_uploads' #join(dirname(realpath(__file__)), 'static/image_uploads')\n\n# UPLOAD_FOLDER = \"uploads\"\nALLOWED_EXTENSIONS = {'png', 'PNG', 'jpg', 'jpeg', 'JPG', 'JPEG'}\n# image upload\n@bp.route('/image/upload', methods=('POST', 'GET'))\ndef upload_image():\n if request.method == 'POST':\n db = get_db()\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename, ALLOWED_EXTENSIONS):\n filename = secure_filename(file.filename)\n file.save(os.path.join(UPLOADS_PATH, filename))\n # need to save details to database before return from here\n db.execute(\n 'INSERT INTO imageupload (created_at, filename, filepath) VALUES (?, ?, ?)',\n (datetime.datetime.now(), filename, os.path.join(UPLOADS_PATH, filename))\n )\n db.commit()\n # process the file then add to mongodb\n return redirect(url_for('images.image_list'))\n return render_template('image_upload.html')\n\n@bp.route('/images')\ndef image_list():\n db = get_db()\n uploads = db.execute(\n 'SELECT * FROM imageupload ORDER BY created_at DESC'\n ).fetchall()\n print(uploads)\n return render_template('images.html', uploads=uploads)\n\ndef get_upload(id):\n upload = get_db().execute(\n 'SELECT id, filepath, filename'\n ' FROM imageupload'\n ' WHERE id = ?', (id,)).fetchone()\n if upload is None:\n abort(404, f\"Upload id {id} doesn't exist.\")\n return 
upload\n\n@bp.route('/image/<int:id>')\ndef image_text(id):\n print(f\"Download file requested: {id}\")\n image_obj = get_upload(id)\n # check if image_obj is actually of type image using upload type\n image_text = get_text_from_image(image_obj['filepath'])\n # check if summary parameter is provided\n q = request.args.get('q')\n if q is not None and q == 'summary':\n summary = get_summary(image_text)\n summarized = True\n else:\n summary = None\n summarized = False\n return render_template('image_content.html', image_name=image_obj['filename'], image_text=image_text, summary=summary, summarized=summarized)\n","sub_path":"digestai/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"430673998","text":"# coding: utf-8\nimport MySQLdb\n\n# -*- coding: utf-8 -*-\nimport MySQLdb\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\npath = '/home/nico/树蛙/MySQL上课文件/作业/university/'\nif __name__ == '__main__':\n\n # connect to the database\n conn = MySQLdb.connect(\n host='localhost',\n port=3306,\n user='root',\n passwd='052123',\n charset=\"utf8\",\n db='test',\n )\n\n # get a cursor for executing statements\n cur = conn.cursor()\n\n # read the files line by line and write them into the database\n\n\n with open('/home/a/data/department.txt', 'r') as f: # loop over the text data and insert it into the database line by line\n for line in f.readlines():\n s = line.split(' ') # split on spaces; stored as a list\n cur.execute(\"insert into department values('\"+s[0]+\"','\"+s[1]+\"','\"+s[2]+\"')\") # the \"+s[0]+\" pieces concatenate s[0] as a string\n\n with open('/home/a/data/student.txt', 'r') as f:\n for line in f.readlines():\n s = line.split(' ')\n cur.execute(\"insert into student values('\"+s[0]+\"','\"+s[1]+\"','\"+s[2]+\"','\"+s[3]+\"','\"+s[4]+\"','\"+s[5]+\"')\")\n\n with open('/home/a/data/exam.txt', 'r') as f:\n for line in f.readlines():\n s = line.split(' ')\n cur.execute(\"insert into exam values('\"+s[0]+\"','\"+s[1]+\"','\"+s[2]+\"')\")\n\n cur.close()\n conn.commit()\n conn.close()","sub_path":"step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"63160013","text":"from django.shortcuts import render,get_object_or_404,redirect,reverse\nfrom .models import Comment\nfrom django.views.generic import View\n# Create your views here.\nfrom .models import Article\nfrom .models import Comment\nfrom django.http import HttpResponse\nclass AddComment(View):\n def post(self,request,id):\n article=get_object_or_404(Article,pk=id)\n\n username=request.POST.get('name')\n email=request.POST.get('email')\n url=request.POST.get('url')\n comment=request.POST.get('comment')\n\n c=Comment()\n c.username = username\n c.email=email\n c.url=url\n c.content=comment\n c.article=article\n c.save()\n return redirect(reverse('boke:single',args=(id,)))","sub_path":"demo1/comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"401570490","text":"import time, win32con, win32api, win32gui, ctypes\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom apscheduler.schedulers.background import BackgroundScheduler\r\nfrom pywinauto import clipboard\r\nimport pandas as pd \r\n\r\nkakao_opentalk_name = 'test'\r\nchat_command = '님이 들어왔습니다.'\r\nban_list = []\r\n\r\nPBYTE256 = ctypes.c_ubyte * 256\r\n_user32 = ctypes.WinDLL(\"user32\")\r\nGetKeyboardState = _user32.GetKeyboardState\r\nSetKeyboardState = 
_user32.SetKeyboardState\r\nPostMessage = win32api.PostMessage\r\nSendMessage = win32gui.SendMessage\r\nFindWindow = win32gui.FindWindow\r\nIsWindow = win32gui.IsWindow\r\nGetCurrentThreadId = win32api.GetCurrentThreadId\r\nGetWindowThreadProcessId = _user32.GetWindowThreadProcessId\r\nAttachThreadInput = _user32.AttachThreadInput\r\n\r\nMapVirtualKeyA = _user32.MapVirtualKeyA\r\nMapVirtualKeyW = _user32.MapVirtualKeyW\r\n\r\nMakeLong = win32api.MAKELONG\r\nw = win32con\r\n\r\ndef PostKeyEx(hwnd, key, shift, specialkey):\r\n if IsWindow(hwnd):\r\n\r\n ThreadId = GetWindowThreadProcessId(hwnd, None)\r\n\r\n lparam = MakeLong(0, MapVirtualKeyA(key, 0))\r\n msg_down = w.WM_KEYDOWN\r\n msg_up = w.WM_KEYUP\r\n\r\n if specialkey:\r\n lparam = lparam | 0x1000000\r\n\r\n if len(shift) > 0:\r\n pKeyBuffers = PBYTE256()\r\n pKeyBuffers_old = PBYTE256()\r\n\r\n SendMessage(hwnd, w.WM_ACTIVATE, w.WA_ACTIVE, 0)\r\n AttachThreadInput(GetCurrentThreadId(), ThreadId, True)\r\n GetKeyboardState(ctypes.byref(pKeyBuffers_old))\r\n\r\n for modkey in shift:\r\n if modkey == w.VK_MENU:\r\n lparam = lparam | 0x20000000\r\n msg_down = w.WM_SYSKEYDOWN\r\n msg_up = w.WM_SYSKEYUP\r\n pKeyBuffers[modkey] |= 128\r\n\r\n SetKeyboardState(ctypes.byref(pKeyBuffers))\r\n time.sleep(0.01)\r\n PostMessage(hwnd, msg_down, key, lparam)\r\n time.sleep(0.01)\r\n PostMessage(hwnd, msg_up, key, lparam | 0xC0000000)\r\n time.sleep(0.01)\r\n SetKeyboardState(ctypes.byref(pKeyBuffers_old))\r\n time.sleep(0.01)\r\n AttachThreadInput(GetCurrentThreadId(), ThreadId, False)\r\n\r\n else: \r\n SendMessage(hwnd, msg_down, key, lparam)\r\n SendMessage(hwnd, msg_up, key, lparam | 0xC0000000)\r\n\r\ndef SendReturn(hwnd):\r\n win32api.PostMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)\r\n time.sleep(0.01)\r\n win32api.PostMessage(hwnd, win32con.WM_KEYUP, win32con.VK_RETURN, 0)\r\n\r\ndef open_chatroom(chatroom_name):\r\n hwndkakao = win32gui.FindWindow(None, \"카카오톡\")\r\n hwndkakao_edit1 = win32gui.FindWindowEx( hwndkakao, None, \"EVA_ChildWindow\", None)\r\n hwndkakao_edit2_1 = win32gui.FindWindowEx( hwndkakao_edit1, None, \"EVA_Window\", None)\r\n hwndkakao_edit2_2 = win32gui.FindWindowEx( hwndkakao_edit1, hwndkakao_edit2_1, \"EVA_Window\", None)\r\n hwndkakao_edit3 = win32gui.FindWindowEx( hwndkakao_edit2_2, None, \"Edit\", None)\r\n\r\n win32api.SendMessage(hwndkakao_edit3, win32con.WM_SETTEXT, 0, chatroom_name)\r\n time.sleep(1) \r\n SendReturn(hwndkakao_edit3)\r\n time.sleep(1)\r\n\r\ndef kakao_sendtext(chatroom_name, text):\r\n hwndMain = win32gui.FindWindow( None, chatroom_name)\r\n hwndEdit = win32gui.FindWindowEx( hwndMain, None, \"RICHEDIT50W\", None)\r\n\r\n win32api.SendMessage(hwndEdit, win32con.WM_SETTEXT, 0, text)\r\n SendReturn(hwndEdit)\r\n\r\ndef copy_chatroom(chatroom_name):\r\n hwndMain = win32gui.FindWindow( None, chatroom_name)\r\n hwndListControl = win32gui.FindWindowEx(hwndMain, None, \"EVA_VH_ListControl_Dblclk\", None)\r\n\r\n PostKeyEx(hwndListControl, ord('A'), [w.VK_CONTROL], False)\r\n time.sleep(1)\r\n PostKeyEx(hwndListControl, ord('C'), [w.VK_CONTROL], False)\r\n ctext = clipboard.GetData()\r\n return ctext\r\n\r\ndef get_ban_list():\r\n f = open(\"./blacklist.txt\", mode=\"r\", encoding=\"utf8\")\r\n while True:\r\n line = f.readline()\r\n if not line: break\r\n ban_list.append(line)\r\n\r\ndef chat_last_save():\r\n open_chatroom(kakao_opentalk_name) \r\n ttext = copy_chatroom(kakao_opentalk_name) \r\n\r\n a = ttext.split('\\r\\n') \r\n df = pd.DataFrame(a) \r\n\r\n df[0] = 
df[0].str.replace('\\[([\\S\\s]+)\\] \\[(오전|오후)([0-9:\\s]+)\\] ', '') \r\n\r\n    return df.index[-2], df.iloc[-2, 0]\r\n\r\ndef chat_chek_command(cls, clst):\r\n    open_chatroom(kakao_opentalk_name) \r\n    ttext = copy_chatroom(kakao_opentalk_name) \r\n\r\n    a = ttext.split('\\r\\n') \r\n    df = pd.DataFrame(a) \r\n\r\n    df[0] = df[0].str.replace('\\[([\\S\\s]+)\\] \\[(오전|오후)([0-9:\\s]+)\\] ', '')\r\n\r\n    if df.iloc[-2, 0] == clst:\r\n        return df.index[-2], df.iloc[-2, 0]\r\n    else:\r\n\r\n        df1 = df.iloc[cls+1 : , 0] \r\n        found = df1[ df1.str.contains(chat_command)]\r\n        print(\"found : \" + str(found))\r\n\r\n        if 1 <= int(found.count()):\r\n            print(\"user recognition\")\r\n            bl = found.replace(chat_command, \"\") \r\n\r\n            for bl_ckeck in ban_list:\r\n                if bl_ckeck == bl:\r\n                    print(bl + \" confirmation\")\r\n                    kakao_sendtext(kakao_opentalk_name, bl + \"님 확인되었습니다.\") \r\n            return df.index[-2], df.iloc[-2, 0]\r\n\r\n        else:\r\n            print(\"user unidentified\")\r\n            return df.index[-2], df.iloc[-2, 0]\r\n\r\ndef main():\r\n    get_ban_list()\r\n    cls, clst = chat_last_save() \r\n\r\n    while True:\r\n        print(\"working!!\")\r\n        cls, clst = chat_chek_command(cls, clst) \r\n        time.sleep(5)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"338503573","text":"\"\"\"\r\nShow the function's output for the numbers from 1 to 100.\r\n\"\"\"\r\n\r\ndef fizzbuzz(number):\r\n    \"\"\"\r\n    A function that takes a number as input and returns:\r\n    Fizz if the number is divisible by 3;\r\n    Buzz if the number is divisible by 5;\r\n    FizzBuzz if the number is divisible by both 3 and 5.\r\n    \"\"\"\r\n    if number % 3 == 0 and number % 5 == 0:\r\n        return \"FizzBuzz\"\r\n    elif number % 3 == 0:\r\n        return \"Fizz\"\r\n    elif number % 5 == 0:\r\n        return \"Buzz\"\r\n    \"\"\"\r\n    If you also need output when the number is divisible by neither:\r\n    else:\r\n        return \"Neither Fizz, nor Buzz, nor even FizzBuzz\"\r\n    \"\"\"\r\n    \r\ndef sumFizzBuzz(): \r\n    diapazon = int(input(\"Веди до куда перебирать: \"))\r\n    sumFizzBuzz = 0\r\n    for i in range(1,diapazon,1):\r\n        #print(i)\r\n        if fizzbuzz(i):\r\n            sumFizzBuzz += i\r\n            #print(\"при числе {} сумма равна {}\".format(i,sumFizzBuzz))\r\n    print(sumFizzBuzz)\r\n    return(sumFizzBuzz)\r\n\r\nsumFizzBuzz()","sub_path":"b5_7/FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"65455008","text":"import sys\nimport random\nimport copy\nimport math\nimport numpy as np\n\ndef setKthBit(n, k):\n    return ((1 << k) | n)\n\ndef resetKthBit(n, k):\n    return (~(1 << k) & n)\n\ndef getKthBit(n, k):\n    if n & (1 << k):\n        return 1\n    else:\n        return 0\n\ndef generate_faulty_weights(matrix_height, matrix_width, total_cells, fault_rate, weight_matrix, layer_file,quantization_bits):\n    faulty_weight_matrix = copy.deepcopy(weight_matrix)\n    defect_map = [[0] * (matrix_width * quantization_bits) for x in range(matrix_height)]\n    stuckat_one_cells = []\n    stuckat_zero_cells = []\n    all_bits = quantization_bits * total_cells\n    stuckat_cells = random.sample(range(0, all_bits - 1), int(fault_rate * all_bits))\n    random.shuffle(stuckat_cells)\n    stuckat_one_cells.extend(stuckat_cells[0: int(math.floor(0.8 * fault_rate * all_bits))])\n    stuckat_zero_cells.extend(stuckat_cells[int(math.floor(0.8 * fault_rate * all_bits)):])\n\n\n    # initialize defect_map with 2\n    # Later put 1 for stuck-at-1 and 0 
for stuck-at-0 and 2 for clean cells\n for i in range(0, matrix_height):\n for j in range(0, quantization_bits * matrix_width):\n defect_map[i][j] = 2\n\n for x in stuckat_one_cells:\n rowAddress = x / (matrix_width * quantization_bits)\n columnAddress = x % (matrix_width * quantization_bits)\n defect_map[int(rowAddress)][columnAddress] = 1\n\n for x in stuckat_zero_cells:\n rowAddress = x / (matrix_width * quantization_bits)\n columnAddress = x % (matrix_width * quantization_bits)\n defect_map[int(rowAddress)][columnAddress] = 0\n\n for i in range(0, matrix_height):\n for j in range(0, matrix_width):\n for k in range(0, quantization_bits):\n this_weight = faulty_weight_matrix[i][j]\n if defect_map[i][j * quantization_bits + k] == 1:\n if (getKthBit(this_weight, quantization_bits - k - 1) == 0):\n this_weight = setKthBit(this_weight, quantization_bits - k - 1)\n\n elif defect_map[i][j * quantization_bits + k] == 0:\n if (getKthBit(this_weight, quantization_bits - k - 1) == 1):\n this_weight = resetKthBit(this_weight, quantization_bits - k - 1)\n\n faulty_weight_matrix[i][j] = this_weight\n\n return defect_map, faulty_weight_matrix\n\n################## main function ################\ntotal_arg = len(sys.argv)\nfault_rate = float(sys.argv[total_arg-1])\nquantization_bits = int(sys.argv[total_arg-2])\n\n\nfor i in range(1 , total_arg-2):\n layer_file = sys.argv[i]\n weight_matrix = []\n\n with open(layer_file) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n for line in content:\n all_elem = line.split(',')\n all_elem_int = []\n for x in all_elem:\n if x != '':\n all_elem_int.append(int(x))\n weight_matrix.append(all_elem_int)\n\n matrix_height = len(weight_matrix)\n matrix_width = len(weight_matrix[0])\n total_cells = matrix_height * matrix_width\n\n defect_map, faulty_weight_matrix = generate_faulty_weights(matrix_height, matrix_width, total_cells, fault_rate, weight_matrix, layer_file,quantization_bits)\n # Print the defect map\n filename = \"defectmap\" + \"_\" + str(fault_rate) + \"_\" + layer_file\n with open(filename, 'w') as outf:\n for i in range(0, matrix_height):\n for j in range(0, quantization_bits * matrix_width):\n outf.write(str(defect_map[i][j]) + \",\")\n outf.write('\\n')\n\n # Print the faulty weights\n filename = \"faulty\" + \"_\" + str(fault_rate) + \"_\" + layer_file\n with open(filename, 'w') as outf:\n for i in range(0, matrix_height):\n for j in range(0, matrix_width):\n outf.write(str(faulty_weight_matrix[i][j]) + \",\")\n outf.write('\\n')\n\n","sub_path":"NN_fault_Injection.py","file_name":"NN_fault_Injection.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96452578","text":"import datetime\r\nimport utils\r\nimport crypto\r\nimport time\r\nimport re\r\n\r\nfrom urlparse import urljoin\r\nfrom BeautifulSoup import BeautifulSoup, Comment\r\n\r\nfrom flask import url_for, request, redirect, url_for\r\n\r\n# Helper required by Flask-Login for returning a redirect to the login page.\r\ndef user_unauthorized_callback():\r\n return redirect(url_for('logout'))\r\n\r\n# Helper required by Flask-Login for returning the current user.\r\ndef load_user(username):\r\n from models import User\r\n user = User.objects.get(username=username)\r\n if user:\r\n return user\r\n return redirect(url_for('logout'))\r\n\r\n# Helper method for determining the monday immediately prior to a given day.\r\ndef get_last_monday(today):\r\n offset = today.weekday() % 
7\r\n last_monday = today - datetime.timedelta(days=offset)\r\n return last_monday\r\n\r\ndef sanitize_number_input(number):\r\n try:\r\n float(number)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\ndef sanitize_time_input(time_input):\r\n try:\r\n time.strptime(time_input, '%I:%M %p')\r\n return True\r\n except ValueError:\r\n return False\r\n\r\ndef sanitize_mongo_hash(hash):\r\n if hash:\r\n if len(hash) == 24:\r\n return re.findall(r\"([a-fA-F\\d]{24})\", hash)\r\n return False\r\n\r\ndef sanitizeHtml(value, base_url=None):\r\n rjs = r'[\\s]*(&#x.{1,7})?'.join(list('javascript:'))\r\n rvb = r'[\\s]*(&#x.{1,7})?'.join(list('vbscript:'))\r\n re_scripts = re.compile('(%s)|(%s)' % (rjs, rvb), re.IGNORECASE)\r\n validTags = 'p i strong b u a h1 h2 h3 pre br img'.split()\r\n validAttrs = 'href src width height'.split()\r\n urlAttrs = 'href src'.split() # Attributes which should have a URL\r\n soup = BeautifulSoup(value)\r\n for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):\r\n # Get rid of comments\r\n comment.extract()\r\n for tag in soup.findAll(True):\r\n if tag.name not in validTags:\r\n tag.hidden = True\r\n attrs = tag.attrs\r\n tag.attrs = []\r\n for attr, val in attrs:\r\n if attr in validAttrs:\r\n val = re_scripts.sub('', val) # Remove scripts (vbs & js)\r\n if attr in urlAttrs:\r\n val = urljoin(base_url, val) # Calculate the absolute url\r\n tag.attrs.append((attr, val))\r\n\r\n return soup.renderContents().decode('utf8')\r\n\r\ndef validate_ssn(ssn):\r\n ssn = ssn.replace('-', '')\r\n # this actually matches valid SSN's (too strong for CDC)\r\n # if re.match(r\"^(?!000|666)(?:[0-6][0-9]{2}|7(?:[0-6][0-9]|7[0-2]))?(?!00)[0-9]{2}(?!0000)[0-9]{4}$\", ssn):\r\n if re.match(r\"^(?:[0-9]{3})(?:[0-9]{2})(?:[0-9]{4})$\", ssn):\r\n return True\r\n else:\r\n return False","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"68941048","text":"#!/usr/bin/python\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport string \nclass ListAccessObject(object):\n\n def __init__(self, database, name_input):\n self.db = database\n self.name = name_input\n self.mydb = database[name_input]\n\n def find_items(self):\n l = []\n for each_item in self.mydb.find():\n l.append({'item':each_item['item'], \\\n 'list':each_item['list'], \\\n 'timestamp':each_item['_id'].generation_time})\n return l\n\n def find_lists(self):\n l = []\n for each_item in self.mydb.find():\n l.append({'list':each_item['list'], \\\n 'timestamp':each_item['_id'].generation_time})\n return l\n def find_sorted(self):\n l = []\n for each_item in self.mydb.find().sort('list',1):\n l.append({'item':each_item['item'], \\\n 'list':each_item['list'], \\\n 'timestamp':each_item['_id'].generation_time})\n return l\n\n def last_items(self, value):\n l = []\n for each_item in self.mydb.find().sort('_id',-1).limit(value):\n #for each_item in self.mydb.find().limit(value):\n l.append({'item':each_item['item'], \\\n 'list':each_item['list'], \\\n 'timestamp':each_item['_id'].generation_time})\n return l\n\n def insert_item(self, list_name, newitem):\n newitem = {'list':list_name, 'item':newitem}\n self.mydb.insert(newitem)\n\n def delete_item(self, item):\n newitem = {'item':item}\n self.mydb.delete_many(newitem)\n\n def insert_listname(self, list_name):\n newitem = {'list':list_name}\n self.mydb.insert(newitem)\n\n def delete_listname(self, list_name):\n newitem = {'list':list_name}\n self.mydb.delete_many(newitem)\n","sub_path":"Lists/lib/ListAccessObject.py","file_name":"ListAccessObject.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"502372871","text":"import os\nimport redis\n\nREDIS_PORT = 6379\nREDIS_DB = 0\nREDIS_HOST = os.environ.get('REDIS_PORT_6379_TCP_ADDR', 'redis')\n\nREDIS_CONNECTION_POOL = redis.ConnectionPool(host=REDIS_HOST,\n port=REDIS_PORT,\n db=REDIS_DB)\n","sub_path":"apiserver/redis_cli.py","file_name":"redis_cli.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277689567","text":"pessoas_restaurante = int(input(\"Olá, bem vindo ao nosso restaurante\\n Vocês estão em Quantas pessoas? \"))\n\nif pessoas_restaurante >= 8:\n print(\"Espere um momento que disponibilizaremos uma mesa já!!!\")\nelse:\n print('Opa temos mesa por aqui')\n\nnumeros_multiplos = int(input(\"fale um numero agora e vamos ver se é multiplo de dez\"))\n\ncalculo = numeros_multiplos % 10\n\nif calculo == 0:\n print('Seu numero é multiplo de dez')\nelse:\n print('Não é um numero multiplo de dez')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486751134","text":"# Copyright 2018 The CapsLayer Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==========================================================================\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nfrom datasets.dataloader import DataLoader\nimport tensorflow as tf\nfrom datasets.utils import int64_feature, bytes_feature\n\n\nclass DataLoader(DataLoader):\n def __init__(self, path=None,\n num_worker=1,\n one_hot=False,\n image_size=64,\n name=None, **kwargs):\n self.height = self.width = int(image_size)\n self.channels = 3\n self.num_classes = 1000\n self.batched_shapes = {'images': [-1, self.height, self.width, self.channels],\n 'labels': [-1]}\n if path is None:\n path = os.path.join('data', 'imagenet%sx%s'%(image_size, image_size), \"*\")\n super(DataLoader, self).__init__(path=path,\n num_worker=num_worker,\n one_hot=one_hot,\n name=name, **kwargs)\n\n def tfrecorder(self, path):\n return path\n\n def parser(self, serialized_record):\n features = tf.parse_single_example(serialized_record,\n features={'data': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([1], tf.int64)})\n image = tf.decode_raw(features['data'], tf.uint8)\n label = tf.cast(features['label'], dtype=tf.int32)\n\n features = {'images': image, 'labels': label}\n return(features)\n","sub_path":"datasets/imagenet.py","file_name":"imagenet.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138682235","text":"import boto.ec2\n\nclass data:\n def __init__(self, credentials, Items):\n self.Name = 'EIP'\n self.Priority = 4\n self.show = True\n self.HeaderNames = ['IP address', 'Instance ID', 'Instance name', 'Route53 name']\n self.HeaderWidths = ['2', '2', '5', '4']\n self.HeaderKeys = ['ip_address', 'instance_id', 'name', 'route53_name']\n self.credentials = credentials\n self.Items = Items\n self.account = ''\n self.skipRegions = []\n\n def resultDict(self, address, instances, zones):\n res = {}\n res['ip_address'] = address.public_ip\n res['instance_id'] = address.instance_id\n res['name'] = 'Empty'\n res['route53_name'] = ''\n for region_name, instancesList in instances.items():\n for instance in instancesList:\n if instance['id'] == address.instance_id:\n res['name'] = instance['name']\n break\n for zone_name, records in zones.items():\n if res['ip_address'] in records:\n res['route53_name'] = records[res['ip_address']]\n break\n return res\n\n def getAllItems(self, aws_key, aws_secret, Items):\n addr = {}\n regions = boto.ec2.regions(aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)\n for region in regions:\n if region.name in self.skipRegions:\n continue \n conn = region.connect(aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)\n addresses = conn.get_all_addresses()\n addr[region.name] = []\n for address in addresses:\n addrDict = self.resultDict(address, Items['EC2'][self.account], 
Items['Route53'])\r\n                addr[region.name].append(addrDict)\r\n        return addr\r\n\r\n    def getData(self):\r\n        EIPs = {}\r\n        for credential in self.credentials:\r\n            self.account = credential[2]\r\n            EIPs[credential[2]] = self.getAllItems(credential[0], credential[1], self.Items)\r\n        return EIPs\r\n","sub_path":"data_EIP.py","file_name":"data_EIP.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"532566857","text":"import copy\r\nimport networkx as nx\r\nimport numpy as np\r\nimport config as cf\r\nimport random\r\nimport math\r\nfrom network import Network\r\nfrom network import Energy\r\nimport matplotlib.pyplot as plt\r\nfrom random import *\r\n\r\ndef Get_Fitness(network,SM,Alive_Node):\r\n    Fitness = 0\r\n    f1 = 0\r\n    f2 = 0\r\n    f3 = 0\r\n    f4 = 0\r\n    CHID = np.where(SM==np.max(SM))[0]+1\r\n    potf3 = np.zeros(cf.N_NODE+1)\r\n    potf4 = np.zeros(cf.N_NODE+1)\r\n\r\n    for i in Alive_Node:\r\n        x,y = network.node[i]['pos']\r\n        NNDist = 1000\r\n        NNID = 0\r\n        for j in CHID:\r\n            if i == j:\r\n                d = network.node[j]['RTBS']\r\n                if d > cf.D_o:\r\n                    f1 += cf.E_ELEC*cf.L*cf.E_MP*(d**4) + cf.E_DA\r\n                else:\r\n                    f1 += cf.E_ELEC*cf.L*cf.E_FS*(d**2) + cf.E_DA\r\n                continue\r\n            if i in CHID:\r\n                continue\r\n            x2,y2 = network.node[j]['pos']\r\n            NewDist = math.sqrt((x-x2)**2+(y-y2)**2)\r\n            if NewDist < NNDist:\r\n                NNDist = NewDist\r\n                NNID = j\r\n\r\n        if NNDist > cf.D_o:\r\n            f1 += cf.NCH_L * (2*cf.E_ELEC + cf.E_MP*(NNDist**4))\r\n            f2 += NNDist\r\n            potf3[NNID] += network.node[i]['res_energy']\r\n            potf4[NNID] += 1\r\n        else:\r\n            f1 += cf.NCH_L * (2*cf.E_ELEC + cf.E_FS*(NNDist**2))\r\n            f2 += NNDist\r\n            potf3[NNID] += network.node[i]['res_energy']\r\n            potf4[NNID] += 1\r\n\r\n    MinDist = 1000\r\n    for i in CHID:\r\n        potf3[i] = potf3[i]/network.node[i]['res_energy']\r\n        for j in CHID:\r\n            if i==j:\r\n                continue\r\n            x,y = network.node[i]['pos']\r\n            x2,y2 = network.node[j]['pos']\r\n            NewMinDist = math.sqrt((x-x2)**2+(y-y2)**2)\r\n            if NewMinDist < MinDist:\r\n                MinDist = NewMinDist\r\n\r\n    f2 = f2/MinDist\r\n    f3 = np.sum(potf3)\r\n    f4 = np.max(potf4)+1\r\n    Fitness = (f1+f2+f3+f4) *0.25\r\n    Fitness = 1/(1+Fitness)\r\n    return Fitness\r\n    \r\n    \r\n\r\n\r\ndef Optimizer(network, Alive_Node, Update=False, R=30, In_Median=30,First = False):\r\n    BSMO_NET = nx.create_empty_copy(network)\r\n    BSMO_CHID = []\r\n    Swarm_Size = 40\r\n    MIR = 100\r\n\r\n    if Update == True:\r\n        MAX_X = 0\r\n        MAX_Y = 0\r\n        for i in Alive_Node:\r\n            x,y =BSMO_NET.node[i]['pos']\r\n            if x > MAX_X:\r\n                MAX_X = x\r\n            if y > MAX_Y:\r\n                MAX_Y = y\r\n        \r\n        R = math.sqrt(MAX_X**2 + MAX_Y**2)/4\r\n\r\n    ##Initializing\r\n    SM_Arr = []\r\n    FIT = []\r\n    MG = 4\r\n    Group0 = []\r\n    Group1 = []\r\n    Group2 = []\r\n    Group3 = []\r\n    NGroup = 1\r\n    LLL = np.zeros(MG)\r\n    GLL = 0\r\n    MLLL = 10\r\n    MGLL = 20\r\n    NB_Cluster = max(round(cf.P_CH*len(Alive_Node)),1)\r\n    for i in range(0,Swarm_Size):\r\n        SM = []\r\n        for j in Alive_Node:\r\n            if random()<=cf.P_CH:\r\n                SM.append(1)\r\n            else:\r\n                SM.append(0)\r\n        SM_Arr.append(SM)\r\n        FIT.append(Get_Fitness(BSMO_NET,SM,Alive_Node))\r\n        Group0.append(i)\r\n\r\n\r\n    Pr = 0.1\r\n    LLID = np.where(np.max(FIT)==FIT)[0][0]\r\n    GLID = np.where(np.max(FIT)==FIT)[0][0]\r\n\r\n    for Iter in range(0,MIR):\r\n        ## Local Leader Phase\r\n        Pr = Pr + (0.4-0.1)/MIR\r\n        for i in range(0,MG):\r\n            if i == 0:\r\n                temp = Group0\r\n            if i == 1:\r\n                temp = Group1\r\n            if i == 2:\r\n                temp = Group2\r\n            if i == 3:\r\n                temp = Group3\r\n            \r\n            ## find LLID\r\n            MAXFIT = 0\r\n            count = 0\r\n            for ID in temp:\r\n                TMPFIT = FIT[ID]\r\n                if TMPFIT > MAXFIT:\r\n                    LLID = ID\r\n                    MAXFIT = TMPFIT\r\n\r\n            for j in temp:\r\n                if FIT[j] == FIT[LLID]:\r\n                    continue\r\n                if FIT[j] == FIT[GLID]:\r\n                    continue\r\n                if Pr > random():\r\n                    SM = SM_Arr[j]\r\n                    LL = SM_Arr[LLID]\r\n                    Rand = np.random.choice(temp,1)[0]\r\n                    SMR = SM_Arr[Rand]\r\n                    b = randint(0,1)\r\n                    d = randint(-1,1)\r\n                    SM_Arr[j] = np.bitwise_xor(SM,np.bitwise_or(np.bitwise_and(b,np.bitwise_xor(LL,SM)),np.bitwise_and(d,np.bitwise_xor(SMR,SM))))\r\n                    FIT[j] = Get_Fitness(BSMO_NET,SM_Arr[j],Alive_Node)\r\n                    if FIT[j] > FIT[LLID]:\r\n                        count = 1\r\n                        LLIDPOT = j\r\n            if count == 0:\r\n                LLL[i] += 1\r\n            else:\r\n                count = 0\r\n                LLID = LLIDPOT\r\n            \r\n            ## Local Leader Decision\r\n            if LLL[i] == MLLL:\r\n                LLL[i] = 0\r\n                for TT in temp:\r\n                    if FIT[TT] == FIT[LLID]:\r\n                        continue\r\n                    if FIT[TT] == FIT[GLID]:\r\n                        continue\r\n                    if Pr > random():\r\n                        SM = SM_Arr[TT]\r\n                        LL = SM_Arr[LLID]\r\n                        GL = SM_Arr[GLID]\r\n                        b = randint(0,1)\r\n                        SM_Arr[TT] = np.bitwise_xor(SM,np.bitwise_or(np.bitwise_and(b,np.bitwise_xor(LL,SM)),np.bitwise_and(b,np.bitwise_xor(GL,SM))))\r\n                        FIT[TT] = Get_Fitness(BSMO_NET,SM_Arr[TT],Alive_Node)\r\n                    \r\n                    else:\r\n                        SM = []\r\n                        for KT in Alive_Node:\r\n                            if random()<=cf.P_CH:\r\n                                SM.append(1)\r\n                            else:\r\n                                SM.append(0)\r\n                        SM_Arr[TT] = SM\r\n                        FIT[TT] = Get_Fitness(BSMO_NET,SM,Alive_Node)\r\n\r\n        ## Global Leader Phase\r\n        for i in range(0,Swarm_Size):\r\n            if Pr > random():\r\n                GL = SM_Arr[GLID]\r\n                SM = SM_Arr[i]\r\n                Rand = randint(0,Swarm_Size-1)\r\n                SMR = SM_Arr[Rand]\r\n                b = randint(0,1)\r\n                d = randint(-1,1)\r\n                SM_Arr[i] = np.bitwise_xor(SM,np.bitwise_or(np.bitwise_and(b,np.bitwise_xor(GL,SM)),np.bitwise_and(d,np.bitwise_xor(SMR,SM))))\r\n                FIT[i] = Get_Fitness(BSMO_NET,SM_Arr[i],Alive_Node)\r\n                if FIT[i] > FIT[GLID]:\r\n                    count = 1\r\n        if count == 0:\r\n            GLL += 1\r\n        else:\r\n            count = 0\r\n        GLID = np.where(np.max(FIT)==FIT)[0][0]\r\n        \r\n        ## Global Decision\r\n        if GLL == MGLL:\r\n            GLL = 0\r\n            NGroup += 1\r\n            Choice_Node = np.arange(0,Swarm_Size,1)\r\n            if NGroup == 2:\r\n                Group0 = np.random.choice(Choice_Node,int(len(Choice_Node)/NGroup),replace=False)\r\n                Choice_Node = list(set(Choice_Node)-set(Group0))\r\n                Group1 = np.array(Choice_Node)\r\n            if NGroup == 3:\r\n                Group0 = np.random.choice(Choice_Node,int(len(Choice_Node)/NGroup),replace=False)\r\n                Choice_Node = list(set(Choice_Node)-set(Group0))\r\n                Group1 = np.random.choice(Choice_Node,int(len(Choice_Node)/NGroup),replace=False)\r\n                Choice_Node = list(set(Choice_Node)-set(Group1))\r\n                Group2 = np.array(Choice_Node)\r\n            if NGroup == 4:\r\n                Group0 = np.random.choice(Choice_Node,int(len(Choice_Node)/NGroup),replace=False)\r\n                Choice_Node = list(set(Choice_Node)-set(Group0))\r\n                Group1 = np.random.choice(Choice_Node,int(len(Choice_Node)/NGroup),replace=False)\r\n                Choice_Node = list(set(Choice_Node)-set(Group1))\r\n                Group2 = np.random.choice(Choice_Node,int(len(Choice_Node)/NGroup),replace=False)\r\n                Choice_Node = list(set(Choice_Node)-set(Group2))\r\n                Group3 = np.array(Choice_Node)\r\n            if NGroup == 5:\r\n                BSMO_CHID = SM_Arr[GLID]\r\n\r\n    INNER = []\r\n    OUTER = []\r\n    BSMO_CHID = np.where(SM_Arr[GLID]==np.max(SM_Arr[GLID]))[0] + 1\r\n    for i in BSMO_CHID:\r\n        if BSMO_NET.node[i]['RTBS'] < R:\r\n            INNER.append(i)\r\n            BSMO_NET.node[i]['Next'] = 0\r\n        else:\r\n            OUTER.append(i)\r\n    \r\n    for i in Alive_Node:\r\n        if i in BSMO_CHID:\r\n            continue\r\n        x,y = BSMO_NET.node[i]['pos']\r\n        NNDist = 1000\r\n        NNID = 0\r\n        for j in BSMO_CHID:\r\n            if i == j:\r\n                continue\r\n            x2,y2 = BSMO_NET.node[j]['pos']\r\n            NewDist = math.sqrt((x-x2)**2+(y-y2)**2)\r\n            if NNDist > NewDist:\r\n                NNID = j\r\n                NNDist = NewDist\r\n        BSMO_NET.node[i]['Next'] = NNID\r\n    \r\n    for i in OUTER:\r\n        NNID = 0\r\n        NNDist = 1000\r\n        x,y = BSMO_NET.node[i]['pos']\r\n        for j in INNER:\r\n            x2,y2 = BSMO_NET.node[j]['pos']\r\n            NewDist = math.sqrt((x-x2)**2+(y-y2)**2)\r\n            if NNDist > NewDist:\r\n                NNID = j\r\n                NNDist = NewDist\r\n        BSMO_NET.node[i]['Next'] = NNID\r\n\r\n    if First == True:\r\n        ## add_Edge \r\n        for i in Alive_Node:\r\n            BSMO_NET.add_edge(i,BSMO_NET.node[i]['Next'])\r\n\r\n    return BSMO_NET, BSMO_CHID, R","sub_path":"SSMOECHS/Optimizer/BSMO_MAX.py","file_name":"BSMO_MAX.py","file_ext":"py","file_size_in_byte":9625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"551972498","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020-2021 Alibaba Group Holding Limited.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport logging\nimport os\nfrom typing import Any\n\nfrom airflow.configuration import conf\nfrom airflow.models.xcom import BaseXCom\nfrom airflow.utils.session import provide_session\nimport pendulum\nfrom sqlalchemy.orm import Session, reconstructor\n\nimport vineyard\n\nlogger = logging.getLogger('vineyard')\n\n\ndef _resolve_vineyard_xcom_options():\n    options = {}\n    if conf.has_option('vineyard', 'persist'):\n        options['persist'] = conf.getboolean('vineyard', 'persist')\n    else:\n        options['persist'] = False\n    if conf.has_option('vineyard', 'ipc_socket'):\n        options['ipc_socket'] = conf.get('vineyard', 'ipc_socket')\n    else:\n        if 'VINEYARD_IPC_SOCKET' in os.environ:\n            options['ipc_socket'] = os.environ['VINEYARD_IPC_SOCKET']\n        else:\n            raise RuntimeError(\"Failed to find vineyard IPC socket configuration, \" +\n                               \"please configure it using the environment variable \" +\n                               \"$VINEYARD_IPC_SOCKET, or via airflow's vineyard.ipc_socket configuration.\")\n    return options\n\n\nclass VineyardXCom(BaseXCom):\n    \"\"\"\n    Custom XCom backend that stores values in Vineyard.\n\n    Setup your airflow environment by specifying the following\n    environment variable:\n\n        export AIRFLOW__CORE__XCOM_BACKEND=vineyard.contrib.airflow.xcom.VineyardXCom\n    \"\"\"\n\n    __options = None\n\n    @classmethod\n    @property\n    def options(cls):\n        if cls.__options is None:\n            cls.__options = _resolve_vineyard_xcom_options()\n        return cls.__options\n\n    @reconstructor\n    def init_on_load(self):\n        \"\"\"\n        Called by the ORM after the instance has been loaded from the DB or otherwise reconstituted,\n        i.e. automatically deserialize the XCom value when loading from DB.\n        \"\"\"\n        self.value = super(VineyardXCom, self).init_on_load()\n\n    @classmethod\n    @provide_session\n    def set(cls, key, value, execution_date, task_id, dag_id, session=None):\n        \"\"\"\n        Store an XCom value.\n        :return: None\n        \"\"\"\n        session.expunge_all()\n\n        value = VineyardXCom.serialize_value(value)\n\n        # remove any duplicate XComs\n        query = session.query(cls).filter(cls.key == key, cls.execution_date == execution_date, cls.task_id == task_id,\n                                          cls.dag_id == dag_id)\n        targets = []\n        for result in query.with_entities(VineyardXCom.value):\n            targets.append(vineyard.ObjectID(BaseXCom.deserialize_value(result)))\n        if targets:\n            logger.info(\"Drop duplicates from vineyard: %s\", targets)\n            try:\n                client = 
vineyard.connect(cls.options['ipc_socket'])\n                client.delete(targets)\n            except Exception as e:\n                logger.error('Failed to drop duplicates from vineyard: %s', e)\n\n        # step 2: remove from the underlying xcom db\n        query.delete()\n        session.commit()\n\n        # insert new XCom\n        session.add(VineyardXCom(key=key, value=value, execution_date=execution_date, task_id=task_id, dag_id=dag_id))\n        session.commit()\n\n    @classmethod\n    @provide_session\n    def delete(cls, xcoms, session=None):\n        \"\"\"Delete Xcom\"\"\"\n        if isinstance(xcoms, VineyardXCom):\n            xcoms = [xcoms]\n        targets = []\n        for xcom in xcoms:\n            if not isinstance(xcom, VineyardXCom):\n                raise TypeError(f'Expected XCom; received {xcom.__class__.__name__}')\n            if xcom.value:\n                targets.append(vineyard.ObjectID(BaseXCom.deserialize_value(xcom)))\n            session.delete(xcom)\n        logger.info(\"Drop from vineyard: %s\", targets)\n        try:\n            client = vineyard.connect(cls.options['ipc_socket'])\n            client.delete(targets)\n        except Exception as e:\n            logger.error('Failed to drop from vineyard: %s', e)\n        session.commit()\n\n    @classmethod\n    @provide_session\n    def clear(\n        cls,\n        execution_date: pendulum.DateTime,\n        dag_id: str,\n        task_id: str,\n        session: Session = None,\n    ) -> None:\n        query = session.query(cls).filter(\n            cls.dag_id == dag_id,\n            cls.task_id == task_id,\n            cls.execution_date == execution_date,\n        )\n        targets = []\n        for result in query.with_entities(VineyardXCom.value):\n            targets.append(vineyard.ObjectID(BaseXCom.deserialize_value(result)))\n        if targets:\n            logger.info(\"Drop from vineyard: %s\", targets)\n            try:\n                client = vineyard.connect(cls.options['ipc_socket'])\n                client.delete(targets)\n            except Exception as e:\n                logger.error('Failed to drop from vineyard: %s', e)\n        query.delete()\n\n    @staticmethod\n    def serialize_value(value: Any):\n        client = vineyard.connect(VineyardXCom.options['ipc_socket'])\n        value_id = client.put(value)\n        if VineyardXCom.options['persist']:\n            client.persist(value_id)\n        logger.debug(\"serialize_value: %s -> %r\", value, value_id)\n        return BaseXCom.serialize_value(repr(value_id))\n\n    @staticmethod\n    def deserialize_value(result: \"XCom\") -> Any:\n        value = BaseXCom.deserialize_value(result)\n        vineyard_value = VineyardXCom.post_resolve_value(result, value)\n        logger.debug(\"deserialize_value: %s -> %s -> %s\", result, value, vineyard_value)\n        return vineyard_value\n\n    @staticmethod\n    @provide_session\n    def post_resolve_value(result: \"XCom\", value: Any, session: Session = None) -> Any:\n        ''' The :code:`post_resolve_value` runs before the value is returned to the\n        operators, to prepare the necessary input data for the task.\n\n        The post resolution will fill up occurrences of remote objects\n        (:code:`VineyardObjectRef`) with the actual (remote) value by triggering\n        a migration.\n\n        It will also record the migrated xcom value into the db as well to make\n        sure it can be dropped properly.\n        '''\n        client = vineyard.connect(VineyardXCom.options['ipc_socket'])\n        object_id = vineyard.ObjectID(value)\n\n        meta = client.get_meta(object_id)\n        if meta.islocal:\n            return client.get(object_id)\n\n        # migration\n        logger.debug('start migration: %r', object_id)\n        target_id = client.migrate(object_id)\n        logger.debug('finish migration: %r -> %r', object_id, target_id)\n\n        # TODO: should we record the replicated XCom into the db ?\n        # session.add(VineyardXCom(...))\n        # session.commit()\n\n        return client.get(target_id)\n\n\n__all__ = [\n    
'VineyardXCom',\n]\n","sub_path":"python/vineyard/contrib/airflow/xcom/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":7342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"627013401","text":"#!/usr/bin/env python3\n# -*- coding=utf-8 -*-\n\n# @Time : 2018-11-02\n# @Author : J.sky\n# @Mail : bosichong@qq.com\n# @Site : www.17python.com\n# @Title : A Python-based mental arithmetic worksheet generator for primary school students\n# @Url : http://www.17python.com/blog/29\n# @Details : Generates primary school speed-calculation test sheets (addition, subtraction, multiplication, division) with Python.\n# @Other : OS X 10.11.6\n# Python 3.6.1\n# PyCharm\n\n\n'''\nMy kid has just started first grade, so practising mental arithmetic (addition, subtraction, multiplication\nand division) is about to begin; the teacher will surely ask parents to set the problems, so I prepared in advance.\n\nThis small Python application automatically generates mental arithmetic problems for primary school students.\nAnd since today is Programmer's Day, I celebrated by knocking out 200 lines of code. :)\n\nCore features of the program:\n\n    1. Generate arithmetic problems according to the given conditions.\n\n    2. Save them as .docx for printing.\n\n\nHappy Python/Django study QQ group: 217840699\n\n\nAuthor : J.sky\nMail : bosichong@qq.com\n\n\n'''\n\nfrom docx import Document # import the Document class to generate .docx files\nfrom docx.enum.table import WD_ROW_HEIGHT_RULE\nfrom docx.oxml.ns import qn\nfrom docx.shared import RGBColor, Cm\nfrom docx.shared import Pt\nfrom docx.enum.text import WD_PARAGRAPH_ALIGNMENT\n\n__version__ = \"1.0.0\"\n\n\nclass PrintPreview:\n    '''Builds a complete, print-ready document of mental arithmetic problems. Several sets of problems\n    can be generated, and the number of sets is configurable.\n\n    - @p_list list\n        the problem sets to print; contains at least one set of problems\n\n    - @p_title list\n        page titles, generated from the chosen problem type and number range; for example, choosing\n        0-20, addition/subtraction, with carrying and borrowing, produces a title like\n        \"0 to 20 addition and subtraction with carrying and borrowing, mixed problems\";\n        the list holds one page title per problem set\n\n    - @p_column int\n        number of columns used to lay out the problems on the printed page\n\n    '''\n\n    p_list = None\n    p_title = None\n    p_subtitle = None\n    p_column = None\n    p_title_size = None\n    p_subtitle_size = None\n    p_content_siae = None\n\n    def __init__(self, l, tit, subtitle, col=2, tsize=20, subsize=16, csize=18):\n        '''\n        :param l: list the problem sets to print\n        :param tit: list page titles\n        :param subtitle str subtitle line\n        :param col: int number of columns\n        :param tsize: int title font size\n        :param csize: int problem font size\n        '''\n        self.p_list = l\n        self.p_title = tit\n        self.p_subtitle = subtitle\n        self.p_column = col\n        self.p_title_size = tsize\n        self.p_subtitle_size = subsize\n        self.p_content_siae = csize\n\n    def create_psmdocx(self, expressionList, title, docxname):\n        '''\n        :param expressionList list one set of problems\n        :param title str page title\n        :param docxname str file name for saving this set\n        :return: none\n        '''\n        if (title == ''):\n            page_title = '算术题'\n        else:\n            page_title = title\n        p_docx = Document() # create a docx document\n        # customize the body text style\n        p_docx.styles['Normal'].font.name = u'Arial' # can be replaced by any font available in Word\n        p_docx.styles['Normal'].paragraph_format.space_before = Pt(5)\n        p_docx.styles['Normal'].paragraph_format.space_after = Pt(5)\n        p_docx.styles['Normal'].font.size = Pt(self.p_content_siae)\n        # customize the heading style\n        p_docx.styles['Heading 1'].paragraph_format.space_before = Pt(12)\n        p_docx.styles['Heading 1'].paragraph_format.space_after = Pt(12)\n        p_docx.styles['Heading 1'].paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER # center the paragraph text\n\n        # work out how many table rows are needed\n        if (len(expressionList) % self.p_column == 0):\n            rs = len(expressionList) // self.p_column\n        else:\n            rs = len(expressionList) // self.p_column + 1\n\n        # print(rs)\n\n        # add the problems to tables in the docx\n        # 10 rows per page\n        tableRows = 10\n        for i in range(rs):\n            if (i % tableRows == 0):\n                if i > 0:\n                    # add a page break\n                    p_docx.add_page_break()\n                # add the page header\n                self.addPageHeader(p_docx, page_title)\n                # add the expressions to a table\n                table = p_docx.add_table(rows=tableRows, cols=self.p_column)\n                # custom row height\n                for row in table.rows:\n                    row.height = Cm(1.8)\n                table.style.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER\n                table.style.font.color.rgb = RGBColor(54, 0, 0) # color setting, as RGB here\n                table.style.font.size = Pt(self.p_content_siae) # font size, matching Word's point sizes\n                table.rows.height_rule = WD_ROW_HEIGHT_RULE.EXACTLY\n            k = 0 # counter\n\n            # for i in range(rs):\n            if i%tableRows >= 0:\n                row_cells = table.rows[i%tableRows].cells\n                for j in range(self.p_column):\n                    columnIndex = self.p_column * i + j\n                    if (columnIndex > len(expressionList) - 1):\n                        print('第{}行、第{}列,超出算式列表总数{}'.format(i, columnIndex, len(expressionList)))\n                        break\n                    else:\n                        row_cells[j].text = expressionList[columnIndex]\n                        k = k + 1\n\n        p_docx.save('{}.docx'.format(docxname)) # write out the docx\n\n    def addPageHeader(self, p_docx, page_title):\n        # create the title and apply its style\n        paragraph_title = p_docx.add_paragraph()\n        paragraph_title.style = p_docx.styles['Heading 1']\n        title = paragraph_title.add_run(page_title)\n        title.font.color.rgb = RGBColor(54, 0, 0) # color setting, as RGB here\n        title.font.size = Pt(self.p_title_size) # font size, matching Word's point sizes\n        title.font.name = u'楷体'\n        title = self.setZhFont(title, u'楷体')\n        # create the subtitle and set its font size\n        subTitle = p_docx.add_paragraph()\n        subTitle.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER # center the paragraph text\n        srun = subTitle.add_run(self.p_subtitle)\n        srun.font.color.rgb = RGBColor(54, 0, 0) # color setting, as RGB here\n        srun.font.size = Pt(self.p_subtitle_size) # font size, matching Word's point sizes\n        srun.font.name = u'Arial'\n        srun = self.setZhFont(srun, u'楷体')\n        # add an empty line\n        p_docx.add_paragraph()\n\n    def setZhFont(self, run, zhFontName):\n        \"\"\"\n        Set the fonts, covering both the Western (digit) font and the Chinese font.\n        :param run: the run to configure\n        :param fontName: Western (digit) font\n        :param zhFontName: Chinese font\n        :return:\n        \"\"\"\n        run._element.rPr.rFonts.set(qn('w:eastAsia'), zhFontName)\n        return run\n\n\n    def produce(self):\n        '''\n        Generate the .docx documents.\n        :return:\n        '''\n        k = 1\n        for l, t in zip(self.p_list, self.p_title):\n            self.create_psmdocx(l, t, t + str(k))\n            k = k + 1\n\n\n\nif __name__ == '__main__':\n    l = [['1-17=', '3-4=', '13-6=', '15-5=', '2-4=', '15-9=', '12-13=', '15-12=', '14-16=', '4-11=', '18-16=', '12-14=',\n          ],\n         ['1-17=', '3-4=', '13-6=', '15-5=', '2-4=', '15-9=', '12-13=', '15-12=', '14-16=', '4-11=', '18-16=', '12-14=',\n          '14-7=', '7-17=', '16-19=', ]]\n    t = ['小学生口算题', '小学生口算题']\n    pp = PrintPreview(l, t,\"姓名:__________ 日期:____月____日 时间:________ 对题:____道\" ,4)\n    pp.produce()\n","sub_path":"PrintPreview.py","file_name":"PrintPreview.py","file_ext":"py","file_size_in_byte":7592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"588949148","text":"from gi.repository import Gtk\nfrom functools import wraps\n\n\ndef scrollable(width=-1, height=-1):\n    \"\"\"A function that takes optional width and height and returns\n    the scrollable decorator. -1 is the default GTK option for both\n    width and height.\"\"\"\n\n    def scrollable_decorator(func):\n        \"\"\"Takes a function and returns the scroll_object_wrapper.\"\"\"\n\n        @wraps(func)\n        def scroll_object_wrapper(*args, **kwargs):\n            \"\"\"Takes arguments and obtains the original object from\n            func(*args, **kwargs). Creates a box and puts the original\n            inside that box. 
Creates a scrolled window and puts the\n            box inside it.\n            \"\"\"\n\n            box = Gtk.Box()\n            original = func(*args, **kwargs)\n            scrolled_box = Gtk.ScrolledWindow(None, None)\n            scrolled_box.set_min_content_width(width)\n            scrolled_box.set_min_content_height(height)\n            scrolled_box.add(original)\n            box.pack_start(scrolled_box, True, True, 0)\n            return box\n\n        return scroll_object_wrapper\n\n    return scrollable_decorator\n","sub_path":"gui/gtk/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"37714143","text":"from sklearn.cluster import KMeans\nimport numpy as np\nfrom sklearn import metrics\nfrom scipy.spatial.distance import cdist\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn.preprocessing import MinMaxScaler\nimport matplotlib.pyplot as plt\n\n\ndef plot_elbow(X):\n    distortions = []\n    K = range(1,10)\n    for k in K:\n        kmeanModel = KMeans(n_clusters=k).fit(X)\n        distortions.append(sum(np.min(cdist(X, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])\n\n    # Plot the elbow\n    plt.plot(K, distortions, 'bx-')\n    plt.xlabel('k')\n    plt.ylabel('Distortion')\n    plt.title('The Elbow Method showing the optimal k')\n    plt.show()\n\n\n\nfin = open(\"../data1.txt\",\"r\")\n\n\nX = []\ny = []\n\nfor l in fin:\n    X.append(l.split(\",\")[:-2])\n    y.append(int(l.split(\",\")[-2]))\n\nX = np.array(X,dtype=np.float32)\n\nscaler = MinMaxScaler()\nscaler.fit(X)\n\n\n\nX = scaler.transform(X)\ndr_X = SelectKBest(chi2,k=2).fit_transform(X,y)\n\n#obtain elbow plot\nplot_elbow(dr_X)\n\n#pick two clusters, and view a few groupings\n\nkm = KMeans(n_clusters=2,random_state=0).fit(dr_X)\n","sub_path":"hw3/dataset1/drClustering/kMeansFS.py","file_name":"kMeansFS.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"451275342","text":"pathname_test = \"c:\\HSEworkspace\\Programming\\Lesson 4\\/test.txt\"\n\nwith open(pathname_test, 'r', encoding='utf-8') as inputfile:\n    text = inputfile.read()\n\nsentences = []\nlast_index = 0\nfor i in range(0, len(text)):\n    if text[i] == '.' and i + 1 < len(text) and text[i+1] == ' ':\n        sentences.append(text[last_index: i+2])\n        last_index = i + 2\n    if text[i] == '.' and i + 1 < len(text) and text[i+1].isupper():\n        sentences.append(text[last_index:i+1])\n        last_index = i +1\n    if text[i] == '?':\n        sentences.append(text[last_index: i+1])\n        last_index = i + 1\n    if text[i] == '!':\n        sentences.append(text[last_index: i+1])\n        last_index = i + 1\n\noutputfile = open(\"c:\\HSEworkspace\\Programming\\Lesson 4\\/separated_sentences.txt\", 'w', encoding='utf-8')\nfor s in sentences:\n    outputfile.write(s)\n    outputfile.write('\\n')\noutputfile.close()\n\n\nprint(text)\nprint(len(sentences))\n\n","sub_path":"python-start-module1/Lesson 4/Lesson4_anvanced_sentence_task.py","file_name":"Lesson4_anvanced_sentence_task.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"279434017","text":"from brian2 import *\r\nimport math\r\nimport numpy as np\r\n\r\nnumberGC = 100\r\ndefaultclock.dt = (0.001)*second\r\nrunFullExperiment = 1 # set to 1 if you want to run the whole thing. 0 to screw around elsewhere. \r\ngT = 0 # target gain. 
Should vary a bit depending on day of training.\r\npT = 0 # target phase shift.\r\nM0=0.25\r\nM1=0.25\r\nG0=1 # Set to 1.8 for excite mutants\r\nG1=1\r\nVe0 = 2.25\r\nVt0=1\r\nH=0.03\r\nL=1\r\nalpha=0.19\r\nalphaD=4.5*(1e-6)*1e3*(1/second)\r\nalphaPG=3.5*(1e-5)*1e3*(1/second)\r\nalphavm=5.6*(1e-6)*1e3*(1/second)\r\npie=math.pi\r\nsigma=0.02\r\nerrorDelay = 0*ms\r\nTauPG = 900*second\r\nT = 1.666*(second) # rate at which the platform rotates.\r\ntrialInt = 50*T\r\nnightInt = 1440*T\r\n\r\nupperPG=2.85\r\nlowerPG=0.85\r\nlowerVM=0\r\n\r\ngTphase = pie/2 # equations 20 and 21 differ only in this term. When gT goes negative, I will change it through this variable. \r\n\r\nWig = 2.5\r\nWpi = 0\r\nWigini = 2.5\r\nI0 = Wig*G0 - 0.85\r\nWpgini = 1\r\nWvmini = 1.19\r\ngibbi=0\r\n\r\n\r\n## make the neuron groups\r\n\r\nMF = NeuronGroup(1,'''M = M1*cos((((2*pie)/T)*t)-(pie/2))+M0 : 1''') \r\n# Cosine Function for the mossy fibers\r\n\r\nGC = NeuronGroup(numberGC,\r\n\t'''G = G1*cos((((2*pie)/T)*t)-(pie/2)-x) + G0 : 1\r\n\tx : 1\r\n\t''')\r\n\r\n# The GC array is consistent throughout. Only weights to PC change. \r\n\r\n# Some phase delays of the above function\r\n\t\r\nfor i in range(0, numberGC):\r\n\tGC.x[i] = (i/numberGC)*pie*2 + alpha*cos((i/numberGC)*pie*2)\r\n# makes it so that the delays are distributed evenly\r\n\r\n\r\nIC = NeuronGroup(1,\r\n\t'''\r\n\tGtot : 1\r\n\tI = (Wig/numberGC)*Gtot-I0: 1\r\n\tIini = (Wigini/numberGC)*Gtot-I0 : 1\r\n\t''') # Iini = I, basically always no matter what, and it is supposed to. \r\n\r\n\r\n# I is like P, except the weights never change. This is pretty key. \r\n\t\t\t\t\t\t\t\r\nPC = NeuronGroup(1,\r\n\t'''\r\n\tP1 : 1\r\n\tP2 : 1\r\n\tP1ini : 1\r\n\tP2ini : 1\r\n\tP=(P1/numberGC)-P2 : 1\r\n\tPini=(P1ini/numberGC)-P2ini : 1\r\n\tC : 1\r\n\tV : 1\r\n\t''') \r\n\t\r\n# The functions are defined by the synapses later. P is the purkinje cell activity, V is MVN activity, which is sent back so that it can be used for the weight calculation.\r\n\r\nMVN = NeuronGroup(1, \r\n\t''' \r\n\tP : 1\r\n\tPini : 1\r\n\tA : 1\r\n\tAini : 1\r\n\tVE = A-P+Ve0 : 1\r\n\tVEini = Aini - Pini+Ve0 : 1\r\n\tVI : 1\r\n\tV = VE-VI : 1\r\n\tVini = VEini - VI : 1''')\r\n\r\nCF=NeuronGroup(1,\r\n\t'''\r\n\tV : 1\r\n\tVt : 1\r\n\tM : 1\r\n\tC=-L*(V-Vt)-H*(M-M0) : 1\r\n\t''')\r\n\r\nVT=NeuronGroup(1,\r\n\t'''\r\n\tVt=abs(gT)*M1*cos((2*pie*t/T)-(gTphase))+Vt0 : 1\r\n''')\r\n\r\n## make the synapses\r\n\r\n# Spg = Synapses(GC,PC,model='''P_post = Wpg*G_pre : 1 (summed)\r\n# \t\t\t\t\t\t\t\tWpg = x_pre : 1''')\r\n\r\nSig=Synapses(GC,IC,\r\n\t'''\r\n\tGtot_post=G_pre : 1 (summed)\r\n\t''')\r\n\r\nSip=Synapses(IC,PC,\r\n\t'''\r\n\tP2_post=Wpi*I_pre : 1 (summed)\r\n\tP2ini_post=Wpi*Iini_pre : 1 (summed)\r\n\t''')\r\n\r\nSpg = Synapses(GC,PC,\r\n\t'''\r\n\tP1_post = Wpg*G_pre : 1 (summed)\r\n\tP1ini_post = Wpgini*G_pre : 1 (summed)\r\n\tdWpg/dt = (gamma + tramma*randn())*(int((int((gamma + tramma*randn()) <0)*int(Wpg>lowerPG)+int((gamma+ tramma*randn())>0)*int(Wpg<upperPG))>0)) : 1 (clock-driven)\r\n\tgamma = (-alphaPG*C_post)*G_pre+alphaD*(Wpgini-Wpg): 1\r\n\ttramma = sqrt(alphaPG)*sigma*G_pre : 1\r\n\t''')\r\n\t#\r\nSpg.connect()\r\nSpg.Wpg=Wpgini\r\n# sums granule cell activity into the purkinje cell, as weights are applied.\r\n\r\nSmv = Synapses(MF,MVN,\r\n\t'''\r\n\tVI_post=M_pre : 1 (summed)\r\n\tA_post=2*Wvm*(M_pre-M0) : 1 (summed)\r\n\tAini_post=2*Wvmini*(M_pre-M0) : 1 (summed)\r\n\ttheta = ((alphavm)*(M0-M_pre)*(P_post-Pini_post)) : 1\r\n\tdWvm/dt=theta*(1-int(theta<0)*int(Wvm\n\"\"\"\n\n\n","sub_path":"testingRetiredCode/finalCodeBeforeCleaning 9-11/exp1_imageMatching/spectral_2D_image_match.py","file_name":"spectral_2D_image_match.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"155377994","text":"from __future__ import division\n\nimport math\n\ndef differenceInNumOfWord(sentence_1, sentence_2):\n    return math.fabs(len(sentence_1)-len(sentence_2))\n\n# word ratio\ndef wordRatio(words, totalWords):\n    wordCount = 0\n    for word in words:\n        wordCount += word[1]\n\n    return wordCount/totalWords\n\n# significance ratio\ndef significanceRatio(sameSignificance, uniqueSignificance):\n    if uniqueSignificance == 0:\n        return float('inf')\n    else:\n        return sameSignificance/uniqueSignificance\n\n# ratio between unique words and same words\ndef sameOverUnique(same, unique):\n    sameCount = 0\n    uniqueCount = 0\n    for word in same:\n        sameCount += word[1]\n    for word in unique:\n        uniqueCount += word[1]\n\n    if uniqueCount == 0:\n        return float('inf')\n    else:\n        return sameCount/uniqueCount\n\n# return the sum of significance of all the same words\n# and the sum of significance of all the unique words\ndef calSignificance(sameWords, uniqueWords, dictionary):\n    sumOfSameWords = dictionary.sumOfSignificance(sameWords)\n    if len(uniqueWords) >= 2:\n        additionalSignificance, uniqueWords = uniqueWordAnalizer(uniqueWords, dictionary.getDictionary())\n        sumOfSameWords += additionalSignificance\n    sumOfUniqueWords = dictionary.sumOfSignificance(uniqueWords)\n\n    return sumOfSameWords, sumOfUniqueWords\n\n\n# if two or multiple unique words are actually similar words\n# average their significance and add the result to the sumOfSameWords\n# I only look for one-level of similarity\ndef uniqueWordAnalizer(uniqueWords, dictionary):\n    significance = 0\n    entry = uniqueWords[0]\n    quit = False\n    while entry != uniqueWords[len(uniqueWords)-1]:\n        length = len(uniqueWords)\n        found = False\n        index = uniqueWords.index(entry)\n        for i in range(length-index-1):\n            # found one similar word\n            # print len(uniqueWords), index, i\n            if dictionary.has_key(entry[0]):\n                if dictionary[entry[0]][2].has_key(uniqueWords[index+i+1][0]):\n                    word_1_significance = dictionary[entry[0]][0] * entry[1]\n                    word_2_significance = dictionary[uniqueWords[index+i+1][0]][0] * uniqueWords[index+i+1][1]\n                    totalAppearance = entry[1] + uniqueWords[index+i+1][1]\n                    significance += 0.8 * (word_1_significance + word_2_significance)/totalAppearance\n                    uniqueWords.pop(index)\n                    uniqueWords.pop(index+i)\n                    found = True\n                    if len(uniqueWords) >= 2:\n                        if index < len(uniqueWords):\n                            entry = uniqueWords[index]\n                        else:\n                            entry = uniqueWords[len(uniqueWords)-1]\n                        break\n                    else:\n                        quit = True\n                        break\n        # exit the for loop meaning no similar words are found\n        if not found:\n            entry = uniqueWords[index+1]\n        if len(uniqueWords) < 2 or quit == True:\n            break\n\n    return significance, uniqueWords\n","sub_path":"features_generator.py","file_name":"features_generator.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"233691795","text":"#!/usr/bin/python3\n\nimport os\nimport time\nimport random\nfrom copy import deepcopy\nfrom typing import List, Tuple\n\nimport cv2\nimport PIL\nimport numpy as np\nfrom Augmentor import DataPipeline\n\nfrom keras.optimizers import Adam\nfrom keras.utils import multi_gpu_model, Sequence\n\nfrom .unet import unet\nfrom .utils.metric_utils import (\n    dice_coef,\n    dice_coef_loss,\n    jacard_coef\n)\nfrom .utils.augmentor_utils import (\n    GaussianNoiseAugmentor,\n    InvertPartAugmentor,\n    SaltPepperNoiseAugmentor\n)\nfrom .utils.img_processing_utils import (\n    normalize_gt,\n    normalize_in\n)\nfrom .utils.callback_utils import create_callbacks\n\n\nclass ParallelDataGenerator(Sequence):\n    \"\"\"Generate images for training/validation/testing (parallel version).\n\n    Parameters\n    ----------\n    fnames_in: List[str]\n        list of input images\n    fnames_gt: List[str]\n        list of gt images\n    batch_size: int\n        batch size to generate augmentations on images\n    augmentate: bool\n        apply augmentation to the batch of images\n\n    \"\"\"\n    def __init__(\n        self,\n        fnames_in: List[str],\n        fnames_gt: List[str],\n        batch_size: int,\n        augmentate: bool\n    ):\n        self.fnames_in = deepcopy(fnames_in)\n        self.fnames_gt = deepcopy(fnames_gt)\n        self.batch_size = batch_size\n        self.augmentate = augmentate\n        self.idxs = np.array([i for i in range(len(self.fnames_in))])\n\n    def __len__(self):\n        return int(np.ceil(float(self.idxs.shape[0]) / float(self.batch_size)))\n\n    def on_epoch_end(self):\n        np.random.shuffle(self.idxs)\n\n    def __apply_augmentation__(self, p: object) -> List[np.ndarray]:\n        \"\"\"Apply augmentation on batch of images\"\"\"\n        batch = []\n        for i in range(0, len(p.augmentor_images)):\n            images_to_return = [\n                PIL.Image.fromarray(x) for x in p.augmentor_images[i]\n            ]\n\n            for operation in p.operations:\n                r = round(random.uniform(0, 1), 1)\n                if r <= operation.probability:\n                    images_to_return = operation.perform_operation(\n                        images_to_return\n                    )\n\n            images_to_return = [np.asarray(x) for x in images_to_return]\n            batch.append(images_to_return)\n        return batch\n\n    def augmentate_batch(\n        self,\n        imgs_in: List[np.ndarray],\n        imgs_gt: List[np.ndarray]\n    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:\n        \"\"\"Generate ordered augmented batch of images, using Augmentor\n\n        Parameters\n        ----------\n        imgs_in: List[numpy.ndarray]\n            list of input images as array\n        imgs_gt: List[numpy.ndarray]\n            list of gt image as array\n        Returns\n        -------\n        Tuple[List[numpy.ndarray], List[numpy.ndarray]]\n            List of input images after applying augmentation\n            List of gt images after applying augmentation\n\n        \"\"\"\n        # Non-Linear transformations.\n        imgs = [[imgs_in[i], imgs_gt[i]] for i in range(len(imgs_in))]\n        p = DataPipeline(imgs)\n        p.random_distortion(0.5, 6, 6, 4)\n        # Linear transformations.\n        # p.rotate(0.75, 15, 15)\n        p.shear(0.75, 10.0, 10.0)\n        p.zoom(0.75, 1.0, 1.2)\n        p.skew(0.75, 0.75)\n        imgs = self.__apply_augmentation__(p)\n        imgs_in = [p[0] for p in imgs]\n        imgs_gt = [p[1] for p in imgs]\n\n        # Noise transformations.\n        p = DataPipeline([[img] for img in imgs_in])\n        gaussian_noise = GaussianNoiseAugmentor(0.25, 0, 10)\n        p.add_operation(gaussian_noise)\n        salt_pepper_noise = SaltPepperNoiseAugmentor(0.25, 0.005)\n        p.add_operation(salt_pepper_noise)\n        # Brightness transformation.\n        p.random_brightness(0.75, 0.5, 1.5)\n        p.random_contrast(0.75, 0.5, 1.5)\n        # Color inversion.\n        invert = InvertPartAugmentor(0.25)\n        p.add_operation(invert)\n        p.invert(0.5)\n        imgs_in = self.__apply_augmentation__(p)\n        imgs_in = [p[0] for p in imgs_in]\n\n        return imgs_in, imgs_gt\n\n    def __getitem__(self, idx):\n        \"\"\"Creates numpy arrays with images.\"\"\"\n        start = idx * self.batch_size\n        stop = start + self.batch_size\n        if stop >= self.idxs.shape[0]:\n            stop = self.idxs.shape[0]\n\n        imgs_in = []\n        imgs_gt = []\n        for i in range(start, stop):\n            imgs_in.append(\n                cv2.imread(self.fnames_in[self.idxs[i]], cv2.IMREAD_GRAYSCALE)\n            )\n            imgs_gt.append(\n                cv2.imread(self.fnames_gt[self.idxs[i]], cv2.IMREAD_GRAYSCALE)\n            )\n\n        # Applying augmentations.\n        if self.augmentate:\n            imgs_in, imgs_gt = self.augmentate_batch(imgs_in, imgs_gt)\n\n        \"\"\"\n        # Debug.\n        for i in range(len(imgs_in)):\n            cv2.imshow('in_' 
+ str(i), imgs_in[i])\n cv2.imshow('gt_' + str(i), imgs_gt[i])\n cv2.waitKey(0)\n \"\"\"\n\n # Normalization.\n imgs_in = np.array([normalize_in(img) for img in imgs_in])\n imgs_in.shape = (\n imgs_in.shape[0],\n imgs_in.shape[1],\n imgs_in.shape[2],\n 1\n )\n imgs_gt = np.array([normalize_gt(img) for img in imgs_gt])\n imgs_gt.shape = (\n imgs_gt.shape[0],\n imgs_gt.shape[1],\n imgs_gt.shape[2],\n 1\n )\n\n return imgs_in, imgs_gt\n\n\ndef main(\n input: str = os.path.join(\".\", \"input\"),\n vis: str = os.path.join(\".\", \"vis\"),\n debug: str = os.path.join(\".\", \"train_logs\"),\n epochs: int = 1,\n batchsize: int = 32,\n augmentate: bool = True,\n train_split: int = 80,\n val_split: int = 10,\n test_split: int = 10,\n weights_path: str = os.path.join(\".\", \"bin_weights.hdf5\"),\n num_gpus: int = 1,\n extraprocesses: int = 0,\n queuesize: int = 10,\n):\n \"\"\"Train U-net with pairs of train and ground-truth images.\n\n Parameters\n ----------\n input: str, optional\n input dir with in and gt sub folders to train\n (default is os.path.join(\".\", \"input\")).\n vis: str, optional\n dir with image to use for train visualization\n (default is os.path.join(\".\", \"vis\")).\n debug: str, optional\n path to save training logs\n (default is os.path.join(\".\", \"train_logs\")).\n epochs: int, optional\n number of epocs to train robin (default is `1`).\n batchsize: int, optional\n batchsize to train robin (default is `32`).\n augmentate: bool, optional\n argumentate the original images for training robin\n (default is `True`)\n train_split: int, optional\n train dataset split percentage (default is `80`).\n val_split: int, optional\n validation dataset split percentage (default is `10`).\n test_split: int, optional\n train dataset split percentage (default is `10`).\n weights_path: str, optional\n path to save final weights\n (default is os.path.join(\".\", \"bin_weights.hdf5\")).\n num_gpus: int, optional\n number of gpus to use for training robin (default is `1`)\n extraprocesses: int, optional\n number of extraprocesses to use (default is `0`).\n queuesize: int, optional\n number of batches to generate in queue while training\n (default is `10`).\n\n Retunrs\n -------\n None\n\n Notes\n -----\n All train images should be in \"in\" directory.\n All ground-truth images should be in \"gt\" directory.\n\n Example\n -------\n robin.train.main(input, vis, logs_dir, 2, 4)\n\n \"\"\"\n assert epochs > 0\n assert batchsize > 0\n\n assert train_split >= 0\n assert val_split >= 0\n assert test_split >= 0\n\n assert num_gpus >= 1\n assert extraprocesses >= 0\n assert queuesize >= 0\n\n start_time = time.time()\n np.random.seed()\n\n # Creating data for training, validation and testing.\n fnames_in = [\n os.path.join(input, \"in\", str(i) + \"_in.png\")\n for i in range(len(os.listdir(os.path.join(input, \"in\"))))\n ]\n fnames_gt = [\n os.path.join(input, \"gt\", str(i) + \"_gt.png\")\n for i in range(len(os.listdir(os.path.join(input, \"gt\"))))\n ]\n assert len(fnames_in) == len(fnames_gt)\n n = len(fnames_in)\n\n train_start = 0\n\n train_stop = int(n * (train_split / 100))\n train_in = fnames_in[train_start:train_stop]\n train_gt = fnames_gt[train_start:train_stop]\n train_generator = ParallelDataGenerator(\n train_in,\n train_gt,\n batchsize,\n augmentate\n )\n\n validation_start = train_stop\n validation_stop = validation_start + int(n * (val_split / 100))\n validation_in = fnames_in[validation_start:validation_stop]\n validation_gt = fnames_gt[validation_start:validation_stop]\n 
validation_generator = ParallelDataGenerator(\n validation_in, validation_gt, batchsize, augmentate\n )\n\n test_start = validation_stop\n test_stop = n\n test_in = fnames_in[test_start:test_stop]\n test_gt = fnames_gt[test_start:test_stop]\n test_generator = ParallelDataGenerator(\n test_in,\n test_gt,\n batchsize,\n augmentate\n )\n\n # check that the validation and test sets yield at least batchsize batches\n assert (validation_generator.__len__() >= batchsize)\n assert (test_generator.__len__() >= batchsize)\n\n # Creating model.\n original_model = unet()\n if num_gpus == 1:\n model = original_model\n model.compile(\n optimizer=Adam(lr=1e-4),\n loss=dice_coef_loss,\n metrics=[dice_coef, jacard_coef, \"accuracy\"],\n )\n model.summary()\n else:\n model = multi_gpu_model(original_model, gpus=num_gpus)\n model.compile(\n optimizer=Adam(lr=1e-4),\n loss=dice_coef_loss,\n metrics=[dice_coef, jacard_coef, \"accuracy\"],\n )\n model.summary()\n callbacks = create_callbacks(\n model,\n original_model,\n debug,\n num_gpus,\n augmentate,\n batchsize,\n vis,\n weights_path\n )\n\n # Running training, validation and testing.\n if extraprocesses == 0:\n model.fit_generator(\n generator=train_generator,\n steps_per_epoch=train_generator.__len__(),\n # Compatibility with old Keras versions.\n validation_data=validation_generator,\n validation_steps=validation_generator.__len__(),\n # Compatibility with old Keras versions.\n epochs=epochs,\n shuffle=True,\n callbacks=callbacks,\n use_multiprocessing=False,\n workers=0,\n max_queue_size=queuesize,\n verbose=1,\n )\n metrics = model.evaluate_generator(\n generator=test_generator,\n use_multiprocessing=False,\n workers=0,\n max_queue_size=queuesize,\n verbose=1,\n )\n else:\n model.fit_generator(\n generator=train_generator,\n steps_per_epoch=train_generator.__len__(),\n # Compatibility with old Keras versions.\n validation_data=validation_generator,\n validation_steps=validation_generator.__len__(),\n # Compatibility with old Keras versions.\n epochs=epochs,\n shuffle=True,\n callbacks=callbacks,\n use_multiprocessing=True,\n workers=extraprocesses,\n max_queue_size=queuesize,\n verbose=1,\n )\n metrics = model.evaluate_generator(\n generator=test_generator,\n use_multiprocessing=True,\n workers=extraprocesses,\n max_queue_size=queuesize,\n verbose=1,\n )\n\n print()\n print(\"total:\")\n print(\"test_loss: {0:.4f}\".format(metrics[0]))\n print(\"test_dice_coef: {0:.4f}\".format(metrics[1]))\n print(\"test_jacard_coef: {0:.4f}\".format(metrics[2]))\n print(\"test_accuracy: {0:.4f}\".format(metrics[3]))\n\n # Saving model.\n if debug != \"\":\n model.save_weights(weights_path)\n print(\"finished in {0:.2f} seconds\".format(time.time() - start_time))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"robin/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637261011","text":"import komand\nfrom .schema import UpdateSecurityGroupRuleDescriptionsEgressInput, UpdateSecurityGroupRuleDescriptionsEgressOutput\n\n# Custom imports below\nfrom komand_aws_ec2.util.common import AWSAction\n\n\nclass UpdateSecurityGroupRuleDescriptionsEgress(AWSAction):\n def __init__(self):\n super().__init__(\n name=\"update_security_group_rule_descriptions_egress\",\n description=\"[EC2-VPC only] Updates the description of an egress (outbound) security group rule\",\n input=UpdateSecurityGroupRuleDescriptionsEgressInput(),\n 
output=UpdateSecurityGroupRuleDescriptionsEgressOutput(),\n aws_service=\"ec2\",\n aws_command=\"update_security_group_rule_descriptions_egress\",\n pagination_helper=None,\n )\n","sub_path":"aws_ec2/komand_aws_ec2/actions/update_security_group_rule_descriptions_egress/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"129200797","text":"import numpy as np\nimport cv2\nimport time\nfrom pathlib import Path\n\nimg_path = str(Path(__file__).parent.parent / 'Images/cat_bw.jpg')\nimg = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\nimg_out = img.copy()\n\nheight = img.shape[0]\nwidth = img.shape[1]\n\nstart_time = time.time()\nfor i in np.arange(3, height - 3):\n for j in np.arange(3, width - 3):\n img_out[i, j] = np.amin(img[i-3:i+4, j-3:j+4])\n\nelapsed_time = time.time() - start_time\nprint(elapsed_time)\n\ncv2.imshow('image', img_out)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"MinMaxMedianFilter/MinFilter2.py","file_name":"MinFilter2.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"113574275","text":"from time import sleep\ninfo = {}\ncadastro = []\nmedia = totm = 0\nwhile True:\n info['nome'] = (str(input('\\nNome: ')).capitalize())\n info['sexo'] = (str(input('Sexo [F/M]: ')).upper())\n while info['sexo'] not in 'FM' or info['sexo'].isdigit():\n info['sexo'] = (str(input('Por favor, apenas F ou M: ')).upper())\n n = input('Idade: ')\n while not n.isdigit():\n n = input('Por favor, digite a idade apenas em números: ')\n if n.isdigit():\n info['idade'] = int(n)\n media += info['idade']\n cadastro.append(info.copy())\n esc = str(input('\\nQuer continuar? 
')).upper()\n while esc not in 'SN' or esc.isdigit():\n esc = str(input('\\nPor favor, apenas Sim ou Não: ')).upper()\n if esc == 'N':\n print('\\nProcessando', end='')\n sleep(0.5)\n print('.', end='')\n sleep(0.5)\n print('.', end='')\n sleep(0.5)\n print('.\\n')\n break\nmedia /= len(cadastro)\nprint(f'{\" RESULTADOS \":-^30}')\nprint(f'-> O grupo é formado por {len(cadastro)} pessoas')\nprint(f'-> A média de idade do grupo é {media:.2f} anos')\nprint(f'-> As mulheres cadastradas foram: ')\nfor p in cadastro:\n if p['sexo'] == 'F':\n totm += 1\n print(f' * {p[\"nome\"]}')\nif totm == 0:\n print(' * Nenhuma mulher cadastrada')\nprint(f'-> Lista das pessoas com idade acima da média de {media}: ')\nfor p in cadastro:\n if p['idade'] > media:\n for k, v in p.items():\n print(f'{k} = {v};', end=' ')\n print()\nprint('PROGRAMA FINALIZADO')\n","sub_path":"Mundo-03/ex94-pessoas_dicionario.py","file_name":"ex94-pessoas_dicionario.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"567698929","text":"import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nSETTINGS_ROOT = os.path.abspath(os.path.dirname(__file__))\n\n\nSECRET_KEY = '(x-0x798!jx8hz(l$0%!ybiigh7f0sl&xwc&o=)z$x@!j64or='\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nALLOWED_HOSTS = []\n\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n # 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'easy_thumbnails',\n 'catalog'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n # 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nROOT_URLCONF = 'demo_catalog.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n # 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.static',\n 'django.template.context_processors.media'\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'demo_catalog.wsgi.application'\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': 'db-srv.vm.local:11211',\n 'TIMEOUT': 60*60*24*10\n }\n}\n\n\nTIME_ZONE = 'UTC'\nUSE_I18N = False\nUSE_L10N = False\nUSE_TZ = True\n\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\nSTATIC_ROOT = os.path.join(SETTINGS_ROOT, 'static/')\nMEDIA_ROOT = os.path.join(SETTINGS_ROOT, 'media/')\nSTATICFILES_DIRS = (\n os.path.join(SETTINGS_ROOT, 'static-dev/'),\n)\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'thumb': {'size': (100, 100), 'crop': True},\n },\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 
'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n # Outputs all SQL queries to stderr\n # 'django.db.backends': {\n # 'level': 'DEBUG',\n # 'handlers': ['console', 'django_log_handler'],\n # 'propagate': True,\n # },\n 'catalog.views': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n },\n 'catalog.tasks': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n },\n }\n}\n","sub_path":"demo_catalog/demo_catalog/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330790429","text":"\"\"\"Tests for certbot.compat.\"\"\"\nimport certbot.tests.util as test_util\nfrom certbot.compat import misc\nfrom certbot.compat import os\n\n\nclass OsReplaceTest(test_util.TempDirTestCase):\n \"\"\"Test to ensure consistent behavior of os_rename method\"\"\"\n\n def test_os_rename_to_existing_file(self):\n \"\"\"Ensure that os_rename will effectively rename src into dst for all platforms.\"\"\"\n src = os.path.join(self.tempdir, 'src')\n dst = os.path.join(self.tempdir, 'dst')\n open(src, 'w').close()\n open(dst, 'w').close()\n\n # On Windows, a direct call to os.rename will fail because dst already exists.\n misc.os_rename(src, dst)\n\n self.assertFalse(os.path.exists(src))\n self.assertTrue(os.path.exists(dst))\n","sub_path":"certbot/tests/compat/compat_test.py","file_name":"compat_test.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"602202582","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport os\nimport logging\nfrom collections import OrderedDict\nimport sys\nimport pkgutil\nimport importlib\nimport inspect\nimport copy\nfrom ._basedriver import BaseDriver\nfrom qiskit_chemistry.preferences import Preferences\nfrom collections import namedtuple\nfrom qiskit_chemistry import QiskitChemistryError\nimport pkg_resources\n\nlogger = logging.getLogger(__name__)\n\n_NAMES_TO_EXCLUDE = ['configurationmanager']\n\n_FOLDERS_TO_EXCLUDE = ['__pycache__']\n\nRegisteredDriver = namedtuple(\n 'RegisteredDriver', ['name', 'cls', 'configuration'])\n\n\"\"\"Singleton configuration class.\"\"\"\n\n\nclass ConfigurationManager(object):\n\n __INSTANCE = None # Shared instance\n\n def __init__(self):\n \"\"\" Create singleton instance \"\"\"\n if ConfigurationManager.__INSTANCE is None:\n ConfigurationManager.__INSTANCE = ConfigurationManager.__ConfigurationManager()\n\n # Store instance reference as the only member in the handle\n self.__dict__['_ConfigurationManager__instance'] = ConfigurationManager.__INSTANCE\n\n 
def __getattr__(self, attr):\n \"\"\" Delegate access to implementation \"\"\"\n return getattr(self.__INSTANCE, attr)\n\n def __setattr__(self, attr, value):\n \"\"\" Delegate access to implementation \"\"\"\n return setattr(self.__INSTANCE, attr, value)\n\n class __ConfigurationManager(object):\n\n def __init__(self):\n self._discovered = False\n self._registration = OrderedDict()\n\n def register_driver(self, cls):\n \"\"\"\n Registers a driver class\n Args:\n cls (object): Driver class.\n Returns:\n name: driver name\n \"\"\"\n self._discover_on_demand()\n if not issubclass(cls, BaseDriver):\n raise QiskitChemistryError(\n 'Could not register class {}: it is not a subclass of BaseDriver'.format(cls))\n\n return self._register_driver(cls)\n\n def _register_driver(self, cls):\n # Verify that the driver is not already registered.\n if cls in [driver.cls for driver in self._registration.values()]:\n raise QiskitChemistryError(\n 'Could not register class {}: it is already registered'.format(cls))\n\n # Verify that it has a minimal valid configuration.\n try:\n driver_name = cls.CONFIGURATION['name']\n except (LookupError, TypeError):\n raise QiskitChemistryError('Could not register driver: invalid configuration')\n\n # Verify that the driver is valid\n check_driver_valid = getattr(cls, 'check_driver_valid', None)\n if check_driver_valid is not None:\n try:\n check_driver_valid()\n except Exception as e:\n logger.debug(str(e))\n raise QiskitChemistryError('Could not register class {}. Name {} is not valid'.format(cls, driver_name)) from e\n\n if driver_name in self._registration:\n raise QiskitChemistryError('Could not register class {}. Name {} {} is already registered'.format(cls,\n driver_name,\n self._registration[driver_name].cls))\n\n # Append the driver to the `registered_classes` dict.\n self._registration[driver_name] = RegisteredDriver(\n driver_name, cls, copy.deepcopy(cls.CONFIGURATION))\n return driver_name\n\n def deregister_driver(self, driver_name):\n \"\"\"Remove driver from list of available drivers\n Args:\n driver_name (str): name of driver to unregister\n Raises:\n QiskitChemistryError if name is not registered.\n \"\"\"\n self._discover_on_demand()\n\n if driver_name not in self._registration:\n raise QiskitChemistryError('Could not deregister {}: it is not registered'.format(driver_name))\n\n self._registration.pop(driver_name)\n\n def get_driver_class(self, driver_name):\n \"\"\"Return the class object for the named module.\n Args:\n driver_name (str): the module name\n Returns:\n Class: class object for module\n Raises:\n QiskitChemistryError: if module is unavailable\n \"\"\"\n self._discover_on_demand()\n\n if driver_name not in self._registration:\n raise QiskitChemistryError('{} not registered'.format(driver_name))\n\n return self._registration[driver_name].cls\n\n def get_driver_configuration(self, driver_name):\n \"\"\"Return the configuration for the named module.\n Args:\n driver_name (str): the module name\n Returns:\n dict: configuration dict\n Raises:\n QiskitChemistryError: if module is unavailable\n \"\"\"\n self._discover_on_demand()\n\n if driver_name not in self._registration:\n raise QiskitChemistryError('{} not registered'.format(driver_name))\n\n return copy.deepcopy(self._registration[driver_name].configuration)\n\n def get_driver_instance(self, name):\n \"\"\"Return an instance for the name in configuration.\n Args:\n name (str): the name\n Returns:\n Object: module instance\n Raises:\n QiskitChemistryError: if module is unavailable\n \"\"\"\n cls = 
self.get_driver_class(name)\n try:\n return cls()\n except Exception as err:\n raise QiskitChemistryError('{} could not be instantiated: {}'.format(cls, err))\n\n def local_drivers(self):\n \"\"\"\n Accesses chemistry drivers names\n Returns:\n names: chemistry drivers names\n \"\"\"\n self._discover_on_demand()\n return [input.name for input in self._registration.values()]\n\n def refresh_drivers(self):\n \"\"\"\n Attempts to rediscover all driver modules\n \"\"\"\n self._discovered = False\n self._registration = OrderedDict()\n self._discover_local_drivers()\n self._discover_entry_point_chemistry_drivers()\n self._discover_preferences_drivers()\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(\"Found: drivers {} \".format(self.local_drivers()))\n\n def _discover_on_demand(self):\n \"\"\"\n Attempts to discover drivers modules, if not already discovered\n \"\"\"\n if not self._discovered:\n self._discovered = True\n self._registration = OrderedDict()\n self._discover_local_drivers()\n self._discover_entry_point_chemistry_drivers()\n self._discover_preferences_drivers()\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(\"Found: has drivers {} \".format(self.local_drivers()))\n\n def _discover_entry_point_chemistry_drivers(self):\n \"\"\"\n Discovers the chemistry driver modules defined by entry_points in setup\n and attempts to register them. Chem.Drivers modules should subclass BaseDriver Base class.\n \"\"\"\n for entry_point in pkg_resources.iter_entry_points('qiskit.chemistry.drivers'):\n try:\n ep = entry_point.load()\n _registered = False\n if issubclass(ep, BaseDriver):\n self._register_driver(ep)\n _registered = True\n # print(\"Registered entry point chemistry driver '{}' class '{}'\".format(entry_point, ep))\n logger.debug(\"Registered entry point chemistry driver '{}' class '{}'\".format(entry_point, ep))\n break\n\n if not _registered:\n # print(\"Unknown entry point chemistry driver '{}' class '{}'\".format(entry_point, ep))\n logger.debug(\"Unknown entry point chemistry driver '{}' class '{}'\".format(entry_point, ep))\n except Exception as e:\n # Ignore entry point that could not be initialized.\n # print(\"Failed to load entry point '{}' error {}\".format(entry_point, str(e)))\n logger.debug(\"Failed to load entry point '{}' error {}\".format(entry_point, str(e)))\n\n def _discover_preferences_drivers(self):\n \"\"\"\n Discovers the chemistry drivers on the directory and subdirectories of the preferences package\n and attempts to register them. 
Drivers modules should subclass BaseDriver Base class.\n \"\"\"\n preferences = Preferences()\n packages = preferences.get_packages(Preferences.PACKAGE_TYPE_DRIVERS, [])\n for package in packages:\n try:\n mod = importlib.import_module(package)\n if mod is not None:\n self._discover_local_drivers_in_dirs(os.path.dirname(mod.__file__),\n mod.__name__,\n names_to_exclude=[\n '__main__'],\n folders_to_exclude=['__pycache__'])\n else:\n # Ignore package that could not be initialized.\n logger.debug('Failed to import package {}'.format(package))\n except Exception as e:\n # Ignore package that could not be initialized.\n logger.debug(\n 'Failed to load package {} error {}'.format(package, str(e)))\n\n def _discover_local_drivers_in_dirs(self,\n directory,\n parentname,\n names_to_exclude=_NAMES_TO_EXCLUDE,\n folders_to_exclude=_FOLDERS_TO_EXCLUDE):\n for _, name, ispackage in pkgutil.iter_modules([directory]):\n if ispackage:\n continue\n\n # Iterate through the modules\n if name not in names_to_exclude: # skip those modules\n try:\n fullname = parentname + '.' + name\n modspec = importlib.util.find_spec(fullname)\n mod = importlib.util.module_from_spec(modspec)\n modspec.loader.exec_module(mod)\n for _, cls in inspect.getmembers(mod, inspect.isclass):\n # Iterate through the classes defined on the module.\n try:\n if cls.__module__ == modspec.name and issubclass(cls, BaseDriver):\n self._register_driver(cls)\n importlib.import_module(fullname)\n except Exception as e:\n # Ignore operator that could not be initialized.\n logger.debug('Failed to load {} error {}'.format(fullname, str(e)))\n except Exception as e:\n # Ignore operator that could not be initialized.\n logger.debug('Failed to load {} error {}'.format(fullname, str(e)))\n\n for item in os.listdir(directory):\n fullpath = os.path.join(directory, item)\n if item not in folders_to_exclude and not item.endswith('dSYM') and os.path.isdir(fullpath):\n self._discover_local_drivers_in_dirs(\n fullpath, parentname + '.' + item, names_to_exclude, folders_to_exclude)\n\n def _discover_local_drivers(self,\n directory=os.path.dirname(__file__),\n parentname=os.path.splitext(__name__)[0]):\n \"\"\"\n Discovers the chemistry drivers modules on the directory and subdirectories of the current module\n and attempts to register them. Driver modules should subclass BaseDriver Base class.\n Args:\n directory (str, optional): Directory to search for input modules. Defaults\n to the directory of this module.\n parentname (str, optional): Module parent name. 
Defaults to current directory name\n \"\"\"\n\n def _get_sys_path(directory):\n syspath = [os.path.abspath(directory)]\n for item in os.listdir(directory):\n fullpath = os.path.join(directory, item)\n if item != '__pycache__' and not item.endswith('dSYM') and os.path.isdir(fullpath):\n syspath += _get_sys_path(fullpath)\n\n return syspath\n\n syspath_save = sys.path\n sys.path = _get_sys_path(directory) + sys.path\n try:\n self._discover_local_drivers_in_dirs(directory, parentname)\n finally:\n sys.path = syspath_save\n","sub_path":"qiskit_chemistry/drivers/configurationmanager.py","file_name":"configurationmanager.py","file_ext":"py","file_size_in_byte":14227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490312155","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport logging\nimport math\nimport os\nimport random\nimport sys\nfrom configparser import ConfigParser\n\nclass Generator:\n \n config = None\n out_directory = os.path.dirname(os.path.realpath(__file__)) + \"/out/\"\n out_filename = \"data.csv\"\n\n num_rows = 1\n lower_bound = 0\n upper_bound = 1\n # number of rows between values that fall outside the min-max bounds\n # e.g., if rows_between_outliers equals 200 and num_rows equals 1000,\n # then there will be an outlier every 200 rows\n rows_between_outliers = 1\n\n # 0=no noise, i.e., outliers will be placed exactly where it is indicated by the rows_between_outliers parameter\n # 1=maximum noise\n # example: rows_between_outliers=100, noise_ratio=0.1\n # -> outliers will be placed at rows corresponding to 90-110, 190-210...\n noise_ratio = 0\n\n def __init__(self):\n \"\"\"\n initialize the application by loading properties from the configuration file\n \"\"\"\n # read configuration file name from command line argument\n if len(sys.argv) < 2:\n logging.info(\"Missing argument: configuration file name.\")\n sys.exit(1)\n \n # instantiating the configuration parser\n self.config = ConfigParser()\n config_file_path = os.path.dirname(os.path.realpath(__file__)) +\"/configs/\"+ sys.argv[1]+\".ini\"\n self.config.read(config_file_path)\n\n invalid_values = [None, \"null\", \"None\", \"\"]\n\n # reading configuration properties from config.ini\n if self.config['general']['out_directory'] not in invalid_values:\n self.out_directory = self.config['general']['out_directory']\n\n if self.config['general']['out_filename'] not in invalid_values:\n self.out_filename = self.config['general']['out_filename']\n\n if self.config['generator']['num_rows'] not in invalid_values:\n self.num_rows = int(self.config['generator']['num_rows'])\n\n if self.config['generator']['lower_bound'] not in invalid_values:\n self.lower_bound = float(self.config['generator']['lower_bound'])\n\n if self.config['generator']['upper_bound'] not in invalid_values:\n self.upper_bound = float(self.config['generator']['upper_bound'])\n\n if self.config['generator']['rows_between_outliers'] not in invalid_values:\n self.rows_between_outliers = float(self.config['generator']['rows_between_outliers'])\n\n if self.config['generator']['noise_ratio'] not in invalid_values:\n self.noise_ratio = float(self.config['generator']['noise_ratio'])\n\n logging.debug(\"configuration values: \")\n logging.debug(\"out_directory: {}\".format(self.out_directory))\n logging.debug(\"out_filename: {}\".format(self.out_filename))\n logging.debug(\"num_rows: {}\".format(self.num_rows))\n logging.debug(\"lower_bound: {}\".format(self.lower_bound))\n logging.debug(\"upper_bound: {}\".format(self.upper_bound))\n logging.debug(\"rows_between_outliers: {}\".format(self.rows_between_outliers))\n logging.debug(\"noise_ratio: {}\".format(self.noise_ratio))\n\n 
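# Editorial worked example (not part of the original script): with num_rows=1000 and\n # rows_between_outliers=200, roughly 1000 / 200 = 5 outliers are emitted; adding\n # noise_ratio=0.1 jitters each outlier position by up to +/-20 rows.\n\n 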
logging.debug(\"upper_bound: {}\".format(self.upper_bound))\n logging.debug(\"rows_between_outliers: {}\".format(self.rows_between_outliers))\n logging.debug(\"noise_ratio: {}\".format(self.noise_ratio))\n\n def clean(self):\n \"\"\"\n deletes previously generated files\n \"\"\"\n if os.path.exists(self.out_directory + self.out_filename):\n os.remove(self.out_directory + self.out_filename)\n else:\n pass\n\n def generate(self):\n \"\"\"\n generates the dataset\n \"\"\"\n with open(self.out_directory + self.out_filename, 'w') as csv_file:\n number_of_outliers = 0\n file_writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\n file_writer.writerow(['identifier', 'value'])\n\n target_row = 0\n logging.debug(\"target_row: {}\".format(target_row))\n for i in range(self.num_rows):\n identifier = i\n # check if I have reached a row in which I have to put an outlier\n if i == target_row:\n number_of_outliers += 1\n value = random.uniform(self.upper_bound, 2 * self.upper_bound)\n # update target row based on noise value\n target_row = i + math.floor(self.rows_between_outliers + self.get_noise_value())\n logging.debug(\"target_row: {}\".format(target_row))\n else:\n value = random.uniform(self.lower_bound, self.upper_bound)\n\n file_writer.writerow([identifier, value])\n\n logging.info(\"number of outliers: {} ({}%)\"\n .format(str(number_of_outliers), str((number_of_outliers/self.num_rows)*100)))\n\n\n def get_noise_value(self):\n \"\"\"\n :return: + or - noise (calculated as rows_between_outliers*noise_ratio)\n \"\"\"\n sign = bool(int(random.randrange(0, 1, 1)))\n noise = random.uniform(0, self.rows_between_outliers * self.noise_ratio)\n return noise if sign else noise * -1\n\n\n# entry point\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n logging.info(\"initializing...\")\n g = Generator()\n\n logging.info(\"cleaning...\")\n g.clean()\n\n logging.info(\"generating...\")\n g.generate()\n","sub_path":"implementation/dataset-generator/dataset-generator.py","file_name":"dataset-generator.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478185341","text":"from codecs import open\nfrom os import path\n\nfrom setuptools import setup, find_packages\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='pyautodl-trackers',\n\n version='0.0.1',\n\n description='Containing some tracker files which may be useful for some users of pyautodl.',\n long_description=long_description,\n\n url='https://github.com/pyautodl/pyautodl-trackers',\n\n author='someotherusername',\n\n license='GPLv3',\n\n classifiers=[\n 'License :: OSI Approved :: GPLv3 License',\n 'Programming Language :: Python :: 3.5',\n ],\n\n keywords='pyautodl',\n\n packages=find_packages(exclude=['contrib', 'docs', 'tests*']),\n namespace_packages = ['pyautodl'],\n\n install_requires=[],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606447809","text":"import pandas\nimport sys\n\nfin1 = sys.argv[1]\nfin1_col = sys.argv[2]\nfin2 = sys.argv[3]\nfin2_col = sys.argv[4]\nfout = sys.argv[5]\n\ndf = pandas.DataFrame({ 'path1':pandas.read_csv(fin1)[fin1_col], \n 'path2':pandas.read_csv(fin2)[fin2_col] })\n\ndf.to_csv(fout, 
df.to_csv(fout, index=False)\n\n","sub_path":"Santa/src/backup/paste_tours.py","file_name":"paste_tours.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"111091592","text":"# Bartłomiej Janiszewski\nfrom wc_funkcje import ustaw_logger, wc\nimport log\nimport datetime\nimport argparse\n\nif __name__ == '__main__':\n start = datetime.datetime.now()\n\n parser = argparse.ArgumentParser(description='A simple program for logs.')\n parser.add_argument('-l', '--loglevel', choices=log._nameToLevel, default='WARNING',\n help='Run the program in logging LEVEL mode')\n parser.add_argument('filename', help='Name of the file to be checked.')\n\n args = parser.parse_args()\n print(args)\n x = args.loglevel\n print(x)\n poziom_logowania = x\n\n nazwa_pliku = args.filename\n print(nazwa_pliku)\n wybrany_szablon = 'pelny'\n\n ustaw_logger(poziom_logowania)\n wynik = wc(nazwa_pliku, wybrany_szablon)\n\n print(wynik)\n\n czas_wykonania = datetime.datetime.now() - start\n log.debug(f'program execution time: {czas_wykonania}')\n","sub_path":"Python - advanced/zajecia08/zadanie_domowe/zaddom08_etap2.py","file_name":"zaddom08_etap2.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"413375288","text":"#!/home/yli11/.conda/envs/py2/bin/python\nfrom joblib import Parallel, delayed\n\nimport os\nimport sys\nimport uuid\nimport argparse\n\n\n\n\ndef my_args():\n\t\n\tmainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,description=\"bw operations\")\n\tmainParser.add_argument('-b1',help=\"bigwiggle file 1\",required=True)\t\n\tmainParser.add_argument('-b2',help=\"bigwiggle file 2\",required=True)\t\n\tmainParser.add_argument('-o',help=\"output name\",required=True)\t\n\tmainParser.add_argument('-op',help=\"operation: log2,ratio,subtract,add,mean,reciprocal_ratio,first,second, diff_mean_log2\",default=\"diff_mean_log2\")\t\n\tmainParser.add_argument('-pc',help=\"pseudocount\",default=1,type=float)\t\n\tmainParser.add_argument('-bs',help=\"bin size\",default=10,type=int)\t\n\n\t##------- add parameters above ---------------------\n\targs = mainParser.parse_args()\t\n\treturn args\n\n\ndef main():\n\n\targs = my_args()\n\taddon_string = str(uuid.uuid4()).split(\"-\")[-1]\n\tif args.op != \"diff_mean_log2\":\n\t\tcommand = f\"bigwigCompare -b1 {args.b1} -b2 {args.b2} --operation {args.op} --pseudocount {args.pc} -bs {args.bs} -p 8 -o {args.o}\"\n\t\tprint (command)\n\t\tos.system(command)\n\telse:\n\t\tdiff_out = addon_string+\".diff\"\n\t\tmean_out = addon_string+\".mean\"\n\t\tcommand1 = f\"bigwigCompare -b1 {args.b1} -b2 {args.b2} --operation subtract -bs {args.bs} -p 8 -o {diff_out}\"\n\t\tcommand2 = f\"bigwigCompare -b1 {args.b1} -b2 {args.b2} --operation mean -bs {args.bs} -p 8 -o {mean_out}\"\n\t\tParallel(n_jobs=2,verbose=10)(delayed(os.system)(m) for m in [command1,command2])\n\t\tcommand3 = f\"bigwigCompare -b1 {diff_out} -b2 {mean_out} --operation log2 --pseudocount {args.pc} -bs {args.bs} -p 8 -o {args.o}\"\n\t\t# command3 = f\"bigwigCompare -b1 {diff_out} -b2 {args.b2} --operation log2 --pseudocount {args.pc} -bs {args.bs} -p 8 -o {args.o}\"\n\t\tos.system(command3)\n\n\n\n
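# Editorial note: the diff_mean_log2 branch above computes, per bin,\n# log2((b1 - b2 + pc) / (mean(b1, b2) + pc)), i.e. a signed log-ratio of the\n# difference between the two tracks relative to their average signal.\n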
\"__main__\":\n\tmain()\n\n\n\n\n\n\n\n","sub_path":"bin/diff_bw.py","file_name":"diff_bw.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221824968","text":"# # 게임 개발 118p\n\nn, m = map(int, input().split())\nx, y, d = map(int, input().split())\nmap_list = [list(map(int, input().split())) for i in range(n)]\nresult_list = [[0] * m for _ in range(n)] # 들렀던 곳인지 확인하는 테스트\nresult_list[x][y]=1\n\ndx = [-1, 0, 1, 0] # 북동남서\ndy = [0, 1, 0, -1] # 북동남서\n\ndef turn_left():\n global d\n d -= 1\n if d == -1:\n d = 3\n\ncnt = 1\nrotate_time = 0\n\nwhile True:\n turn_left()\n nx = x + dx[d]\n ny = y + dy[d]\n\n if result_list[nx][ny] == 0 and map_list[nx][ny] == 0:\n result_list[nx][ny] = 1\n x = nx\n y = ny\n cnt += 1\n rotate_time = 0\n continue\n else:\n rotate_time += 1\n if rotate_time == 4:\n nx = x - dx[d]\n ny = y - dy[d]\n if map_list[nx][ny] == 0:\n x = nx\n y = ny\n else:\n break\n rotate_time = 0\nprint(cnt)\n\n\n\n\n\n# n, m = map(int, input().split())\n# x, y, d = map(int, input().split())\n# result_list = [[0] * n for i in range(n)] # 들렀던 곳인지 확인하는 테스트\n# map_list = [list(map(int, input().split())) for i in range(n)]\n# d[x][y] = 1 # 방문처리하기\n#\n# dx = [-1, 0, 1, 0] # 북동남서\n# dy = [0, 1, 0, -1] # 북동남서\n#\n#\n# def tunr_left():\n# global d\n# d -= 1\n# if d == -1: # 0은 북쪽,1은 서쪽, 2는 남, 3은 동쪽\n# d = 3\n\n# if map_list[next_row][next_row] != 1: # 왼쪽\n# d = 1\n# cnt += 1\n# y -= 1\n# result_list[next_row][next_col] = 1\n#\n# elif map_list[next_row][next_col] != 1: # 아래\n# d = 2\n# cnt+=1\n# x+=1\n# result_list[next_row][next_col] = 1\n# print(\"3c\")\n#\n# elif map_list[next_row][next_col] != 1: # 오른쪽\n# d = 3\n# cnt += 1\n# y += 1\n# result_list[next_row][next_col] = 1\n# print(\"4d\")\n#\n# elif map_list[next_row][next_col] != 1: # 위\n# d = 0\n# cnt += 1\n# x -= 1\n# result_list[next_row][next_col] = 1\n# print(\"1a\")\n","sub_path":"eboong/chapter4/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"14499505","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n#\n# 代码中的类名、方法名、参数名已经指定,请勿修改,直接返回方法规定的值即可\n# 向右翻转列表\n# @param head ListNode类 listnode 中的头元素\n# @param k int整型 向右翻转次数\n# @return ListNode类\n#\nclass Solution:\n def rotateRight(self , head , k ):\n # write code here\n nums = []\n p = head\n while p:\n nums.append(p.val)\n p = p.next\n new_list = nums[-k:] + nums[:-k]\n new_head = ListNode(nums[0])\n last = new_head\n for d in nums[1:]:\n tmp = ListNode(d)\n last.next = tmp\n last = tmp\n return new_head\n","sub_path":"好未来/翻转数组.py","file_name":"翻转数组.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180831961","text":"import numpy as np\nfrom skimage import filter\nfrom scipy import ndimage\nimport types\n\ndef get_sectioned_image(im):\n \"\"\"Sections image in proximity regions for points of interests\"\"\"\n\n d = ndimage.distance_transform_edt(im==0)\n k = np.array([[-1, 2, -1]])\n d2 = ndimage.convolve(d, k) + ndimage.convolve(d, k.T)\n d2 = ndimage.binary_dilation(d2 > d2.mean(), border_value=1) == 0\n labled, labels = ndimage.label(d2)\n\n return labled, labels\n\n\ndef get_iterative_threshold(im, filt=None):\n \"\"\"Iteratively refined threshold to stably section bg/object, needs initial\n guess. 
def get_iterative_threshold(im, filt=None):\n \"\"\"Iteratively refined threshold to stably section bg/object; needs an initial\n guess. If not supplied, the guess becomes the four corner pixels\"\"\"\n\n if filt is None:\n filt = np.zeros(im.shape, dtype=np.bool)\n filt[0, 0] = 1\n filt[0, -1] = 1\n filt[-1, 0] = 1\n filt[-1, -1] = 1\n\n oldT = -1\n T = -2\n\n while oldT != T:\n\n oldT = T\n O = im[filt]\n B = im[filt == False]\n T = (O.sum() / np.float(O.size) + B.sum() / np.float(B.size)) / 2.0\n filt = im > T\n\n return T\n\n\ndef get_p_tile_threshold(im, p, comparison='greater'):\n \"\"\"Sets a threshold based on a priori knowledge of how large a fraction\n of the image should fulfill the threshold condition\"\"\"\n\n if comparison == 'greater':\n c = np.greater\n t = 256\n dt = -1\n else:\n c = np.less\n t = 0\n dt = 1\n\n p *= im.size\n\n while c(im, t).sum() < p:\n\n t += dt\n\n if abs((im < t).sum() - p) < abs((im < t - 1).sum() - p):\n \n return t\n\n else:\n\n return t - 1\n\ndef _get_context(c=8):\n\n im = np.zeros((3, 3))\n\n if c == 8:\n im[:,:] = 1\n im[1,1] = 0\n if c == 4:\n im[1,0] = 1\n im[0,1] = 1\n im[2,1] = 1\n im[1,2] = 1\n\n return im\n\n\nclass _F_Wrapper(object):\n\n def __init__(self, f, arg2):\n\n self.f = f\n self.arg2 = arg2\n\n def __call__(self, arg1):\n\n return self.f(arg1, self.arg2)\n\n\ndef get_hysteresis_segmentation(im, t1=None, t1_kwargs={},\n t2=None, t2_kwargs={}, context=8, comparison=None,\n origin=(1,1)):\n \"\"\"t1 and t2 are either functions or threshold values\"\"\"\n\n #Get context matrix\n if type(context) == types.IntType:\n context = _get_context(context)\n\n #10 is an arbitrary number large enough to make it different\n context[origin] = context.size * 10\n\n #Normalize comparison and evaluate t1/t2 if they were given as functions\n if comparison is None or type(comparison) != types.FunctionType:\n\n if comparison == \"greater\":\n comparison = np.greater\n else:\n comparison = np.less\n\n if type(t1) == types.FunctionType:\n t1 = t1(im, **t1_kwargs)\n if type(t2) == types.FunctionType:\n t2 = t2(im, **t2_kwargs)\n\n t1_im = comparison(im, t1)\n t2_im = comparison(im, t2)\n t2_im[t1_im] = 0\n\n ret_im = np.zeros(t1_im.shape)\n\n while (ret_im-t1_im).any():\n\n ret_im[:,:] = t1_im[:,:]\n c_eval = ndimage.convolve(t1_im + context[origin]*t2_im, context,\n origin=origin, mode='constant', cval=0.0)\n c_eval %= context[origin]\n\n #Add points from 2nd threshold that were neighbours to t1-points\n t1_im[t2_im] = c_eval[t2_im] > 0\n\n return ret_im\n\n\ndef get_adaptive_threshold(im, threshold_filter=None, segments=60, \n sigma=None, *args, **kwargs):\n \"\"\"Gives a 2D surface of threshold based on smoothed local measures\"\"\"\n \n if threshold_filter is None:\n threshold_filter = filter.threshold_otsu\n if sigma is None:\n sigma = np.sqrt(im.size)/5\n\n if segments is None or segments == 5:\n #HACK\n T = np.zeros(im.shape)\n T[im.shape[0]/4, im.shape[1]/4] = 1\n T[im.shape[0]/4, im.shape[1]*3/4] = 1\n T[im.shape[0]*3/4, im.shape[1]/4] = 1\n T[im.shape[0]*3/4, im.shape[1]*3/4] = 1\n T[im.shape[0]/2, im.shape[1]/2] = 1\n else:\n p = 1 - np.float(segments)/im.size\n T = (np.random.random(im.shape) > p).astype(np.uint8)\n\n\n labled, labels = get_sectioned_image(T)\n\n for l in range(1, labels + 1):\n\n if (labled==l).sum() > 1:\n\n T[ndimage.binary_dilation(labled == l, iterations=4)] = \\\n threshold_filter(im[labled == l], *args, **kwargs)\n\n return ndimage.gaussian_filter(T, sigma=sigma)\n\n","sub_path":"dev/IA/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"257489221","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom ._validators import util_source_version_validator\n\n\ndef load_arguments(self, _):\n\n with self.argument_context('util update') as c:\n c.argument('version', options_list=['--version', '-v'], help='Version (tag). Default: latest stable.',\n validator=util_source_version_validator)\n c.argument('prerelease', options_list=['--pre'], action='store_true',\n help='Deploy latest prerelease version.')\n\n with self.argument_context('util group delete') as c:\n c.argument('prefix', options_list=['--prefix', '-p'],\n help='Resource group name prefix (case insensitive).')\n c.argument('skip', options_list=['--skip', '-s'], nargs='*',\n help='Space-separated resource groups to skip.')\n\n with self.argument_context('util keyvault purge') as c:\n c.argument('skip', options_list=['--skip', '-s'], nargs='*',\n help='Space-separated keyvaults to skip.')\n","sub_path":"util/azext_util/_params.py","file_name":"_params.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362317514","text":"import sys\nsys.path.append('.') #get rid of this at some point with central test script or when package is built\n\nimport MSI.simulations.instruments.shock_tube as st\nimport MSI.cti_core.cti_processor as pr\nimport MSI.optimization.matrix_loader as ml\nimport MSI.optimization.opt_runner as opt\nimport MSI.simulations.absorbance.curve_superimpose as csp\nimport MSI.simulations.yaml_parser as yp\nimport cantera as ct\n#################################################################################\n# This first test includes only one observable and no absorbance \n################################################################################\ntest_p = pr.Processor('MSI/data/test_data/FFCM1.cti')\ntest_tube = st.shockTube(pressure=1.74,\n temperature=1880,\n observables=['OH'],\n kineticSens=1,\n physicalSens=1,\n conditions={'H2O': 0.013,'O2':.0099 ,'H':0.0000007,'Ar':0.9770993},\n initialTime=0,\n finalTime=0.001,\n thermalBoundary='Adiabatic',\n mechanicalBoundary='constant pressure',\n processor=test_p,\n save_timeHistories=1,\n save_physSensHistories=1)\n\ncsv_paths = ['MSI/data/test_data/hong_oh_1.csv']\nexp_data = test_tube.importExperimentalData(csv_paths)\n\ntest_tube.run() #set up original time history\n\nparser = yp.Parser()\n#exp1_loaded = parser.load_to_obj('MSI/data/test_data/Troe_6.yaml')\n#put in once opt_runner has load functionality\nint_ksens_exp_mapped= test_tube.map_and_interp_ksens()#ksens is wiped on rerun so int it before\ntest_tube.sensitivity_adjustment(temp_del = .01)\ntest_tube.sensitivity_adjustment(pres_del = .01)\ntest_tube.species_adjustment(.01) #do some sensitivity adjustments\n\nint_tp_psen_against_experimental = test_tube.interpolate_experimental([test_tube.interpolate_physical_sensitivities(index=1),\n test_tube.interpolate_physical_sensitivities(index=2)])\n\nint_spec_psen_against_experimental = test_tube.interpolate_experimental(pre_interpolated=test_tube.interpolate_species_sensitivities())\n\n#################################################################################\n# This Second test includes 
two observables and an absorbance file \n################################################################################\ntest_p2 = pr.Processor('MSI/data/test_data/FFCM1.cti')\ntest_tube2 = st.shockTube(pressure=1.672,\n temperature=1182,\n observables=['H2O','OH','HO2','H2O2'],\n kineticSens=1,\n physicalSens=1,\n conditions={'H2O2':0.002046 ,'H2O': 0.001113,'O2':0.000556,'Ar':0.996285},\n initialTime=0,\n finalTime=0.001,\n thermalBoundary='Adiabatic',\n mechanicalBoundary='constant pressure',\n processor=test_p,\n save_timeHistories=1,\n save_physSensHistories=1)\n\ncsv_paths2 = ['MSI/data/test_data/hong_h2o_4.csv','MSI/data/test_data/hong_oh_4.csv']\nexp_data2 = test_tube2.importExperimentalData(csv_paths2)\n\ntest_tube2.run() #set up original time history\nabs2_instance = csp.Absorb()\nabs2_loaded = parser.load_to_obj('MSI/data/test_data/Hong_4_abs.yaml')\nabs2_data = abs2_instance.superimpose_shock_tube(test_tube2,abs2_loaded,15.2,kinetic_sens=1)\n\nperturbed_coef2 = abs2_instance.perturb_abs_coef(.01,\n test_tube2,\n abs2_loaded,30,\n summed_data = abs2_data[0]) \n\nint_ksens_exp_mapped2= test_tube2.map_and_interp_ksens()#ksens is wiped on rerun so int it before\ntest_tube2.sensitivity_adjustment(temp_del = .01)\ntest_tube2.sensitivity_adjustment(pres_del = .01)\ntest_tube2.species_adjustment(.01) #do some sensitivity adjustments\n\nabs2_phys_sens = abs2_instance.absorb_phys_sensitivities(test_tube2,abs2_data[0],abs2_loaded,15.2,dk=.01)\n\nloaded_experimental_data2 = abs2_instance.import_experimental_data(['MSI/data/test_data/hong_abs_4.csv'])\n\ninterp_abs2_exp= abs2_instance.interpolate_experimental(test_tube2,loaded_experimental_data2,\n original_summed_absorption=abs2_data[0],\n abs_kinetic_sens = abs2_data[1],\n abs_phys_sens = abs2_phys_sens,\n abs_coef_sens = perturbed_coef2)\n\n\nint_tp_psen_against_experimental2 = test_tube2.interpolate_experimental([test_tube2.interpolate_physical_sensitivities(index=1),\n test_tube2.interpolate_physical_sensitivities(index=2)])\nint_spec_psen_against_experimental2 = test_tube2.interpolate_experimental(pre_interpolated=test_tube2.interpolate_species_sensitivities())\n\n\n ####################################\n# Stick the two experiments together #\n ####################################\nlist_of_interpolated_kinetic_sens = [int_ksens_exp_mapped,int_ksens_exp_mapped2]\nlist_of_interpolated_tp_sens = [int_tp_psen_against_experimental,int_tp_psen_against_experimental2]\nlist_of_interpolated_species_sens = [int_spec_psen_against_experimental,int_spec_psen_against_experimental2]\n#def build_single_exp_dict(self,exp_index:int,\n# simulation:sim.instruments.shock_tube.shockTube,\n# interpolated_kinetic_sens:dict,\n# interpolated_tp_sens:list,\n# interpolated_species_sens:list,\n# interpolated_absorbance:list=[]):\noptimization_instance = opt.Optimization_Utility() \nexp_1 = optimization_instance.build_single_exp_dict(1,test_tube,\n int_ksens_exp_mapped,\n int_tp_psen_against_experimental,\n int_spec_psen_against_experimental) #no absorbance in experiment 1\n \nexp_2 = optimization_instance.build_single_exp_dict(2,test_tube2,\n int_ksens_exp_mapped2,\n int_tp_psen_against_experimental2,\n int_spec_psen_against_experimental2,\n interpolated_absorbance=interp_abs2_exp) #absorbance in experiment 2\nprint(\"Experiments built successfully\")\n#print(exp_2['ksens']['A'][0].shape)\n#print(exp_2['species'][0].shape)\n#print(exp_2['species'][1].shape)\n#print(exp_2['species'][2].shape)\n\n\n\n ####################\n# Build the S matrix #\n 
####################\nmloader = ml.OptMatrix()\nS = mloader.load_S([exp_1,exp_2])\n\nprint(S)\n","sub_path":"tests/matrix_loading_test.py","file_name":"matrix_loading_test.py","file_ext":"py","file_size_in_byte":6993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52304869","text":"from mongoengine import context_managers\nfrom pyramid.view import view_defaults, view_config\n\nfrom stackcite.api import views, exceptions as exc\nfrom stackcite.users import auth, models, resources, schema\n\n\n@view_defaults(context=resources.ConfirmResource, renderer='json')\nclass ConfirmationViews(views.BaseView):\n\n @view_config(request_method='POST')\n @views.managed_view\n def create(self):\n data = self.request.json_body\n schm = schema.CreateConfirmationToken(strict=True)\n data = schm.load(data).data\n\n # Forbid creating new tokens for confirmed users\n user = models.User.objects.get(email=data['email'])\n if auth.USERS in user.groups:\n msg = 'User is already confirmed.'\n raise exc.APIForbidden(detail=msg)\n\n self.context.create(data)\n self.request.response.status_code = 201\n\n @view_config(request_method='PUT')\n @views.managed_view\n def update(self):\n data = self.request.json_body\n schm = schema.UpdateConfirmationToken(strict=True)\n data = schm.load(data).data\n conf_token = self.context.update(data)\n with context_managers.no_dereference(models.ConfirmToken):\n return {\n 'user': {\n 'id': str(conf_token.user.id)\n }\n }\n","sub_path":"stackcite/users/views/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595719475","text":"def geraGrafo():\n return [\n [0, 3],\n [1, 3, 5],\n [2, 4, 5],\n [3, 0, 1, 6],\n [4, 2, 8],\n [5, 1, 2, 7, 8],\n [6, 3, 7],\n [7, 5, 6],\n [8, 4, 5]\n ]\n\n\ndef largura(grafo, vertice):\n fila = [vertice]\n visitados = [vertice]\n while len(fila):\n n = fila.pop(0)\n for m in grafo[n]:\n if(m not in visitados):\n visitados.append(m)\n fila.append(m)\n return visitados\n\n\ndef profundidade(grafo, vertice, visitados):\n if vertice in visitados:\n return visitados\n visitados.append(vertice)\n for vizinho in grafo[vertice]:\n if vizinho not in visitados:\n visitados = profundidade(grafo, vizinho, visitados)\n return visitados\n\n\ndef main():\n grafo = geraGrafo()\n print(largura(grafo, 0), '\\n----------------------------------')\n print(profundidade(grafo, 0, []))\n\n\nmain()","sub_path":"Aula05/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"298220107","text":"def getTimes(time, direction):\n final_times = [0 for _ in time]\n exit_list = []\n enter_list = []\n for index, value in enumerate(direction):\n if value == 1:\n exit_list.append(index)\n else:\n enter_list.append(index)\n enter_index = 0\n exit_index = 0\n enter_length = len(enter_list)\n exit_length = len(exit_list)\n last_time = -1\n last_direction = -1\n while (enter_index < enter_length) and (exit_index < exit_length):\n exit_time = time[exit_list[exit_index]]\n enter_time = time[enter_list[enter_index]]\n if exit_time <= last_time:\n exit_time = last_time + 1\n if enter_time <= last_time:\n enter_time = last_time + 1\n print(enter_time, exit_time, last_time)\n if enter_time < exit_time:\n final_times[enter_list[enter_index]] = enter_time\n last_time = enter_time\n last_direction = 0\n enter_index 
+= 1\n elif exit_time < enter_time:\n final_times[exit_list[exit_index]] = exit_time\n last_time = exit_time\n last_direction = 1\n exit_index += 1\n elif ((enter_time - 1) == last_time) and (last_direction == 0):\n final_times[enter_list[enter_index]] = enter_time\n last_time = enter_time\n last_direction = 0\n enter_index += 1\n else:\n final_times[exit_list[exit_index]] = exit_time\n last_time = exit_time\n last_direction = 1\n exit_index += 1\n while (exit_index < exit_length):\n exit_time = time[exit_list[exit_index]]\n if exit_time <= last_time:\n exit_time = last_time + 1\n last_time = exit_time\n final_times[exit_list[exit_index]] = exit_time\n exit_index += 1\n while (enter_index < enter_length):\n enter_time = time[enter_list[enter_index]]\n if enter_time <= last_time:\n enter_time = last_time + 1\n last_time = enter_time\n final_times[enter_list[enter_index]] = enter_time\n enter_index += 1\n return final_times\n","sub_path":"assignments/week3/day4/Daniel_Zabari_HR_turnstile.py","file_name":"Daniel_Zabari_HR_turnstile.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35916531","text":"import argparse\nimport logging\n\nimport numpy as np\nfrom calamari_ocr.ocr.dataset import DataSetType\nfrom tfaip.base.data.pipeline.definitions import PipelineMode\nfrom tfaip.util.multiprocessing.parallelmap import tqdm_wrapper\n\nfrom calamari_ocr.ocr.dataset.datareader.factory import DataReaderFactory\nfrom calamari_ocr.utils import glob_all, split_all_ext\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--files\", nargs=\"+\", required=True,\n help=\"List of all image files with corresponding gt.txt files\")\n parser.add_argument(\"--dataset\", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)\n parser.add_argument(\"--line_height\", type=int, default=48,\n help=\"The line height\")\n parser.add_argument(\"--pad\", type=int, default=16,\n help=\"Padding (left right) of the line\")\n\n args = parser.parse_args()\n\n logger.info(\"Resolving files\")\n image_files = glob_all(args.files)\n gt_files = [split_all_ext(p)[0] + \".gt.txt\" for p in image_files]\n\n ds = DataReaderFactory.create_data_reader(\n args.dataset,\n PipelineMode.Training,\n images=image_files, texts=gt_files, non_existing_as_empty=True)\n\n logger.info(f\"Loading {len(image_files)} files\")\n images, texts, metas = list(zip(*map(lambda s: (s.inputs, s.targets, s.meta), tqdm_wrapper(ds.generate(), progress_bar=True, total=len(ds)))))\n statistics = {\n \"n_lines\": len(images),\n \"chars\": [len(c) for c in texts],\n \"widths\": [img.shape[1] / img.shape[0] * args.line_height + 2 * args.pad for img in images\n if img is not None and img.shape[0] > 0 and img.shape[1] > 0],\n \"total_line_width\": 0,\n \"char_counts\": {},\n }\n\n for image, text in zip(images, texts):\n for c in text:\n if c in statistics[\"char_counts\"]:\n statistics[\"char_counts\"][c] += 1\n else:\n statistics[\"char_counts\"][c] = 1\n\n statistics[\"av_line_width\"] = np.average(statistics[\"widths\"])\n statistics[\"max_line_width\"] = np.max(statistics[\"widths\"])\n statistics[\"min_line_width\"] = np.min(statistics[\"widths\"])\n statistics[\"total_line_width\"] = np.sum(statistics[\"widths\"])\n\n statistics[\"av_chars\"] = np.average(statistics[\"chars\"])\n statistics[\"max_chars\"] = 
np.max(statistics[\"chars\"])\n statistics[\"min_chars\"] = np.min(statistics[\"chars\"])\n statistics[\"total_chars\"] = np.sum(statistics[\"chars\"])\n\n statistics[\"av_px_per_char\"] = statistics[\"av_line_width\"] / statistics[\"av_chars\"]\n statistics[\"codec_size\"] = len(statistics[\"char_counts\"])\n\n del statistics[\"chars\"]\n del statistics[\"widths\"]\n\n logger.info(statistics)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"calamari_ocr/scripts/dataset_statistics.py","file_name":"dataset_statistics.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"452043632","text":"#!/usr/bin/env python3\n#\n# Copyright 2020 IBM\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.IBM Confidential\n#\n\n\nimport os\n\nfrom multiprocessing import cpu_count\n\n\nTRUE = ('TRUE', 'True', 'true', '1')\n\n\nuse_ssl = True if os.getenv('ENABLE_SSL') in TRUE else False\nsettings = os.getenv('SETTINGS')\n\n\n# Gunicorn config variables\nworkers = int(os.getenv('GUNICORN_WORKER_NUM')) \\\n if os.getenv('GUNICORN_WORKER_NUM') and int(os.getenv('GUNICORN_WORKER_NUM')) > 0 \\\n else cpu_count() * 2 + 1\n# Gunicorn needs to store its temporary file in memory (e.g. /dev/shm)\nworker_tmp_dir = '/dev/shm'\n# Container schedulers typically expect logs to come out on stdout/stderr, thus gunicorn is configured to do so\nlog_file = '-'\nssl_version = 'TLSv1_2'\nbind = ':8080'\nca_certs = f'{settings}/ca.crt' if use_ssl else None\ncertfile = f'{settings}/server.crt' if use_ssl else None\nkeyfile = f'{settings}/server.key' if use_ssl else None\ntimeout = int(os.getenv('GUNICORN_TIMEOUT')) \\\n if os.getenv('GUNICORN_TIMEOUT') and int(os.getenv('GUNICORN_TIMEOUT')) > 0 \\\n else 30\n","sub_path":"ops-implementations/ads-ml-service/app/gunicorn.init.py","file_name":"gunicorn.init.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348731540","text":"\n# Example of a dictionary holding stock and sale operations\n# Page 128 of the book.\nvenda = []\nestoque = {\"tomate\": [1000, 2.30],\n \"alface\": [500, 0.45],\n \"batata\": [2001, 1.20],\n \"feijão\": [100, 1.50],\n \"arroz\": [200, 10.50],\n \"limão\": [1800, 2.10]}\n# the product name is the dictionary key\n# and the lists are the associated values,\n# one list per key\n# the 1st element of the list is the available quantity,\n# the 2nd is the product price.\n
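# Editorial example (not part of the original exercise): estoque[\"tomate\"][0] is the\n# quantity in stock and estoque[\"tomate\"][1] is the unit price, so the total value of\n# the tomato stock is estoque[\"tomate\"][0] * estoque[\"tomate\"][1].\n\n\n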
print(\"\\n __ SEJA BEM-VINDOS AO HORTIFRUT __\")\nprint(\"| PRODUTO QUANT PREÇO |\")\nfor chave, dados in estoque.items():\n print(f\"| {chave:10s}{dados[0]:12}{dados[1]:10.2f} |\")\nprint(\"|________ O QUE TEMOS HOJE ________|\")\n\nprint(\"O que você vai querer?\")\nprint(\"digite 'sair' para concluir\")\n\n\nwhile True:\n pedido = str(\n input(\"Digite o nome do produto: \"))\n if pedido == \"sair\":\n break\n else:\n if pedido not in estoque:\n print(\"\\nNão temos esse produto!, digite corretamente.\\n\")\n else:\n quantidadeVenda = int(input(\"Quantidade: \"))\n print()\n venda.append((pedido, quantidadeVenda))\n\n\ntotal = 0\nquantidade = 0\n\nprint(\"\\n ___________ NOTA FISCAL ___________\")\nfor operação in venda:\n produto, quantidade = operação\n preço = estoque[produto][1]\n custo = preço * quantidade\n print(\n f\"| {produto:10s}: {quantidade:3d} X {preço:6.2f} = {custo:6.2f} |\")\n estoque[str(produto)][0] -= quantidade\n total = total + custo\nprint(\"| |\")\nprint(f\"| Custo total: {total:20.2f} |\")\nprint(\"|___________________________________|\")\n\n\nprint(\"\\n ______ EM ESTOQUE ______\")\nprint(\" | PRODUTO QUANT |\")\nfor chave, dados in estoque.items():\n print(f\" | {chave:10s}{dados[0]:12} |\")\nprint(\" |________________________|\\n\")\n","sub_path":"Python_estoque_venda_dicionario.py","file_name":"Python_estoque_venda_dicionario.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447047241","text":"from flask import Flask, render_template, request\nimport pandas as pd\n# from bokeh.plotting.figure import\nfrom bokeh.embed import components\nfrom ml import training_data_preprocessing, model, predict\n\napp = Flask(__name__)\n\n# Load the Iris Data Set\n# iris_df = pd.read_csv(\"data/iris.data\",\n# names=[\"Sepal Length\", \"Sepal Width\", \"Petal Length\", \"Petal Width\", \"Species\"])\n# feature_names = iris_df.columns[0:-1].values.tolist()\n\n\n# Create the main plot\n# def create_figure(current_feature_name, bins):\n# p = Histogram(iris_df, current_feature_name, title=current_feature_name, color='Species',\n# bins=bins, legend='top_right', width=600, height=400)\n#\n# # Set the x axis label\n# p.xaxis.axis_label = current_feature_name\n#\n# # Set the y axis label\n# p.yaxis.axis_label = 'Count'\n# return p\n\n\n# Index page\n@app.route('/')\ndef index():\n # Determine the selected feature\n # current_feature_name = request.args.get(\"feature_name\")\n # if current_feature_name == None:\n # current_feature_name = \"Sepal Length\"\n #\n # # Create the plot\n # plot = create_figure(current_feature_name, 10)\n #\n # # Embed plot into HTML via Flask Render\n # script, div = components(plot)\n # return render_template(\"iris_index1.html\", script=script, div=div,\n # feature_names=feature_names, current_feature_name=current_feature_name)\n from bokeh.plotting import figure, output_file, show\n\n # x = [1, 2, 3, 4, 5]\n # y = [6, 7, 2, 4, 5]\n #\n # # output to static HTML file\n # # output_file(\"lines.html\")\n #\n # # create a new plot with a title and axis labels\n # p = figure(title=\"simple line example\", x_axis_label='x', y_axis_label='y')\n #\n # # add a line renderer with legend and line thickness\n # p.line(x, y, legend=\"Temp.\", line_width=2)\n # script, div = components(p)\n # Start of Bokeh\n X_df, y_df = training_data_preprocessing()\n # x = [1, 2, 3, 4, 5]\n # y = [6, 7, 2, 4, 5]\n 
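# Editorial note: components(p) below returns a script element plus a matching div\n # placeholder; the Jinja template is expected to render both for the plot to appear.\n 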
figure(title=\"House Price\", x_axis_label='Feature', y_axis_label='Price')\n\n # add a line renderer with legend and line thickness\n p.scatter(X_df['OverallQual'], y_df, legend=\"Price\", line_width=.1)\n p.scatter([8], 250000, legend=\"Your House\", line_width=.1, fill_color=\"red\", size=10)\n script, div = components(p)\n\n p1 = figure(title=\"House Price\", x_axis_label='Feature', y_axis_label='Price')\n p1.scatter(X_df['OverallQual'], y_df, legend=\"Price\", line_width=.1)\n script1, div1 = components(p1)\n\n # End of Bokeh\n return render_template(\"iris_index1.html\", script=script, div=div, script1=script1, div1=div1)\n # feature_names=feature_names, current_feature_name=current_feature_name)\n\n# With debug=True, Flask server will auto-reload\n# when there are code changes\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n","sub_path":"24_API_KT/-Trash/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"581359663","text":"# coding=UTF-8\nimport optparse\nimport socket\n\n'''\n为了从捕获我们的目标主机的应用标识,我们必须首先插入额外的验证代码到connScan函数中。\n一旦发现开放的端口,我们发送一个字符串数据到这个端口然后等待响应。收集这些响应并推断可能会得到运行在目标主机端口上的应用程序的一些信息\n'''\ndef connScan(tgtHost, tgtPort):\n try:\n connSkt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n connSkt.connect((tgtHost, tgtPort)) # 产生一个到目标主机端口的连接\n connSkt.send(\"ViolentPython\\r\\n\")\n results = connSkt.recv(100)\n print('[+]%d/tcp\topen' % tgtPort)\n print('[+] ' + str(results))\n connSkt.close()\n except:\n print('[-]%d/tcp\tclosed' % tgtPort)\n\n\ndef portScan(tgtHost, tgtPorts):\n try:\n tgtIP = socket.gethostbyname(tgtHost) # 通过目标主机名得到目标的网络IP地址\n except:\n print(\"[-]\tCannot\tresolve\t'%s':\tUnknown\thost\" % tgtHost)\n return\n try:\n '''\n # 这个函数传入一个IP地址将返回一个包含三个元素的元组,分别是\n 给定地址的主要的主机名、同一IP地址的可选的主机名的一个列表、关于同一主机的同一接口的其它IP地址的一个列表(列表可能都是空的)\n '''\n tgtName = socket.gethostbyaddr(tgtIP)\n print('\\n[+]\tScan\tResults\tfor:\t' + tgtName[0])\n except:\n print('\\n[+]\tScan\tResults\tfor:\t' + tgtIP)\n socket.setdefaulttimeout(1)\n for tgtPort in tgtPorts:\n print('Scanning\tport\t' + str(tgtPort))\n connScan(tgtHost, int(tgtPort))\n\n\ndef main():\n # optparse标准库来解析命令行选项\n parser = optparse.OptionParser('usage %prog –H -p ') # 创建一个选项分析器\n parser.add_option('-H', dest='tgtHost', type='string', help='specify target host') # 指定命令选项\n parser.add_option('-p', dest='tgtPort', type='int', help='specify target port')\n (options, args) = parser.parse_args() # 获取我们定义的选项和参数\n tgtHost = options.tgtHost\n tgtPort = options.tgtPort\n args.append(tgtPort)\n if (tgtHost == None) | (tgtPort == None):\n print('[-] You must specify a target host and port[s]!')\n portScan(tgtHost, args)\n\n# 执行 python port_scan_0.py -H 192.168.xx.xx -p 21 22 80 3306\n\nif __name__ == '__main__':\n main()\n","sub_path":"port_scan_0.py","file_name":"port_scan_0.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"117023821","text":"from flask import Blueprint, jsonify, request\nfrom app.models import Meet, WeightClass, AgeClass, Federation\nfrom app import db\n\napi = Blueprint('api', __name__)\n\n\n# List all meets\n@api.route('/meets/', methods=['GET'])\ndef fetch_meets():\n meets = Meet.query.all()\n return jsonify({'meets': [m.to_dict() for m in meets]})\n\n\n# Get meets created by a given user id\n@api.route('/meets/', methods=['GET'])\ndef meets_by_owner(uid):\n meets = 
+{"seq_id":"500103238","text":"# -*- coding: utf-8 -*-\n# ==============================================================================\n# MIT License\n#\n# Copyright (c) 2019 Albert Moky\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ==============================================================================\n\n\"\"\"\n Request Handler\n ~~~~~~~~~~~~~~~\n\n Handler for each connection\n\"\"\"\n\nimport hashlib\nimport json\nimport struct\nfrom socketserver import BaseRequestHandler\nfrom typing import Optional\n\nfrom dimp import User\nfrom dimp import InstantMessage, ReliableMessage\nfrom dimsdk import NetMsgHead, NetMsg, CompletionHandler\nfrom dimsdk import MessengerDelegate\n\nfrom libs.common import Log, base64_encode\nfrom libs.server import Session\nfrom libs.server import ServerMessenger\nfrom libs.server import HandshakeDelegate\n\nfrom .config import g_database, g_facebook, g_keystore, g_session_server\nfrom .config import g_dispatcher, g_receptionist, g_monitor\nfrom .config import current_station, station_name, chat_bot\n\n\nclass RequestHandler(BaseRequestHandler, MessengerDelegate, HandshakeDelegate):\n\n def __init__(self, request, client_address, server):\n super().__init__(request=request, client_address=client_address, server=server)\n # messenger\n self.__messenger: ServerMessenger = None\n # handlers with Protocol\n self.process_package = None\n self.push_data = None\n\n def info(self, msg: str):\n Log.info('%s >\\t%s' % (self.__class__.__name__, msg))\n\n def error(self, msg: str):\n Log.error('%s >\\t%s' % (self.__class__.__name__, msg))\n\n @property\n def chat_bots(self) -> list:\n bots = []\n # Tuling\n tuling = chat_bot('tuling')\n if tuling is not None:\n bots.append(tuling)\n # XiaoI\n xiaoi = chat_bot('xiaoi')\n if xiaoi is not None:\n bots.append(xiaoi)\n return bots\n\n @property\n def messenger(self) -> ServerMessenger:\n if self.__messenger is None:\n m = ServerMessenger()\n m.barrack = g_facebook\n m.key_cache = g_keystore\n m.dispatcher = g_dispatcher\n m.delegate = self\n # set context\n m.context['database'] = g_database\n m.context['session_server'] = g_session_server\n m.context['receptionist'] = g_receptionist\n m.context['bots'] = self.chat_bots\n m.context['handshake_delegate'] = self\n m.context['remote_address'] = self.client_address\n self.__messenger = m\n return self.__messenger\n\n @property\n def remote_user(self) -> Optional[User]:\n if self.__messenger is not None:\n return self.__messenger.remote_user\n\n #\n #\n #\n def setup(self):\n self.__messenger: ServerMessenger = None\n self.process_package = None\n self.push_data = None\n address = self.client_address\n self.info('set up with %s [%s]' % (address, station_name))\n g_session_server.set_handler(client_address=address, request_handler=self)\n g_monitor.report(message='Client connected %s [%s]' % (address, station_name))\n\n def finish(self):\n address = self.client_address\n user = self.remote_user\n if user is None:\n g_monitor.report(message='Client disconnected %s [%s]' % (address, station_name))\n else:\n nickname = g_facebook.nickname(identifier=user.identifier)\n session = g_session_server.get(identifier=user.identifier, client_address=address)\n if session is None:\n self.error('user %s not login yet %s %s' % (user, address, station_name))\n else:\n g_monitor.report(message='User %s logged out %s [%s]' % (nickname, address, station_name))\n # clear current session\n g_session_server.remove(session=session)\n # remove request handler 
fro session handler\n g_session_server.clear_handler(client_address=address)\n self.__messenger = None\n self.info('finish with %s %s' % (address, user))\n\n \"\"\"\n DIM Request Handler\n \"\"\"\n\n def handle(self):\n self.info('client connected (%s, %s)' % self.client_address)\n data = b''\n while current_station.running:\n # receive all data\n incomplete_length = len(data)\n data = self.receive()\n if len(data) == incomplete_length:\n self.info('no more data, exit (%d, %s)' % (incomplete_length, self.client_address))\n break\n\n # check protocol\n while self.process_package is None:\n # (Protocol A) Web socket?\n if data.find(b'Sec-WebSocket-Key') > 0:\n self.process_package = self.process_ws_handshake\n self.push_data = self.push_ws_data\n break\n\n # (Protocol B) Tencent mars?\n try:\n head = NetMsgHead(data=data)\n if head.version == 200:\n # OK, it seems be a mars package!\n self.process_package = self.process_mars_package\n self.push_data = self.push_mars_data\n break\n except ValueError:\n # self.error('not mars message pack: %s' % error)\n pass\n\n # (Protocol C) raw data (JSON in line)?\n if data.startswith(b'{\"') and data.find(b'\\0') < 0:\n self.process_package = self.process_raw_package\n self.push_data = self.push_raw_data\n break\n\n # unknown protocol\n data = b''\n # raise AssertionError('unknown protocol')\n break\n if self.process_package is None:\n continue\n\n # process package(s) one by one\n # the received data packages maybe spliced,\n # if the message data was wrap by other transfer protocol,\n # use the right split char(s) to split it\n data = self.process_package(data)\n\n #\n # Protocol: WebSocket\n #\n ws_magic = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'\n ws_prefix = b'HTTP/1.1 101 Switching Protocol\\r\\n' \\\n b'Server: DIM-Station\\r\\n' \\\n b'Upgrade: websocket\\r\\n' \\\n b'Connection: Upgrade\\r\\n' \\\n b'WebSocket-Protocol: dimchat\\r\\n' \\\n b'Sec-WebSocket-Accept: '\n ws_suffix = b'\\r\\n\\r\\n'\n\n def process_ws_handshake(self, pack: bytes):\n pos1 = pack.find(b'Sec-WebSocket-Key:')\n pos1 += len('Sec-WebSocket-Key:')\n pos2 = pack.find(b'\\r\\n', pos1)\n key = pack[pos1:pos2].strip()\n sec = hashlib.sha1(key + self.ws_magic).digest()\n sec = base64_encode(sec)\n res = self.ws_prefix + bytes(sec, 'UTF-8') + self.ws_suffix\n self.send(res)\n self.process_package = self.process_ws_package\n return b''\n\n def process_ws_package(self, pack: bytes):\n msg_len = pack[1] & 127\n if msg_len == 126:\n mask = pack[4:8]\n content = pack[8:]\n elif msg_len == 127:\n mask = pack[10:14]\n content = pack[14:]\n else:\n mask = pack[2:6]\n content = pack[6:]\n data = ''\n for i, d in enumerate(content):\n data += chr(d ^ mask[i % 4])\n res = self.received_package(bytes(data, 'UTF-8'))\n self.push_ws_data(res)\n return b''\n\n def push_ws_data(self, body: bytes) -> bool:\n head = struct.pack('B', 129)\n msg_len = len(body)\n if msg_len < 126:\n head += struct.pack('B', msg_len)\n elif msg_len <= (2 ** 16 - 1):\n head += struct.pack('!BH', 126, msg_len)\n elif msg_len <= (2 ** 64 - 1):\n head += struct.pack('!BQ', 127, msg_len)\n else:\n raise ValueError('message is too long: %d' % msg_len)\n return self.send(head + body)\n\n #\n # Protocol: Tencent mars\n #\n def process_mars_package(self, pack: bytes):\n mars = NetMsg(pack)\n head = mars.head\n # check completion\n mars_len = head.head_length + head.body_length\n pack_len = len(pack)\n if mars_len > pack_len:\n # partially data, keep it for next loop\n return pack\n # cut sticky packages\n remaining = 
pack[mars_len:]\n pack = pack[:mars_len]\n if head.cmd == 3:\n # TODO: handle SEND_MSG request\n if head.body_length == 0:\n raise ValueError('messages not found')\n body = self.received_package(mars.body)\n res = NetMsg(cmd=head.cmd, seq=head.seq, body=body)\n elif head.cmd == 6:\n # TODO: handle NOOP request\n self.info('receive NOOP package, cmd=%d, seq=%d, package: %s' % (head.cmd, head.seq, pack))\n res = pack\n else:\n # TODO: handle Unknown request\n self.error('receive unknown package, cmd=%d, seq=%d, package: %s' % (head.cmd, head.seq, pack))\n res = b''\n self.send(res)\n # return the remaining incomplete package\n return remaining\n\n def push_mars_data(self, body: bytes) -> bool:\n # kPushMessageCmdId = 10001\n # PUSH_DATA_TASK_ID = 0\n data = NetMsg(cmd=10001, seq=0, body=body)\n return self.send(data)\n\n #\n # Protocol: raw data (JSON string)\n #\n def process_raw_package(self, pack: bytes):\n pack_len = len(pack)\n pos = 0\n # skip leading empty packages\n while pack[pos] == b'\\n' or pack[pos] == b' ':\n pos += 1\n if pos == pack_len:\n # NOOP: heartbeat package\n self.info('respond : %s' % pack)\n self.send(b'\\n')\n return b''\n # check whether contain incomplete message\n pos = pack.rfind(b'\\n')\n if pos < 0:\n return pack\n # maybe more than one message in a time\n res = self.received_package(pack[:pos])\n self.send(res)\n # return the remaining incomplete package\n return pack[pos+1:]\n\n def push_raw_data(self, body: bytes) -> bool:\n data = body + b'\\n'\n return self.send(data=data)\n\n def push_message(self, msg: ReliableMessage) -> bool:\n data = json.dumps(msg)\n body = data.encode('utf-8')\n return self.push_data(body=body)\n\n #\n # receive message(s)\n #\n def received_package(self, pack: bytes) -> Optional[bytes]:\n lines = pack.splitlines()\n body = b''\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n self.info('ignore empty message')\n continue\n try:\n res = self.messenger.received_package(data=line)\n if res is None:\n # station MUST respond something to client request\n res = b''\n else:\n res = res + b'\\n'\n except Exception as error:\n self.error('parse message failed: %s' % error)\n # from dimsdk import TextContent\n # return TextContent.new(text='parse message failed: %s' % error)\n res = b''\n body = body + res\n # all responses in one package\n return body\n\n #\n # Socket IO\n #\n def receive(self) -> bytes:\n data = b''\n while True:\n try:\n part = self.request.recv(1024)\n except IOError as error:\n self.error('failed to receive data %s' % error)\n part = None\n if part is None:\n break\n data += part\n if len(part) < 1024:\n break\n return data\n\n def send(self, data: bytes) -> bool:\n try:\n self.request.sendall(data)\n return True\n except IOError as error:\n self.error('failed to send data %s' % error)\n return False\n\n #\n # MessengerDelegate\n #\n def send_package(self, data: bytes, handler: CompletionHandler) -> bool:\n if self.push_data(body=data):\n if handler is not None:\n handler.success()\n return True\n else:\n if handler is not None:\n error = IOError('MessengerDelegate error: failed to send data package')\n handler.failed(error=error)\n return False\n\n def upload_data(self, data: bytes, msg: InstantMessage) -> str:\n # upload encrypted file data\n pass\n\n def download_data(self, url: str, msg: InstantMessage) -> Optional[bytes]:\n # download encrypted file data\n pass\n\n #\n # HandshakeDelegate\n #\n def handshake_accepted(self, session: Session):\n sender = session.identifier\n session_key = 
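A standalone sketch of the "keep the incomplete tail" splitting used by process_raw_package above: complete newline-terminated packages are handed on, and whatever follows the last newline is returned so the next recv() can complete it (the function name is illustrative, not part of the record):

def split_packages(buffer: bytes):
    # everything up to the last b'\n' is complete; the remainder must wait for more data
    pos = buffer.rfind(b'\n')
    if pos < 0:
        return [], buffer
    complete, rest = buffer[:pos], buffer[pos + 1:]
    return [line for line in complete.splitlines() if line.strip()], rest

lines, rest = split_packages(b'{"a":1}\n{"b":2}\n{"c"')
assert lines == [b'{"a":1}', b'{"b":2}'] and rest == b'{"c"'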
session.session_key\n client_address = session.client_address\n user = g_facebook.user(identifier=sender)\n self.messenger.remote_user = user\n self.info('handshake accepted %s %s %s, %s' % (user.name, client_address, sender, session_key))\n g_monitor.report(message='User %s logged in %s %s' % (user.name, client_address, sender))\n # add the new guest for checking offline messages\n g_receptionist.add_guest(identifier=sender)\n\n def handshake_success(self):\n # TODO: broadcast 'login'\n pass\n","sub_path":"station/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":14596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"335345175","text":"################################################################################\n# reads emodel files and return a data frame each of entity embedding vectors\n################################################################################\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\n\n# SET YOUR TARGET DATASET:\ninput = \"./norange/MUSHv2_extracted_norange_withPaths.csv\" # only for MUSHv2 now\n\ndf = pd.read_csv(input, index_col=0)\n\n# excel for labelled data:\nlabels = \"./norange/Mushroom_labels.csv\"\n\nlabel_df = pd.read_csv(labels)\n\n################################################################################\n############################# READ FILES ##############################\n################################################################################\n\nfor index, row in df.iterrows():\n\n filePath = row[\"filePath\"] # eg. \"./BPA/model-H1A/BPA...\"\n\n with open(filePath) as f:\n content = f.readlines()\n content = [x.strip() for x in content] # remove white space (needed)\n\n ### find position of entity and relation embeddings\n entityLine = [s for s in enumerate(content) if \"[entity.embeddings]\" in s][0][0]\n relationLine = [s for s in enumerate(content) if \"[relation.embeddings]\" in s][0][0]\n\n ### get entity chunk\n entityChunk = content[entityLine:relationLine]\n\n################################################################################\n######################### SAMPLE EMBEDDINGS ##########################\n################################################################################\n\n ### useful data from entityChunk\n # split chunk of text into distinct values\n entitySplitted = map(lambda embed: embed.split(\"\\t\"), entityChunk[1:])\n # make table\n entityTable = pd.DataFrame(entitySplitted) # one way to remove first column (relation name)\n\n sampleTable = entityTable[entityTable.iloc[:,0].str.contains(\"Sample\")]\n for index2, row2 in sampleTable.iterrows():\n row2[0] = row2[0].split(\":\")[1]\n\n # construct feature names\n features = list(range(1, len(sampleTable.columns)))\n for i, feature in enumerate(features):\n features[i] = \"embed\" + str(feature)\n colnames = [\"sample\"] + features\n\n sampleTable.columns = colnames\n\n # sort samples\n sampleTable = sampleTable.sort_values(\"sample\") # avoid inplace copy issues\n sampleTable = sampleTable.reset_index(drop=True) # avoid inplace copy issues\n\n # cast types\n for col in sampleTable[1:]:\n sampleTable[col] = sampleTable[col].astype(dtype=\"float64\")\n sampleTable[\"sample\"] = sampleTable[\"sample\"].astype(dtype=\"int64\")\n\n\n################################################################################\n############################## GET CLASS 
##############################\n################################################################################\n\n labelledTable = sampleTable.join(label_df.set_index(\"sample\"), on=\"sample\")\n\n # output csv\n outDir = \"/\".join([\".\", \"norange\", \"classification_data\", filePath.split(\"/\")[1], filePath.split(\"/\")[2]])\n if not os.path.exists(outDir):\n os.makedirs(outDir)\n\n outPath = outDir + \"/\" + filePath.split(\"/\")[3][:-7] + \".csv\"\n labelledTable.to_csv(outPath)\n","sub_path":"codes/embed4ML_MUSH.py","file_name":"embed4ML_MUSH.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474418704","text":"\nimport configparser\nimport msvcrt\nimport re\nimport time\nimport sys\nimport os\nimport shlex\nimport subprocess\nfrom datetime import datetime\nimport zipfile\nimport argparse\nimport shutil\nfrom glob import glob\nimport traceback\n\nimport mtm.util.BuildUtil as BuildUtil\n\nfrom mtm.util.ConfigXml import ConfigXml\nfrom mtm.util.VarManager import VarManager\nfrom mtm.util.Logger import Logger\nfrom mtm.util.SystemHelper import SystemHelper\nfrom mtm.util.LogStreamFile import LogStreamFile\nfrom mtm.util.LogStreamConsole import LogStreamConsole\nfrom mtm.util.ProcessRunner import ProcessRunner\nfrom mtm.util.JunctionHelper import JunctionHelper\nfrom mtm.upm.VisualStudioSolutionGenerator import VisualStudioSolutionGenerator\nfrom mtm.upm.VisualStudioHelper import VisualStudioHelper\nfrom mtm.upm.ProjectSchemaLoader import ProjectSchemaLoader\nfrom mtm.util.BuildRunner import BuildRunner\n\nfrom mtm.util.BuildUtil import Platforms\nfrom mtm.upm.PackageManager import PackageManager\n\nimport mtm.ioc.Container as Container\nfrom mtm.ioc.Inject import Inject\nimport mtm.ioc.Assertions as Assertions\n\nfrom mtm.util.UnityHelper import UnityHelper\n\nLogFileName = 'BuildLog'\nLogExtension = '.txt'\n\nclass Runner:\n _buildRunner = Inject('BuildRunner')\n _config = Inject('Config')\n _packageMgr = Inject('PackageManager')\n _unityHelper = Inject('UnityHelper')\n _log = Inject('Logger')\n _gitHelper = Inject('GitHelper')\n _sys = Inject('SystemHelper')\n _varMgr = Inject('VarManager')\n _vsSolutionGenerator = Inject('VisualStudioSolutionGenerator')\n _vsSolutionHelper = Inject('VisualStudioHelper')\n\n def __init__(self, args):\n self._args = args\n\n def runPreBuild(self):\n if self._args.clearProjectGeneratedFiles:\n self._packageMgr.clearProjectGeneratedFiles(self._project)\n\n if self._args.deleteAllJunctions:\n self._packageMgr.deleteAllJunctions()\n\n if self._args.initAll:\n self._initAllProjects()\n\n if self._args.updateJunctions:\n self._packageMgr.updateProjectJunctions(self._project, self._platform)\n\n if self._args.updateUnitySln:\n self._vsSolutionHelper.updateUnitySolution(self._project, self._platform)\n\n if self._args.updateCustomSln:\n self._vsSolutionHelper.updateVisualStudioSolution(self._project, self._platform)\n\n # This will set up all the directory junctions for all projects for all platforms\n def _initAllProjects(self):\n oldProject = self._project\n for projectName in self._sys.walkDir('[UnityProjectsDir]'):\n self._project = projectName\n self._log.heading('Initializing project \"{0}\"'.format(projectName))\n\n try:\n #for platform in Platforms.All:\n for platform in [Platforms.Windows]:\n self._platform = platform\n self._packageMgr.updateProjectJunctions(self._project, self._platform)\n\n self._log.good('Successfully initialized project 
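The emodel parsing above locates section headers with a comprehension over enumerate(content) that tests membership against the (index, line) tuple, which only matches lines exactly equal to the header. A hedged sketch of a more explicit locator; the helper name is hypothetical:

def find_section(lines, header):
    # substring search on the line text itself, not on the (index, line) tuple
    for i, line in enumerate(lines):
        if header in line:
            return i
    raise ValueError('section %r not found' % header)

# entityLine = find_section(content, '[entity.embeddings]')
# relationLine = find_section(content, '[relation.embeddings]')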
\"{0}\"'.format(projectName))\n except Exception as e:\n self._log.warn('Failed to initialize project \"{0}\": {1}'.format(projectName, e))\n\n def runBuild(self):\n if self._args.buildCustomSolution:\n self._vsSolutionHelper.buildCustomSolution(self._project, self._platform)\n\n def runInternal(self):\n\n if not self._projectExists(self._args.project):\n self._args.project = self._getProjectFromAlias(self._args.project)\n\n self._project = self._args.project\n self._platform = self._convertToFullPlatformName(self._args.platform)\n\n self.runPreBuild()\n self.runBuild()\n\n if self._args.openUnity:\n self._unityHelper.openUnity(self._project, self._platform)\n\n if self._args.openCustomVisualStudioSolution:\n self._vsSolutionHelper.openCustomSolution(self._project, self._platform)\n\n def _convertToFullPlatformName(self, platform):\n if platform == 'win':\n return Platforms.Windows\n\n if platform == 'and':\n return Platforms.Android\n\n if platform == 'webp':\n return Platforms.WebPlayer\n\n if platform == 'webgl':\n return Platforms.WebGl\n\n if platform == 'osx':\n return Platforms.OsX\n\n if platform == 'ios':\n return Platforms.Ios\n\n if platform == 'linux':\n return Platforms.Linux\n\n assert False\n return ''\n\n def _projectExists(self, projectName):\n return self._sys.directoryExists('[UnityProjectsDir]/{0}'.format(projectName)) or self._sys.fileExists('[UnityProjectsDir]/{0}.ini'.format(projectName))\n\n def _getProjectFromAlias(self, alias):\n aliasMap = self._config.getDictionary('ProjectAliases')\n\n assert alias in aliasMap.keys(), \"Unrecognized project alias '{0}'\".format(alias)\n return aliasMap[alias]\n\n def run(self):\n result = self._buildRunner.runWrapper(self.runInternal)\n\ndef getArguments(argv):\n parser = argparse.ArgumentParser(description='Unity Package Manager')\n\n parser.add_argument('-p', '--project', default='Modest3D', metavar='PROJECT_NAME', type=str, help=\"The project to apply changes to. By default this is set to Modest3D\")\n parser.add_argument('-pl', '--platform', type=str, default='win', choices=['win', 'webp', 'webgl', 'and', 'osx', 'ios', 'linux'], help='The platform to use. If unspecified, windows is assumed.')\n\n parser.add_argument('-cfg', '--config', required=True, metavar='CONFIG_PATH', type=str, help=\"The path to the config xml file containing settings for UPM\")\n\n parser.add_argument('-uj', '--updateJunctions', action='store_true', help='Updates directory links for the given project using package manager')\n\n parser.add_argument('-uus', '--updateUnitySln', action='store_true', help='Equivalent to executing the menu option \"Assets/Sync MonoDevelop Project\" in unity')\n parser.add_argument('-ucs', '--updateCustomSln', action='store_true', help='Updates the custom solution for the given project with the files found in the Assets/ folder. 
It will also take settings from the generated unity solution such as defines, and references.')\n\n parser.add_argument('-v', '--verbose', action='store_true', help='Output debug-level logging')\n parser.add_argument('-vv', '--veryVerbose', action='store_true', help='If set, detailed logging will be output to stdout rather than file (' + LogFileName + LogExtension + ')')\n parser.add_argument('-rel', '--release', action='store_true', help='Build all projects using the Release configuration for full optimization')\n\n parser.add_argument('-b', '--buildCustomSolution', action='store_true', help='Build the generated custom solution for the given project')\n parser.add_argument('-bpb', '--buildPreBuiltProjects', action='store_true', help='Build UnityPrebuilt.sln, which includes pre-built DLLs such as Teak, Zenject, etc. Also copies result to the package directory for each pre-built project.')\n\n parser.add_argument('-clp', '--clearProjectGeneratedFiles', action='store_true', help='Remove the generated files for the given project')\n parser.add_argument('-daj', '--deleteAllJunctions', action='store_true', help='Delete all directory junctions for all projects')\n\n parser.add_argument('-ina', '--initAll', action='store_true', help='Initialize all projects for all platforms')\n\n parser.add_argument('-bf', '--buildFull', action='store_true', help='Perform a full build of the given project')\n parser.add_argument('-bf2', '--buildFull2', action='store_true', help='Same as build full except skips the prebuild')\n\n parser.add_argument('-ou', '--openUnity', action='store_true', help='Open unity for the given project')\n parser.add_argument('-ocs', '--openCustomVisualStudioSolution', action='store_true', help='Open the solution for the given project/platform')\n\n if len(argv) == 0:\n print(\"\"\"\n __ __ .______ .___ ___.\n | | | | | _ \\ | \\/ |\n | | | | | |_) | | \\ / |\n | | | | | ___/ | |\\/| |\n | `--' | | | | | | |\n \\______/ | _| |__| |__|\"\"\")\n\n print('\\n Unity Package Manager\\n')\n print(' If this is your first time running Upm, please refer to the Quick Start Tutorial (ReadMe.html)')\n print(' Run \"Upm -h\" to print the full list of command line options')\n print()\n sys.exit(2)\n\n return parser.parse_args(argv)\n\nif __name__ == '__main__':\n\n if (sys.version_info < (3, 0)):\n print('Wrong version of python! 
Install python 3 and try again')\n sys.exit(2)\n\n args = getArguments(sys.argv[1:])\n\n if args.buildFull or args.buildFull2:\n args.updateJunctions = True\n args.updateUnitySln = True\n args.updateCustomSln = True\n args.buildCustomSolution = True\n\n scriptDir = os.path.dirname(os.path.realpath(__file__))\n # TODO: Add support for secondary _config so we can use '/BuildCustom.ini'\n Container.bind('Config').toSingle(ConfigXml, scriptDir + '/UpmConfig.xml', scriptDir + '/UpmConfigCustom.xml')\n\n Container.bind('VarManager').toSingle(VarManager)\n Container.bind('SystemHelper').toSingle(SystemHelper)\n Container.bind('Logger').toSingle(Logger)\n Container.bind('LogStream').toSingle(LogStreamFile)\n Container.bind('LogStream').toSingle(LogStreamConsole, args.verbose, args.veryVerbose)\n Container.bind('UnityHelper').toSingle(UnityHelper)\n Container.bind('PackageManager').toSingle(PackageManager)\n Container.bind('ProcessRunner').toSingle(ProcessRunner)\n Container.bind('BuildRunner').toSingle(BuildRunner)\n Container.bind('JunctionHelper').toSingle(JunctionHelper)\n Container.bind('VisualStudioSolutionGenerator').toSingle(VisualStudioSolutionGenerator)\n Container.bind('VisualStudioHelper').toSingle(VisualStudioHelper)\n Container.bind('ProjectSchemaLoader').toSingle(ProjectSchemaLoader)\n\n runner = Runner(args)\n runner.run()\n\n\n","sub_path":"Upm/Source/mtm/upm/Upm.py","file_name":"Upm.py","file_ext":"py","file_size_in_byte":10026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429275531","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 28 20:01:48 2015\n\nThe redis script_load method is inspired by 'Redis in Action' from Dr. Josiah L. Carlson\n\n--see https://github.com/josiahcarlson/redis-in-action\n\n@author: Eike\n\"\"\"\nimport redis\n\n\ndef script_load(script):\n sha = [None]\n def call(conn, keys=[], args=[], force_eval=False):\n if force_eval:\n return conn.execute_command(\n \"EVAL\", script, len(keys), *(keys+args))\n \n if not sha[0]:\n sha[0] = conn.execute_command(\n \"SCRIPT\", \"LOAD\", script, parse=\"LOAD\")\n try:\n return conn.execute_command(\n \"EVALSHA\", sha[0], len(keys), *(keys+args))\n except redis.exceptions.ResponseError as msg:\n if not msg.args[0].startswith(\"NOSCRIPT\"):\n raise\n \n return call","sub_path":"tsm/tsdb/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246653638","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\ndf=pd.read_csv('sales1.csv',engine=\"python\")\n\nlist = df['Price'] * df['Qty']\ndf['FPrice'] = list\ndf1=df\n\ndef totalsale():\n total_sale = df['FPrice'].sum()\n print('TOTAL SALE=', total_sale)\ndef itemsold():\n items_sold = df['Qty'].sum()\n print('ITEMS SOLD', items_sold)\ndef avgsale():\n total_sale = df['FPrice'].sum()\n items_sold = df['Qty'].sum()\n avg_sale = total_sale/items_sold\n print('AVERAGE SALE', avg_sale)\ndef topfivesalesbyprice():\n prod_sales = pd.DataFrame(df.groupby('Item_Name').sum()['FPrice'])\n # Top 5 sales ie By Price\n prod_sales.sort_values(by=['FPrice'], inplace=True, ascending=False)\n top_prods = prod_sales.head(5)\n print(top_prods)\ndef mostsellingproducts():\n # Most selling products ie More Quantity\n best_selling_prods = pd.DataFrame(df.groupby('Item_Name').sum()['Qty'])\n best_selling_prods.sort_values(by=['Qty'], inplace=True, ascending=False)\n 
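A hedged usage sketch for the script_load helper defined above, assuming a local Redis server; the Lua script and key names are illustrative only:

import redis

incr_by = script_load("return redis.call('INCRBY', KEYS[1], ARGV[1])")
conn = redis.Redis(host='localhost', port=6379)
# the first call loads the script and caches its SHA; later calls go through EVALSHA
print(incr_by(conn, keys=['counter'], args=[5]))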
best_selling_prods = best_selling_prods.head(5)\n print('MOST SELLING PRODUCTS', best_selling_prods)\ndef revenueperitem():\n print('REVENUE PER ITEM')\n x = pd.DataFrame(df.groupby('Item_Name').sum()['FPrice'])\n print(x)\ndef revenuepercat():\n print('REVENUE PER CATEGORY')\n x = pd.DataFrame(df.groupby('Item_Category').sum()['FPrice'])\n print(x)\ndef revenuepercustomer():\n print('REVENUE PER CUSTOMER')\n mostvalcus = pd.DataFrame(df.groupby('Customer').sum()['FPrice'])\n print(mostvalcus)\ndef mostvaluablecustomer():\n mostvalcus = pd.DataFrame(df.groupby('Customer').sum()['FPrice'])\n mostvalcus.sort_values(by=['FPrice'], inplace=True, ascending=False)\n mostvalcus = mostvalcus.head(1)\n print(\"MOST VALUABLE CUSTOMER=\",mostvalcus)\ndef tfs_graph():\n list = df['Price'] * df['Qty']\n df['FPrice'] = list\n # 1\n prod_sales = pd.DataFrame(df.groupby('Item_Name').sum()['FPrice'])\n # Top 5 sales ie By Price\n prod_sales.sort_values(by=['FPrice'], inplace=True, ascending=False)\n top_prods = prod_sales.head(5)\n l1 = []\n l2 = []\n for i in top_prods.iterrows():\n l1.append(i[0])\n l2.append(i[1][0])\n plt.bar(l1, l2, width=0.5)\n plt.ylabel('Price', size=10, color='g')\n plt.xlabel('Item Name', size=10, color='b')\n plt.title('Graph of Top five products per price ', size=15, color='r')\n plt.show()\ndef tfms_graph():\n best_selling_prods = pd.DataFrame(df.groupby('Item_Name').sum()['Qty'])\n best_selling_prods.sort_values(by=['Qty'], inplace=True, ascending=False)\n best_selling_prods = best_selling_prods.head(5)\n l3=[]\n l4=[]\n\n for i in best_selling_prods.iterrows():\n l3.append(i[0])\n l4.append(i[1][0])\n plt.plot(l3,l4)\n plt.ylabel('Item_Name',size=10)\n plt.xlabel('Qty',size=10)\n plt.title('Top 5 sales by quantity',size=15)\n plt.show()\ndef rpcu():\n mostvalcus = pd.DataFrame(df.groupby('Customer').sum()['FPrice'])\n\n l5=[]\n l6=[]\n for i in mostvalcus.iterrows():\n l5.append(i[0])\n l6.append(i[1][0])\n plt.plot(l5,l6)\n\n plt.xlabel('Customers',size=10)\n plt.ylabel('Revenue',size=10)\n plt.title(\"Revenue per Customer\",size=15)\n plt.show()\ndef rpca():\n x = pd.DataFrame(df.groupby('Item_Category').sum()['FPrice'])\n\n l7 = []\n l8 = []\n for i in x.iterrows():\n l7.append(i[0])\n l8.append(i[1][0])\n plt.bar(l7, l8, width=0.5)\n\n plt.xlabel('Item_Category ', size=10)\n plt.ylabel('Revenue', size=10)\n plt.title(\"Revenue per Category\", size=15)\n plt.show()","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321726051","text":"import enum\nimport logging\n\nfrom sentry.incidents.models import IncidentStatus\nfrom sentry.models import Integration\nfrom sentry.shared_integrations.exceptions import ApiError\n\nfrom .client import MsTeamsClient, MsTeamsPreInstallClient, get_token_data\n\nMSTEAMS_MAX_ITERS = 100\n\nlogger = logging.getLogger(\"sentry.integrations.msteams\")\n\n\n# MS Teams will convert integers into strings in value inputs sent in adaptive\n# cards, may as well just do that here first.\nclass ACTION_TYPE(str, enum.Enum):\n RESOLVE = \"1\"\n IGNORE = \"2\"\n ASSIGN = \"3\"\n UNRESOLVE = \"4\"\n UNASSIGN = \"5\"\n\n\ndef channel_filter(channel, name):\n # the general channel has no name in the list\n # retrieved from the REST API call\n if channel.get(\"name\"):\n return name.lower() == channel.get(\"name\").lower()\n else:\n return name.lower() == \"general\"\n\n\ndef get_user_conversation_id(integration: 
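The report functions in the sales record above repeat the same groupby/sum/sort/head chain; a hypothetical helper that covers the top-products, best-sellers, and most-valuable-customer cases in one place:

import pandas as pd

def top_n(df, group_col, value_col, n=5):
    # aggregate per group, then rank descending and keep the first n rows
    totals = pd.DataFrame(df.groupby(group_col).sum()[value_col])
    totals.sort_values(by=[value_col], ascending=False, inplace=True)
    return totals.head(n)

# top_n(df, 'Item_Name', 'FPrice')      # top 5 products by revenue
# top_n(df, 'Item_Name', 'Qty')         # best sellers by quantity
# top_n(df, 'Customer', 'FPrice', n=1)  # most valuable customer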
Integration, user_id: str) -> str:\n    \"\"\"\n    Get the user_conversation_id even if `integration.metadata.tenant_id` is not set.\n    \"\"\"\n    client = MsTeamsClient(integration)\n\n    tenant_id = integration.metadata.get(\"tenant_id\")\n\n    if not tenant_id:\n        # This is definitely an integration of `integration.metadata.installation_type` == `team`,\n        # so use the `integration.external_id` (team_id) to get the tenant_id.\n        members = client.get_member_list(integration.external_id).get(\"members\")\n        tenant_id = members[0].get(\"tenantId\")\n\n    conversation_id = client.get_user_conversation_id(user_id, tenant_id)\n\n    return conversation_id\n\n\ndef get_channel_id(organization, integration_id, name):\n    try:\n        integration = Integration.objects.get(\n            provider=\"msteams\", organizations=organization, id=integration_id\n        )\n    except Integration.DoesNotExist:\n        return None\n\n    team_id = integration.external_id\n    client = MsTeamsClient(integration)\n\n    # handle searching for channels first\n    channel_list = client.get_channel_list(team_id)\n    filtered_channels = list(filter(lambda x: channel_filter(x, name), channel_list))\n    if len(filtered_channels) > 0:\n        return filtered_channels[0].get(\"id\")\n\n    # handle searching for users\n    members = client.get_member_list(team_id, None)\n    for i in range(MSTEAMS_MAX_ITERS):\n        member_list = members.get(\"members\")\n        continuation_token = members.get(\"continuationToken\")\n\n        filtered_members = list(\n            filter(lambda x: x.get(\"name\").lower() == name.lower(), member_list)\n        )\n        if len(filtered_members) > 0:\n            # TODO: handle duplicate username case\n            user_id = filtered_members[0].get(\"id\")\n            tenant_id = filtered_members[0].get(\"tenantId\")\n            return client.get_user_conversation_id(user_id, tenant_id)\n\n        if not continuation_token:\n            return None\n\n        members = client.get_member_list(team_id, continuation_token)\n\n    return None\n\n\ndef send_incident_alert_notification(action, incident, metric_value, new_status: IncidentStatus):\n    from .card_builder import build_incident_attachment\n\n    channel = action.target_identifier\n    integration = action.integration\n    attachment = build_incident_attachment(incident, new_status, metric_value)\n    client = MsTeamsClient(integration)\n    try:\n        client.send_card(channel, attachment)\n    except ApiError as e:\n        logger.info(\"rule.fail.msteams_post\", extra={\"error\": str(e)})\n\n\ndef get_preinstall_client(service_url):\n    # may want try/catch here since this makes an external API call\n    access_token = get_token_data()[\"access_token\"]\n    return MsTeamsPreInstallClient(access_token, service_url)\n","sub_path":"src/sentry/integrations/msteams/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
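A standalone sketch of the continuationToken paging loop that get_channel_id above runs over the member list, capped the way MSTEAMS_MAX_ITERS caps it; `fetch` stands in for client.get_member_list and is an assumption, not the Sentry API:

def iter_members(fetch, team_id, max_pages=100):
    # yield members page by page until the service stops returning a continuation token
    page = fetch(team_id, None)
    for _ in range(max_pages):
        yield from page.get('members', [])
        token = page.get('continuationToken')
        if not token:
            return
        page = fetch(team_id, token)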
+{"seq_id":"650430516","text":"# -*- coding: utf-8 -*-\r\n# @author ZhengZhong,Jiang\r\n# @time 2017/4/27 15:03\r\n\r\nfrom xml.etree.ElementTree import Element, ElementTree, tostring, parse\r\n\r\nf = open('demo.xml', 'r')\r\n\r\net = parse(f)\r\n\r\nroot = et.getroot()\r\n\r\nprint(root)\r\n\r\nprint(root.tag)\r\n\r\nprint(root.attrib)\r\n\r\nprint(root.text)\r\n\r\nprint(root.text.strip())\r\n\r\nfor child in root:\r\n    print(child.get('category'))\r\n\r\n\r\nprint(root.find('book'))\r\n\r\nprint(root.findall('book'))\r\n\r\ngenerator_obj = root.iterfind('book')\r\n\r\nfor i in generator_obj:\r\n    print(i.get('category'))\r\n\r\n\r\n# find(), findall(), iterfind() only search the direct children of the current node\r\n\r\n# iter() searches all descendant elements of the current node\r\n\r\n# findall(match pattern)\r\n\r\n# book/*\r\n\r\nfor x in root.findall('book/*'): # matches all child elements under book\r\n    print(x)\r\n\r\nfor x in root.findall('.//year'): # matches year wherever the node is\r\n    print(x)\r\n\r\n# root.findall('.//year/..') # the trailing .. selects the parent element\r\n\r\n# root.findall('title[@lang]') # title elements that have a lang attribute\r\n\r\n# root.findall('title[@lang=en]') # title elements whose lang attribute equals en\r\n\r\n# root.findall('title[1]') the first matched child element\r\n\r\n# root.findall('title[2]') the second matched child element\r\n\r\n# root.findall('title[last()]') the last matched child element\r\n\r\n# root.findall('title[last()-1]') the second-to-last matched child element\r\n\r\ne = Element('Data')\r\n\r\ne.set('name', 'abc')\r\n\r\ntostring(e)\r\n\r\ne.text = '123'\r\n\r\ntostring(e)\r\n\r\nprint(tostring(e))\r\n\r\ne2 = Element('Row')\r\n\r\ne.append(e2)\r\n\r\ne3 = Element('Open')\r\n\r\ne3.text = '8.80'\r\n\r\ne2.append(e3)\r\n\r\nprint(tostring(e))\r\n\r\n\r\net = ElementTree(e)\r\n\r\nprint('---', e2.tail)\r\n\r\net.write('test.xml')\r\n\r\n\r\n","sub_path":"drill/chapter_six/6-3.py","file_name":"6-3.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403780552","text":"import random\nfrom DATA_chatbot import *\nfrom CLASS_plant_feeling import Feeling\n\nclass Response:\n    def chatbot(self, user_message, plant_info):\n        feeling = Feeling(plant_info)\n        #plant_condition = feeling.feeling\n        #clevel = plant_info['Closeness level']\n\n        info_dic = {'key':{'conv':[], 'sens':[], 'feel':[]},\n                    'num':[0,0,0],\n                    'msg':['','',''],\n                    'clev':plant_info['Closeness level'],\n                    'cond':feeling.feeling\n                    }\n\n        bot_message = ''\n\n        self.find_keywords(user_message, info_dic)\n        print('{0}\\n{1}\\n{2}'.format(info_dic['key'],info_dic['num'],info_dic['msg']))\n\n        bot_message = self.package_message(info_dic, plant_info)\n\n\n        return bot_message\n\n    def find_keywords(self, user_msg, info_dic):\n\n        for key1 in conv_DB:\n            for key2 in conv_DB[key1]['user']:\n                if key2 in user_msg:\n                    if key1 in info_dic['key']['conv']:\n                        pass\n                    else:\n                        info_dic['key']['conv'].append(key1)\n                        info_dic['num'][0] += 1\n\n        for key1 in sens_DB:\n            for key2 in sens_DB[key1]:\n                if key2 in user_msg:\n                    if key1 in info_dic['key']['sens']:\n                        pass\n                    else:\n                        info_dic['key']['sens'].append(key1)\n                        info_dic['num'][1] += 1\n\n        for key in feel_DB['Feel']['user']:\n            if key in user_msg:\n                if 'Feel' in info_dic['key']['feel']:\n                    pass\n                else:\n                    info_dic['key']['feel'].append('Feel')\n                    info_dic['num'][2] += 1\n\n        #print(keyword_in_user_msg)\n\n    def package_message(self, info_dic, plant_info):\n        message = ''\n\n        if info_dic['num'][0] > 0:\n            info_dic['msg'][0] = self.handle_conv(info_dic)\n        if info_dic['num'][1] > 0:\n            info_dic['msg'][1] = self.handle_sens(info_dic, plant_info)\n        if info_dic['num'][2] > 0:\n            info_dic['msg'][2] = self.handle_feel(info_dic)\n\n        if sum(info_dic['num']) == 1:\n            for i in range(len(info_dic['num'])):\n                if info_dic['num'][i] == 1:\n                    message += info_dic['msg'][i]\n                    break\n\n        elif sum(info_dic['num']) == 0:\n            message = '죄송합니다. 제가 많이 부족합니다. 
(by 개발자)'\n\n elif sum(info_dic['num']) > 1:\n for i in range(len(info_dic['num'])):\n if info_dic['num'][i] >= 1:\n message += info_dic['msg'][i]\n if not(i == (len(info_dic['num'])-1)):\n message += '\\n'\n\n if info_dic['cond'] > 2:\n rand = random.randrange(len(feel_DB['Emoticon']['good']))\n message = message + ' ' + feel_DB['Emoticon']['good'][rand]\n elif info_dic['cond'] < 2:\n rand = random.randrange(len(feel_DB['Emoticon']['bad']))\n message = message + ' ' + feel_DB['Emoticon']['bad'][rand]\n\n return message\n\n def handle_conv(self, info_dic):\n conv_msg = ''\n for key in info_dic['key']['conv']:\n rand = random.randrange(len(conv_DB[key]['bot'][info_dic['clev']]))\n conv_msg += conv_DB[key]['bot'][info_dic['clev']][rand]\n return conv_msg\n\n def handle_sens(self, info_dic, plant_info):\n sens_msg = ''\n for key in info_dic['key']['sens']:\n sens_msg += ('{0} is {1}'.format(sens_DB[key][0], plant_info[key]))\n if key != info_dic['key']['sens'][len(info_dic['key']['sens'])-1]:\n sens_msg += ' and '\n return sens_msg\n\n def handle_feel(self, info_dic):\n feel_msg = ''\n rand = random.randrange(len(feel_DB['Feel']['bot'][info_dic['cond']]))\n feel_msg += feel_DB['Feel']['bot'][info_dic['cond']][rand]\n return feel_msg\n","sub_path":"드라이브 박제/180529_Day6/신재코드/CLASS_chatbot.py","file_name":"CLASS_chatbot.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"644497720","text":"#!/usr/bin/python\n\"\"\"\nThis is the from simple example to showcase Containernet.\nRun it bt sudo python /path/to/file/cluster.py\n\"\"\"\nfrom mininet.net import Containernet\nfrom mininet.node import Controller\nfrom mininet.cli import CLI\nfrom mininet.link import TCLink\nfrom mininet.log import info, setLogLevel\nsetLogLevel('info')\n\nnet = Containernet(controller=Controller)\ninfo('*** Adding controller\\n')\nnet.addController('c0')\ninfo('*** Adding docker containers\\n')\n\n# TODO\n# create topology here.\n# Hint :\ns0 = net.addSwitch('S0')\ns1 = net.addSwitch('S1')\ns2 = net.addSwitch('S2')\n\nd11 = net.addDocker('D11', ip='10.0.0.11', dimage=\"new-cnp\")\nd12 = net.addDocker('D12', ip='10.0.0.12', dimage=\"new-cnp\")\nd21 = net.addDocker('D21', ip='10.0.0.21', dimage=\"new-cnp\")\nd22 = net.addDocker('D22', ip='10.0.0.22', dimage=\"new-cnp\")\n\ninfo('*** Creating links\\n')\n\n# TODO\n# create other links\n# Hint :\nnet.addLink(s0, s1, cls=TCLink, delay='5ms', bw=1)\nnet.addLink(s0, s2, cls=TCLink, delay='5ms', bw=1)\n\nnet.addLink(s1, d11, cls=TCLink, delay='10ms', bw=5)\nnet.addLink(s1, d12, cls=TCLink, delay='10ms', bw=5)\nnet.addLink(s2, d21, cls=TCLink, delay='10ms', bw=5)\nnet.addLink(s2, d22, cls=TCLink, delay='10ms', bw=5)\n\ninfo('*** Starting network\\n')\nnet.start()\ninfo('*** Testing connectivity\\n')\n# you can test connectivity between nodes with:\nnet.ping([d21, d22])\nnet.ping([d11, d22])\n\ninfo('*** Setup nodes\\n')\n\n# start ssh on hosts\nfor host in net.hosts:\n host.cmd('/usr/sbin/sshd -D &')\n\n# TODO\n# By using d1.cmd('command') you can run `command` on container d1\n# Run 'export HADOOP_HOSTS=\"list of comma separated host for example : `10.0.0.1 master, 10.0.0.2 slave1`\"' on each host\n# to declare nodes of cluster for it and on master node you also need to run 'export MY_ROLE=\"master\"' to define it's role.\n# run '/$HADOOP_HOME/etc/hadoop/start.sh > result' on each host to start hadoop on it.\nexportVar = 'export HADOOP_HOSTS=\"10.0.0.11 slave1, 10.0.0.12 slave2, 
10.0.0.21 slave3, 10.0.0.22 master\"'\nd11.cmd(exportVar)\nd12.cmd(exportVar)\nd21.cmd(exportVar)\nd22.cmd(exportVar) #master\n\nd22.cmd('export MY_ROLE=\"master\"')\n\nrunCmd = '/$HADOOP_HOME/etc/hadoop/start.sh > result'\nd11.cmd(runCmd)\nd12.cmd(runCmd)\nd21.cmd(runCmd)\nd22.cmd(runCmd) #master\n\n# Download wikipedia data!\ninfo('*** Download wikipedia data!\\n')\nd22.cmd(\"hadoop com.sun.tools.javac.Main Crawler.java HdfsWriter.java\")\nd22.cmd(\"jar cf new.jar Crawler.class HdfsWriter.class\")\nd22.cmd(\"hadoop jar new.jar Crawler wikipedia.keys\")\n\n# Move 'stop-words.keys' to HDFS!\ninfo('*** Move stop-words.keys to HDFS!\\n')\nd22.cmd(\"hadoop jar new.jar HdfsWriter stop-words.keys /user/sina/data/stop-words.keys\")\n\n# Compile WordCount.java!\ninfo('*** Compile WordCount.java!\\n')\nd22.cmd(\"hadoop com.sun.tools.javac.Main WordCount.java\")\nd22.cmd(\"jar cf w.jar WordCount*.class\")\n\n# RUN!\ninfo('*** RUN!\\n')\ninfo('#### output2!\\n')\nd22.cmd(\"hadoop jar w.jar WordCount /user/data/raw.data /user/sina/data/out2.data output2 2\")\ninfo('#### output3!\\n')\nd22.cmd(\"hadoop jar w.jar WordCount /user/data/raw.data /user/sina/data/out3.data output3 3\")\ninfo('#### output4!\\n')\nd22.cmd(\"hadoop jar w.jar WordCount /user/data/raw.data /user/sina/data/out4.data output4 4\")\ninfo('#### output5!\\n')\nd22.cmd(\"hadoop jar w.jar WordCount /user/data/raw.data /user/sina/data/out5.data output5 5\")\ninfo('#### output10!\\n')\nd22.cmd(\"hadoop jar w.jar WordCount /user/data/raw.data /user/sina/data/out10.data output10 10\")\n# /user/data/mahdiz.big\n\ninfo('*** FINISH!\\n')\n\ninfo('*** Running CLI\\n')\nCLI(net)\ninfo('*** Stopping network')\nnet.stop()\n","sub_path":"Computer Networks/CA2/6/cluster-q6.py","file_name":"cluster-q6.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376199996","text":"# 1. Составить программу, которая требует ввести два числа.\n# Если первое число больше второго, то программа печатает слово больше.\n# Если первое число меньше второго, то программа печатает слово меньше.\n# А если числа равны, программа напечатает сообщение Эти числа равны.\n\n\ndef ex1():\n a = input(\"Введите первое число \\n > \")\n b = input(\"Введите второе число \\n > \")\n\n if a > b:\n print('первое число больше второго')\n elif a == b:\n print('числа равны')\n else:\n print('второе число больше первого')\n\n\n# 2. Составить алгоритм увеличения всех трех, введённых с клавиатуры, переменных на 5,\n# если среди них есть хотя бы две равные. В противном случае выдать ответ «равных нет».\n\ndef ex2():\n a = int(input(\"Введите первое число \\n > \"))\n b = int(input(\"Введите второе число \\n > \"))\n c = int(input(\"Введите третье число \\n > \"))\n\n if a == b or a == c or b == c:\n print(a+5, b+5, c+5)\n else:\n print('равных нет')\n\n\n# Вывести таблицу умножения на экран.\ndef ex3():\n number = 0\n for number in range(11):\n for second_number in range(11):\n print(number, '*', second_number, '=', number*second_number)\n print()\n\n\n# Вывести на экран фигуры со звездочек (Ромб, Елочка, Треугольник, Квадрат, ступеньки)\ndef ex4():\n value = input(\"input value\")\n a = \" \"\n b = \"*\"\n\n\n# Напишите программу, запрашивающую имя, фамилия, отчество и номер группы студента и выводящую введённые данные в\n# следующем виде:\n# ************************************\n# *Лабораторная работа № 1 *\n# *Выполнил(а): ст. гр. 
ЗИ-123 *\n# *Иванов Андрей Петрович *\n# ************************************\n# Необходимо, чтобы программа сама определяла нужную длину рамки. Сама же длина рамки зависит от длины наибольшей\n# строки внутри рамки. Используя циклы for легко можно выровнять стороны рамки.\ndef ex5():\n name = input('введите Ваши ФИО')\n group = input('введите номер группы')\n first_line = 'Лабораторная работа № 1'\n second_line = '{}{}'.format(\"Выполнил(а): ст. гр.\", group)\n d = [len(name), len(first_line), len(second_line)]\n\n maxlong = 0\n for i in d:\n if i > maxlong:\n maxlong = i\n\n print('*' * (maxlong+2))\n print('*{}{}*'.format(first_line, ' ' * (maxlong - len(first_line))))\n print('*{}{}*'.format(second_line, ' ' * (maxlong - len(second_line))))\n print('*{}{}*'.format(name, ' ' * (maxlong - len(name))))\n print('*' * (maxlong + 2))\n\n\n# 5. Дано двузначное число. Определить:\n# • входит ли в него цифра 3\n# • входит ли в него цифра а\ndef ex6():\n value = input('input value \\n > ')\n a = '3'\n b = 'a'\n for i in value:\n if i == a:\n print('a = ok')\n elif i == b:\n print('b = ok')\n\n\n# 6. Определить, является ли треугольник со сторонами a, b, c равнобедренным\n\ndef ex7():\n a = int(input('input first side \\n > '))\n b = int(input('input second side \\n > '))\n c = int(input('input third side \\n > '))\n\n if a + b > c and b + c > a and a + c > b:\n if a == b == c:\n print('This is equilateral triangle')\n elif a == b or a == c or b == c:\n print('This is isosceles triangle')\n else:\n print('It isn`t isosceles triangle')\n else:\n print('It can`t be triangle')\n\n\n# 7. Даны три различных числа. Определить, какое из них (первое, второе или третье)\n# • самое большое\n# • самое маленькое\n# • является средним\ndef ex8():\n a = int(input('input first value \\n > '))\n b = int(input('input second value \\n > '))\n c = int(input('input third value \\n > '))\n d = [a, b, c]\n maxvalue = 0\n for i in d:\n if i > maxvalue:\n maxvalue = i\n print('maxvalue =', maxvalue)\n\n minvalue = maxvalue\n for i in d:\n if i < minvalue:\n minvalue = i\n print('minvalue =', minvalue)\n\n average = 0\n for i in d:\n if minvalue < i < maxvalue:\n average = i\n print('average =', average)\n\n","sub_path":"homework_lesson1/lesson2.py","file_name":"lesson2.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"550866391","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\nimport os\nfrom fnmatch import fnmatch, fnmatchcase\nimport re\nimport random\nimport xlwt\nimport windnd\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\nFcNamePatten = re.compile(r'[a-zA-Z][0-9a-fA-F]-[0-9][0-9]')\n\nVersion = 0.01\n\nclass FeatrueExtractmain:\n ComboParaCfg = {} # 下拉参数配置字典\n BitComboParaCfg = {} # Bit位下拉参数配置字典\n DefinitionCfg = {} # 宏定义参数配置字典\n FileVersionCfg = {} # 文件版本信息配置字典\n\n MotorInfoSum = {} # 提取的工程信息汇总字典\n\n repeatPjt = {} # 重复的工程信息汇总字典\n errFuncCode = {} # 读取功能码错误的信息汇总\n\n # 模块及特征文件配置,支持通配符,可配置多个模块和多个条件 \n FuncSelCfg = {\n 'Tm':{\n 'File1': r'Project\\.ccsproject',\n 'File2': r'Project\\.ccs*ject',\n 'File3': r'Project\\.ccsproject',\n },\n 'Eps':{\n 'File1': r'Project\\.ccsproject',\n 'File2': r'Project\\.ccsproject',\n 'File3': r'Project\\.ccsproject',\n },\n 'Acm':{\n 'File1': r'Project\\.ccsproject',\n 'File2': r'Project\\.ccsproject',\n 'File3': r'Project\\.ccsproject',\n },\n 'Test':{\n 'File1': r'Project\\.ccsproject',\n 'File2': r'Project\\.ccsproject',\n 'File3': 
r'Project\\.ccsproject',\n },\n }\n # 可选择选择的功能模块配置,默认读取第一个\n FuncSelList = []\n CurSelectFunc = None\n \n InfoDispNum = 1\n InfoDispColor= (('Error','red'),('Warning','orange'),('Operation','blue'),('Result','blue')) # 信息显示颜色配置\n\n def __init__(self):\n # 获取功能选择的选项\n for key in self.FuncSelCfg.keys():\n self.FuncSelList.append(key)\n\n # 建立主窗体\n self.__SetupMainWindow()\n # 初始化读取配置文件\n self.CurSelectFunc = self.FuncSelList[0]\n self.__ReadCfgFile(r'FeatureCfg_' + str(self.CurSelectFunc) + '.ini')\n # 运行窗体主循环\n self.MainWin.mainloop()\n\n # 建立主窗口\n def __SetupMainWindow(self):\n self.MainWin = tk.Tk()\n self.MainWin.title('Feature Extract Tool - V' + str(Version))\n self.MainWin.geometry('800x500')\n\n # 创建菜单\n # 在大窗口下定义一个菜单实例\n self.menuBar = Menu(self.MainWin)\n # 增加第一个菜单项\n #self.menuBar.add_command(label = '文件',command = self.__menuBarTestCmd)\n # 增加文件菜单的子菜单\n self.subMenuFile = Menu(self.menuBar, tearoff=False)\n self.subMenuFile.add_command(label = '选择路径', command = self.__ButSelectRootPath)\n self.subMenuFile.add_separator()\n self.subMenuFile.add_command(label = '退出', command = self.__menuBarQuit)\n self.menuBar.add_cascade(label = '文件', menu = self.subMenuFile)\n \n # 增加编辑子菜单\n self.subMenuEdit = Menu(self.menuBar, tearoff=False)\n self.subMenuEdit.add_command(label = '打开路径', command = self.__OpenAnsysPath)\n self.subMenuEdit.add_command(label = '清除信息', command = self.__InfoDispClear)\n self.subMenuEdit.add_separator()\n self.subMenuEdit.add_command(label = '开始分析', command = self.__StartAnalysisPath)\n self.menuBar.add_cascade(label = '编辑', menu = self.subMenuEdit)\n \n # 增加帮助子菜单\n self.subMenuHelp = Menu(self.menuBar, tearoff=False)\n self.subMenuHelp.add_command(label = '帮助', command = self.__menuBarHelp)\n self.subMenuHelp.add_command(label = '关于', command = self.__menuBarAbout)\n self.menuBar.add_cascade(label = '帮助', menu = self.subMenuHelp)\n\n self.MainWin['menu']=self.menuBar\n\n # 创建区域\n style = ttk.Style()\n style.configure(\"TPanedwindow\", background='#DDDDDD' )\n self.LeftPaneWin = ttk.PanedWindow(self.MainWin, orient=HORIZONTAL, style=\"TPanedwindow\")\n self.LeftPaneWin.pack(fill = BOTH, padx = 0, pady = 5, expand = TRUE, side=TOP)\n \n # 创建显示文件树的区域LabelFrame\n self.FileTreeLableFrame = ttk.LabelFrame(self.LeftPaneWin, text = ' 工程目录 ')\n self.FileTreeLableFrame.pack(fill = BOTH, padx = 5, pady = 5, expand = TRUE, side=TOP)\n # 创建滑动条\n self.FileTreeBar = ttk.Scrollbar(self.FileTreeLableFrame)\n # 创建Treeview\n self.FileDispTreeView = ttk.Treeview(self.FileTreeLableFrame,show = 'tree', yscrollcommand = self.FileTreeBar.set)\n # 绑定并Pack滚动条及Treeview\n self.FileTreeBar.config(command = self.FileDispTreeView.yview)\n self.FileTreeBar.pack(fill = Y, expand = FALSE, padx = 0, pady = 10, side=RIGHT)\n self.FileDispTreeView.pack(fill = BOTH, padx = 5, pady = 5, expand = TRUE, side=RIGHT)\n\n self.FileDispTreeView.bind(\"\", self.__MouseRightClickFileTree)\n\n # 将文件树区域添加进来\n self.LeftPaneWin.add(self.FileTreeLableFrame)\n\n # 创建第二个多窗口\n self.RightPaneWin = ttk.PanedWindow(self.LeftPaneWin, orient=VERTICAL, style=\"fkit.TPanedwindow\")\n self.LeftPaneWin.add(self.RightPaneWin)\n\n # 创建操作区域LabelFrame\n self.OprtLableFrame = ttk.LabelFrame(self.RightPaneWin, text = ' 功能操作 ')\n self.OprtLableFrame.pack(fill = BOTH, padx = 5, pady = 5, expand = TRUE, side=TOP)\n\n # 创建选择框\n self.rootPathFrame = ttk.Frame(self.OprtLableFrame)\n self.rootPathFrame.pack(fill=X, padx=5, pady=5, expand = True, side = TOP)\n self.rootPathLabel = ttk.Label(self.rootPathFrame, width = 7)\n 
self.rootPathLabel.config(text = '扫描目录')\n self.rootPathLabel.pack(fill=X, padx=5, pady=5, expand = False, side = LEFT, anchor = CENTER)\n self.rootPathVar = StringVar()\n self.rootPahtEntry = ttk.Entry(self.rootPathFrame, state = 'readonly', textvariable = self.rootPathVar)\n self.rootPahtEntry.pack(fill=X, padx=0, pady=5, expand = True, side = LEFT, anchor = CENTER)\n self.rootPathBut = ttk.Button(self.rootPathFrame)\n self.rootPathBut.config(text = '...',width = 6,command = self.__ButSelectRootPath)\n self.rootPathBut.pack(fill=X, padx=0, pady=5, expand = False, side = RIGHT, anchor = CENTER)\n\n windnd.hook_dropfiles(self.rootPahtEntry, self.__DropSelectRootPath)\n\n # 创建功能按钮Frame\n self.funcFrame = ttk.Frame(self.OprtLableFrame) \n self.funcFrame.pack(fill=X, padx=5, pady=0, expand = True, side = TOP)\n # 创建功能选择下拉框的说明标签\n self.SelFuncLabel = ttk.Label(self.funcFrame, width = 7)\n self.SelFuncLabel.config(text = '配置模块')\n self.SelFuncLabel.pack(fill=X, padx=5, pady=5, expand = False, side = LEFT, anchor = CENTER)\n # 创建功能选择下拉框\n self.selComBox = ttk.Combobox(self.funcFrame)\n self.selComBox.config(values = self.FuncSelList, width = 12)\n self.selComBox.current(0)\n self.selComBox.pack(fill=NONE, padx=0, pady=5, expand = True, side = LEFT, anchor = 'w')\n self.selComBox.bind(\"<>\", self.__FuncSelected)\n # 创建参数提取按钮\n self.startAnsysBut = ttk.Button(self.funcFrame)\n self.startAnsysBut.config(text = '开始分析',width = 8, command = self.__StartAnalysisPath)\n self.startAnsysBut.pack(fill=X, padx=0, pady=5, expand = False, side = RIGHT, anchor = CENTER)\n # 创建打开路径按钮\n self.OpenPathBut = ttk.Button(self.funcFrame)\n self.OpenPathBut.config(text = '打开路径', width = 8, command = self.__OpenAnsysPath)\n self.OpenPathBut.pack(fill=X, padx=10, pady=5, expand = False, side = RIGHT, anchor = CENTER)\n # 创建清除信息按钮\n self.ClearInfoBut = ttk.Button(self.funcFrame)\n self.ClearInfoBut.config(text = '清除信息', width = 8, command = self.__InfoDispClear)\n self.ClearInfoBut.pack(fill=X, padx=0, pady=5, expand = False, side = RIGHT, anchor = CENTER)\n\n # 创建信息显示的Frame\n self.InfoLableFrame = ttk.LabelFrame(self.RightPaneWin,text = ' 信息显示 ')\n self.InfoLableFrame.pack(fill = BOTH, padx = 5, pady = 5, expand = TRUE, side=BOTTOM)\n # 创建信息显示的Treeview\n self.InfoTreeView = ttk.Treeview(self.InfoLableFrame)\n self.DiffBar = ttk.Scrollbar(self.InfoLableFrame)\n self.InfoTreeView.config(columns=('Number','Type','Info'), show='headings', height = 1000, yscrollcommand = self.DiffBar.set)\n self.InfoTreeView.column('Number', width = 40, anchor = 'center')\n self.InfoTreeView.heading('Number', text ='序号')\n self.InfoTreeView.column('Type', width = 60, anchor ='center')\n self.InfoTreeView.heading('Type', text ='类型')\n self.InfoTreeView.column('Info', width = 1500, anchor = 'w')\n self.InfoTreeView.heading('Info', text ='信息')\n # 创建信息显示绑定的的滚动条\n self.DiffBar.config(command = self.InfoTreeView.yview)\n self.DiffBar.pack(fill = Y, expand = FALSE, padx = 0, pady = 10, side=RIGHT)\n self.InfoTreeView.pack(expand = TRUE, fill = BOTH, padx = 0, pady = 10, side=LEFT)\n\n # 创建显示分析结果的Frame\n self.MotInfoSumLabelFrame = ttk.LabelFrame(self.RightPaneWin,text = ' 分析结果 ')\n self.MotInfoSumLabelFrame.pack(fill = BOTH, padx = 5, pady = 5, expand = TRUE, side=BOTTOM)\n self.MotInfoHFrame = ttk.Frame(self.MotInfoSumLabelFrame)\n\n self.MotInfoSumTreeView = ttk.Treeview(self.MotInfoHFrame)\n self.MotInfoVBar = ttk.Scrollbar(self.MotInfoHFrame, orient=VERTICAL)\n self.MotInfoHBar = ttk.Scrollbar(self.MotInfoSumLabelFrame, orient=HORIZONTAL)\n 
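The scrollbar wiring used throughout this method follows tkinter's standard two-way binding: the widget reports its visible fraction through yscrollcommand, and the scrollbar drives the widget through its command callback. A minimal self-contained illustration of the same pattern:

    import tkinter as tk
    from tkinter import ttk

    root = tk.Tk()
    bar = ttk.Scrollbar(root, orient='vertical')
    tree = ttk.Treeview(root, yscrollcommand=bar.set)  # tree updates the scrollbar thumb
    bar.config(command=tree.yview)                     # scrollbar scrolls the tree
    bar.pack(side='right', fill='y')
    tree.pack(side='left', fill='both', expand=True)
    for i in range(50):
        tree.insert('', 'end', text='item {}'.format(i))
    root.mainloop()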
self.MotInfoSumTreeView.config(show='headings', height = 8, yscrollcommand = self.MotInfoVBar.set, xscrollcommand = self.MotInfoHBar.set)\n\n        self.MotInfoVBar.config(command = self.MotInfoSumTreeView.yview)\n        self.MotInfoVBar.pack(fill = Y, expand = FALSE, padx = 0, pady = 0, side=RIGHT)\n        self.MotInfoSumTreeView.pack(expand = TRUE, fill = BOTH, padx = 0, pady = 0, side=RIGHT)\n        self.MotInfoHBar.config(command = self.MotInfoSumTreeView.xview)\n        self.MotInfoHBar.pack(fill = X, expand = FALSE, padx = 0, pady = 0, side=BOTTOM)\n        self.MotInfoHFrame.pack(fill=BOTH, padx=0, pady=0, expand = True, side = TOP)\n\n        # Add the child widgets to the window layout manager\n        self.RightPaneWin.add(self.OprtLableFrame)\n        self.RightPaneWin.add(self.MotInfoSumLabelFrame)\n        self.RightPaneWin.add(self.InfoLableFrame)\n\n    # Recursively search directories, checking whether a project path exists\n    def __PathRecursionSearch(self, curPath):\n        print(curPath)\n        if self.__JudgeIsPathValidPjt(curPath, self.CurSelectFunc) == True:\n            # Get the project name\n            MotorInfoName = os.path.split(curPath)[-1]\n\n            # Check for projects with duplicate names\n            if MotorInfoName in self.MotorInfoSum.keys():\n                if MotorInfoName in self.repeatPjt.keys():\n                    RepeatNum = self.repeatPjt[MotorInfoName]['RepeatNum'] + 1\n                else:\n                    RepeatNum = 1\n\n                self.repeatPjt[MotorInfoName] = {'RepeatNum':RepeatNum}\n                MotorInfoName = str(MotorInfoName) + '(' + str(RepeatNum) + ')'\n\n            # Extract the project information\n            self.MotorInfoSum[MotorInfoName]={}  # first create a fresh dict for this project\n            self.__GetComboParaInfo(curPath, MotorInfoName)  # read the project's dropdown parameters\n            self.__GetBitComboParaInfo(curPath, MotorInfoName)  # read the project's bit-field dropdown parameters\n            self.__GetDefinitionInfo(curPath, MotorInfoName)  # read the project's macro definition parameters\n            self.__GetFileVersionInfo(curPath, MotorInfoName)  # read the version number from file names in the project\n\n            # Create the root node of the file tree\n            root = self.FileDispTreeView.insert(\"\",\"end\",text=MotorInfoName,values=(curPath))\n\n            self.__LoadTreeSubList(curPath, root)\n\n            self.__InfoDispAdd('Result', '已分析完第' + str(len(self.MotorInfoSum)) + '个工程')\n        elif os.path.isdir(curPath):\n            pathList = os.listdir(curPath)\n            for path in pathList:\n                if(os.path.isdir(curPath + '\\\\' + path)):\n                    self.__PathRecursionSearch(curPath + '\\\\' + path)\n\n    # Read the configuration file\n    def __ReadCfgFile(self, cfgPath):\n        if not os.path.exists(cfgPath):\n            self.__InfoDispAdd('Error','配置文件丢失:' + str (cfgPath))\n            return\n\n        self.ComboParaCfg.clear()\n        self.BitComboParaCfg.clear()\n        self.DefinitionCfg.clear()\n        self.FileVersionCfg.clear()\n        with open(cfgPath, 'r', encoding = 'utf-8') as cfgFile:\n            cfgLines = cfgFile.readlines()\n\n        ComboParaStart = False\n        BitComboParaStart = False\n        DefinitionStart = False\n        FileVersionStart = False\n        for line in cfgLines:\n            line = line.strip()\n\n            # Skip blank lines\n            if(line == ''):\n                continue\n\n            # Read the dropdown parameter configuration\n            if(ComboParaStart == True):\n                if line.startswith('[ComboParaEnd]'):\n                    ComboParaStart = False\n                else:\n                    if(line.startswith('[List-')):\n                        lineTemp = line.split(':')\n                        CbParListName = lineTemp[1].split(',')[0].strip()\n                        CbParFuncCode = lineTemp[1].split(',')[1].strip()\n                        self.ComboParaCfg[CbParListName]={'FuncCode':CbParFuncCode}\n                    else:\n                        lineTemp = line.split(':')\n                        if((len(lineTemp) == 2) and (lineTemp[0].isdigit())):\n                            self.ComboParaCfg[CbParListName].update({lineTemp[0].strip() : lineTemp[1].strip()})\n            else:\n                if line.startswith('[ComboPara]'):\n                    ComboParaStart = True\n\n            # Read the bit-field dropdown parameter configuration\n            if(BitComboParaStart == True):\n                if line.startswith('[BitComboParaEnd]'):\n                    BitComboParaStart = False\n                else:\n                    if(line.startswith('[List-')):\n                        BitParaFuncCode = line.split(':')[1].strip()\n                    elif(line.startswith('bit')):\n                        BitNumber = line.split(':')[0].replace('bit','').strip()\n                        BitParaListName = 
line.split(':')[1].strip()\n self.BitComboParaCfg[BitParaListName]={\n 'FuncCode': BitParaFuncCode,\n 'bit': BitNumber\n }\n else:\n lineTemp = line.split(':')\n if((len(lineTemp) == 2) and (lineTemp[0].isdigit())):\n self.BitComboParaCfg[BitParaListName].update({lineTemp[0].strip() : lineTemp[1].strip()})\n else:\n if line.startswith('[BitComboPara]'):\n BitComboParaStart = True\n\n # 宏定义参数配置读取\n if(DefinitionStart == True):\n if line.startswith('[DefinitionEnd]'):\n DefinitionStart = False\n else:\n lineTemp = line.split(':')\n DefListName = lineTemp[0].strip()\n DefFilePath = lineTemp[1].split(',')[0].strip().strip('\\\\')\n DefinitionName = lineTemp[1].split(',')[1].strip()\n self.DefinitionCfg[DefListName]={\n 'filePath': DefFilePath,\n 'definition': DefinitionName\n }\n else:\n if line.startswith('[Definition]'):\n DefinitionStart = True\n\n # 文件版本号配置读取\n if(FileVersionStart == True):\n if line.startswith('[FileVersionEnd]'):\n FileVersionStart = False\n else:\n lineTemp = line.split(':')\n FileVerListName = lineTemp[0].strip()\n FileVerPath = lineTemp[1].split(',')[0].strip().strip('\\\\')\n FileVerKeyWord = lineTemp[1].split(',')[1].strip()\n self.FileVersionCfg[FileVerListName]={\n 'filePath':FileVerPath,\n 'keyWord':FileVerKeyWord\n }\n else:\n if line.startswith('[FileVersion]'):\n FileVersionStart = True\n\n # 获取下拉框参数类信息\n def __GetComboParaInfo(self, RootDir, MotorName):\n for key in self.ComboParaCfg.keys():\n FcName = self.ComboParaCfg[key]['FuncCode']\n FcValue = self.__GetFuncCodeValue(RootDir, FcName)\n if(FcValue == None):\n FcInfo = '未找到功能码'\n elif str(FcValue) in self.ComboParaCfg[key].keys():\n FcInfo = self.ComboParaCfg[key][str(FcValue)]\n else:\n FcInfo = str(FcName) + ' = ' +str(FcValue)\n \n self.MotorInfoSum[MotorName].update({key:FcInfo})\n\n # 获取Bit位下拉框类信息\n def __GetBitComboParaInfo(self, RootDir, MotorName):\n for key in self.BitComboParaCfg.keys():\n FcName = self.BitComboParaCfg[key]['FuncCode']\n FcValue = self.__GetFuncCodeValue(RootDir, FcName)\n if(FcValue == None):\n FcBitInfo = '未找到功能码'\n else:\n FcBit = self.BitComboParaCfg[key]['bit']\n FcBitValue = (FcValue & (1 << int(FcBit))) >> int(FcBit)\n\n if(str(FcBitValue) in self.BitComboParaCfg[key].keys()):\n FcBitInfo = self.BitComboParaCfg[key][str(FcBitValue)]\n else:\n FcBitInfo = str(FcName) + ' bit' + str(FcBit) + ' = ' + str(FcBitValue)\n\n self.MotorInfoSum[MotorName].update({key: FcBitInfo})\n\n # 获取宏定义信息\n def __GetDefinitionInfo(self, RootDir, MotorName):\n for key in self.DefinitionCfg.keys():\n defInfo = '未找到宏定义'\n defPath = str(RootDir) + '\\\\' + self.DefinitionCfg[key]['filePath']\n if os.path.exists(defPath):\n with open(defPath, 'r', encoding='utf-8') as defFile:\n defLines = defFile.readlines()\n for line in defLines:\n line = line.strip()\n if line.startswith('#define'):\n line = line.replace('#define','').strip()\n if line.startswith(self.DefinitionCfg[key]['definition']):\n defInfo = line.replace(self.DefinitionCfg[key]['definition'],'').strip()\n defInfo = defInfo.replace('(','').replace(')','').replace('U','').replace('u','')\n pass\n else:\n defInfo = '未找到文件'\n \n self.MotorInfoSum[MotorName].update({key: defInfo})\n\n pass\n\n # 获取文件版本号信息\n def __GetFileVersionInfo(self, RootDir, MotorName):\n for key in self.FileVersionCfg.keys():\n fileVerInfo = '未找到文件'\n\n fileVerPath = str(RootDir) + '\\\\' + self.FileVersionCfg[key]['filePath']\n # 文件是通配符,所以先查找目录下所有文件,再逐个对比确认文件\n FileDir = os.path.split(fileVerPath)[0] # 获取文件所在目录\n ListFile = os.listdir(FileDir) # 获取文件所在目录下所有文件\n \n # 
对目录下所有文件进行逐个对比\n for file in ListFile:\n if fnmatch((str(FileDir) + '\\\\' + str(file)), str(fileVerPath)):\n FileVerName = file\n VersionKeyWord = self.FileVersionCfg[key]['keyWord'] + r'[0-9]+'\n CurFindResult = re.compile(VersionKeyWord).findall(FileVerName)\n if len(CurFindResult) >=1:\n VerFindEnd = False\n while VerFindEnd == False:\n fileVerInfo = CurFindResult[0].replace(self.FileVersionCfg[key]['keyWord'], '')\n VersionKeyWord = VersionKeyWord + r'.[0-9]+'\n CurFindResult = re.compile(VersionKeyWord).findall(FileVerName)\n if(len(CurFindResult) == 0):\n VerFindEnd = True\n else:\n fileVerInfo = '未找到版本号'\n self.MotorInfoSum[MotorName].update({key: fileVerInfo})\n\n # 获取功能码值\n def __GetFuncCodeValue(self, RootDir, FuncCodeName):\n FcValue = None \n FcAttPath = RootDir + r'\\01_Para1\\f_FcAttribute.c'\n # 检查功能码格式是否错误\n if not FcNamePatten.findall(FuncCodeName):\n self.__ErrFuncCodeInfoAdd(FuncCodeName, FcAttPath, '功能码错误')\n return FcValue\n # 检查功能码文件是否存在\n if not os.path.exists(FcAttPath):\n self.__ErrFuncCodeInfoAdd(FuncCodeName, FcAttPath, '文件不存在')\n return FcValue\n # 准备查找功能码\n FcGroup = FuncCodeName.split('-')[0]\n FcGroupindex = int(FuncCodeName.split('-')[1])\n GroupPatten = re.compile(FcGroup)\n GroupStart = False\n # 打开功能码文件,逐行查找\n with open(FcAttPath, 'r', encoding = 'utf-8') as FcAttFile:\n FcAttLines = FcAttFile.readlines()\n for line in FcAttLines:\n line = line.strip()\n # 逐行进行查找\n if(GroupStart == False):\n if(line.startswith('/*') and len(GroupPatten.findall(line)) >= 4):\n GroupStart = True\n CurGroupIndex = 0\n else:\n if(line.startswith('{') and len(line.split(','))>=5):\n if(CurGroupIndex == FcGroupindex):\n FcValue = line.split(',')[2].strip()\n break\n CurGroupIndex = CurGroupIndex + 1\n elif line.startswith('/*'):\n break\n # 确认是否找到功能码值\n if(FcValue == None):\n self.__ErrFuncCodeInfoAdd(FuncCodeName, FcAttPath, '功能码未找到')\n return FcValue\n # 判断功能码是否为十六进制或无效值\n HexNumPattern = re.compile(r'0x[A-Fa-f0-9]+')\n if not str(FcValue).isdigit():\n if(FcValue == HexNumPattern.findall(FcValue)[0]):\n FcValue = int(FcValue, 16)\n else:\n self.__ErrFuncCodeInfoAdd(FuncCodeName, FcAttPath, '功能码值无效:' + str(FcValue))\n FcValue = None\n else:\n FcValue = int(FcValue)\n\n return FcValue\n\n def __JudgeIsPathValidPjt(self, Path, FuncSel):\n Result = False\n\n if FuncSel in self.FuncSelCfg.keys():\n # 对配置的各个条件进行逐条匹配\n for key in self.FuncSelCfg[FuncSel].keys():\n ConfigPath = Path + '\\\\' + self.FuncSelCfg[FuncSel][key].strip('\\\\')\n ConfigDir = os.path.split(ConfigPath)[0]\n ConfigFile = os.path.split(ConfigPath)[-1]\n # 判断配置的工程路径是否存在\n if not os.path.exists(ConfigDir):\n Result = False\n break\n # 判断路径下是否存在匹配的文件\n Result = False\n dirList = os.listdir(ConfigDir)\n for Curdir in dirList:\n if True == fnmatch(Curdir, ConfigFile):\n Result = True\n break\n # 任意路径条件下不存在匹配文件则退出\n if Result == False:\n break\n else:\n Result = False\n\n return Result\n\n # 增加错误的功能码查找信息,文件查找结束后统一输出\n def __ErrFuncCodeInfoAdd(self, FuncCodeName, FcAttPath, errType):\n if(FuncCodeName in self.errFuncCode.keys()):\n if FcAttPath not in self.errFuncCode[FuncCodeName]['Path']:\n self.errFuncCode[FuncCodeName]['Path'].append(FcAttPath)\n self.errFuncCode[FuncCodeName]['Type'].append(errType)\n else:\n self.errFuncCode[FuncCodeName]={\n 'Path': [FcAttPath],\n 'Type': [errType]\n }\n\n # 将信息保存到Excel中\n def __SaveDateToExcel(self, SavePath):\n # 写入Excel中\n MotInfoBook = xlwt.Workbook(encoding='utf-8')\n sheet = MotInfoBook.add_sheet('电机信息汇总', cell_overwrite_ok=True)\n\n sheet.write(0, 0, '电机名称')\n\n RowCount 
= 0\n for key0 in self.MotorInfoSum.keys():\n # 对首行进行赋值\n if(RowCount == 0):\n ColumnCount = 1\n for key1 in self.MotorInfoSum[key0].keys():\n sheet.write(RowCount, ColumnCount, key1)\n ColumnCount = ColumnCount + 1\n RowCount = RowCount + 1\n \n # 对首列进行赋值\n ColumnCount = 0\n sheet.write(RowCount, ColumnCount, key0)\n ColumnCount = ColumnCount + 1\n\n # 对其他内容进行赋值\n for key1 in self.MotorInfoSum[key0].keys():\n sheet.write(RowCount, ColumnCount, self.MotorInfoSum[key0][key1])\n ColumnCount = ColumnCount + 1\n \n RowCount = RowCount + 1 \n try:\n MotInfoBook.save(str(SavePath) + r'\\电机信息汇总表.xls')\n except Exception as e:\n self.__InfoDispAdd('Error', '保存数据异常,请关闭文件后重试!')\n self.__InfoDispAdd('Error', str(e))\n\n # 拖拽选取文件夹\n def __DropSelectRootPath(self, rootDir):\n rootDir = str(rootDir[0], encoding= 'gbk').replace('\\\\\\\\', '\\\\').strip('b').strip('\\'')\n if(os.path.exists(rootDir) and os.path.isdir(rootDir)):\n self.rootPathVar.set(rootDir)\n\n # 按钮选取文件夹\n def __ButSelectRootPath(self):\n self.rootPathVar.set(filedialog.askdirectory().replace('/','\\\\'))\n\n # 点摁开始分析数据\n def __StartAnalysisPath(self):\n if os.path.exists(self.rootPathVar.get()):\n self.__InfoDispAdd('Result', '开始分析工程信息……')\n\n self.errFuncCode.clear() # 调用递归搜索之前先执行清除操作\n self.repeatPjt.clear() # 调用递归搜索之前先执行清除操作\n self.MotorInfoSum.clear() # 调用递归搜索之前先执行清除操作\n for item in self.FileDispTreeView.get_children():\n self.FileDispTreeView.delete(item)\n\n self.__PathRecursionSearch(self.rootPathVar.get()) # 读取工程信息\n\n # 显示查找异常的功能码 \n for key in self.errFuncCode.keys():\n SameNum = len(self.errFuncCode[key]['Path'])\n i = 0\n while i < SameNum:\n self.__InfoDispAdd('Warning', \\\n '功能码(' + str(key) + \\\n ')异常:' + str(self.errFuncCode[key]['Type'][i]) + \\\n ' 路径为:' + str(self.errFuncCode[key]['Path'][i]))\n i = i + 1\n\n self.__InfoDispAdd('Result', '分析结果存储中……')\n self.__SaveDateToExcel(self.rootPathVar.get()) # 将信息保存到Excel中\n\n self.__UpdateMotInfoDisap()\n\n self.__InfoDispAdd('Result', '工程信息分析结束!')\n\n # 下拉复选框被选中\n def __FuncSelected(self, SelValue):\n self.CurSelectFunc = str(self.selComBox.get())\n\n if self.CurSelectFunc in self.FuncSelList:\n self.__InfoDispAdd('Result', '已选择' + str(self.CurSelectFunc) + '模块,开始读取配置文件……')\n self.__ReadCfgFile(r'FeatureCfg_' + str(self.CurSelectFunc) + r'.ini')\n self.__InfoDispAdd('Result', str(self.CurSelectFunc) + '模块配置文件读取结束!')\n else:\n self.__InfoDispAdd('Result', '已选择' + str(self.selComBox.get()) + '选项,该选项无效!')\n\n # 增加一条显示信息\n def __InfoDispAdd(self, type, Info):\n\n CurTags = 'InfoDisp' + str(self.InfoDispNum)\n\n self.InfoTreeView.insert(\n '',\n self.InfoDispNum,\n values = (self.InfoDispNum, type, Info),\n tags = CurTags,\n iid = str(self.InfoDispNum)\n )\n\n # 根据类型改变字体颜色\n for ColorList in self.InfoDispColor:\n if(type == ColorList[0]):\n self.InfoTreeView.tag_configure(CurTags, foreground = ColorList[1])#, font = ('Arial', 10))\n\n self.InfoDispNum = self.InfoDispNum + 1\n\n # 移动滚动条到最新信息处,并更新窗口\n self.InfoTreeView.yview(MOVETO, 1.0)\n self.MainWin.update()\n\n # 打开路径按钮\n def __OpenAnsysPath(self):\n if os.path.exists(self.rootPathVar.get()):\n os.system('start explorer ' + self.rootPathVar.get())\n\n # 清空所有显示信息\n def __InfoDispClear(self):\n self.InfoDispNum = 1\n items = self.InfoTreeView.get_children()\n for item in items:\n self.InfoTreeView.delete(item)\n\n #加载指定的文件夹当中的文件,并且把它插入到树枝当中\n def __LoadTreeSubList(self, parentPath, root):\n #获取一个文件夹当中所有的元素\n if os.path.exists(parentPath) and os.path.isdir(parentPath): \n filelist = os.listdir(parentPath)\n for filename in 
filelist:\n                absPath = os.path.join(parentPath,filename)\n                # insert a branch into the tree\n                treey = self.FileDispTreeView.insert(root,'end',text=filename,values=(absPath))\n                # check whether the path is a directory\n                if os.path.isdir(absPath):\n                    self.__LoadTreeSubList(absPath,treey)\n\n    def __UpdateMotInfoDisap(self):\n        items = self.MotInfoSumTreeView.get_children()\n        for item in items:\n            self.MotInfoSumTreeView.delete(item)\n\n        TitleColumn = ['title0','title1']\n        TitleList = ['序号','电机信息']\n\n        RowCount = 0\n        for key0 in self.MotorInfoSum.keys():\n            # fill in the header row first\n            ColumnCount = len(TitleColumn)\n            if(RowCount == 0):\n                for key1 in self.MotorInfoSum[key0].keys():\n                    TitleList.append(key1)\n                    TitleColumn.append('title' + str(ColumnCount))\n                    ColumnCount = ColumnCount + 1\n\n                ColumnCount = 0\n                self.MotInfoSumTreeView.config(columns=TitleColumn, show='headings')\n                print(TitleColumn)\n\n                for TitleName in TitleList:\n                    self.MotInfoSumTreeView.column('title' + str(ColumnCount), width = 80, anchor = 'center')\n                    self.MotInfoSumTreeView.heading('title' + str(ColumnCount), text =TitleName)\n                    ColumnCount = ColumnCount + 1\n\n            RowCount = RowCount + 1\n            RowValue = [RowCount, key0]\n            for key1 in self.MotorInfoSum[key0].keys():\n                RowValue.append(self.MotorInfoSum[key0][key1])  # append the value, not the key name\n\n            self.MotInfoSumTreeView.insert('', RowCount, values = RowValue)\n\n    def __MouseRightClickFileTree(self, value):\n        print(value)\n\n    def __menuBarHelp(self):\n        messagebox.showinfo('帮助','使用方法:\\n 1.选择目录\\n 2.点击\"开始分析\"\\n\\n备注:生成的Excel保存在选择目录下')\n\n    def __menuBarAbout(self):\n        messagebox.showinfo('About','Feature Extract Tool\\nVersion:' + str(Version))\n\n    def __menuBarQuit(self):\n        self.MainWin.quit()\n\nif __name__ == '__main__':\n    FeatrueExtractmain()\n    pass","sub_path":"Proj5_FeatureExtract_more/FeatureExtractMain_More.py","file_name":"FeatureExtractMain_More.py","file_ext":"py","file_size_in_byte":33250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"514827431","text":"from setuptools import setup, find_packages\nimport os\n\ntry:\n    # Try reading the version.txt from the buildout directory.\n    versionfile = open(os.path.join('..', '..', 'version.txt'))\n    version = versionfile.read().strip()\n    versionfile.close()\nexcept IOError:\n    # fallback\n    version = '1.0'\n\n\nsetup(name='Products.minaraad',\n      version=version,\n      description=\"Product for minaraad.be\",\n      long_description=\"Product for minaraad.be\",\n      classifiers=[\n          \"Programming Language :: Python\",\n      ],\n      keywords='',\n      author='Zest Software',\n      author_email='info@zestsoftware.nl',\n      url='',\n      license='',\n      packages=find_packages(exclude=['ez_setup']),\n      namespace_packages=['Products'],\n      include_package_data=True,\n      zip_safe=False,\n      install_requires=[\n          'setuptools',\n          'five.grok',\n          'quintagroup.formlib.captcha',\n      ],\n)\n","sub_path":"src/Products.minaraad/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"193213908","text":"import random\n\ndef wordList():\n    words = [\"banana\", \"apple\", \"orange\", \"pineapple\", \"papaya\"]\n    return random.choice(words)\n\ndef playAgain():\n    answer = input('Would you like to play again ? 
y/n ' ).lower()\n if answer == 'y':\n hangman()\n else:\n pass\n\ndef hangman():\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n word = wordList()\n letters_guessed = []\n tries = 7\n guessed = False\n\n print('The word contains', len(word), 'letters.')\n print(len(word) * ' _ ')\n\n while guessed == False and tries > 0:\n print('You have ' + str(tries) + ' tries')\n guess = input('Please enter one letter or the full word.').lower()\n\n # when the user input a letter\n if len(guess) == 1:\n if guess not in alphabet:\n print('You have not entered a letter.')\n elif guess in letters_guessed:\n print('you have already guessed that letter before')\n elif guess not in word:\n print('Sorry, that is not a word')\n letters_guessed.append(guess)\n tries -= 1\n elif guess in word:\n print('Good job !')\n letters_guessed.append(guess)\n else:\n print('Error, please try again.')\n \n # when the user input the full word\n elif len(guess) == len(word):\n if guess == word:\n print('Good job! You have guessed the word !')\n guessed = True\n else:\n print('Sorry, wrong word.')\n tries -= 1\n\n # when the user input wrong length of word\n else:\n print('The length of your guess is not the same as the word you guess.')\n\n # for loop to show the input word or empty space\n status =''\n if guessed == False:\n for letter in word:\n if letter in letters_guessed:\n status += letter\n else:\n status += ' _ '\n print(status)\n\n # if the user gets correct guess\n if status == word:\n print('Good Job, you guessed the word!')\n guessed = True\n elif tries == 0:\n print('You have run out of tries and you have not guessed the word.')\n\n playAgain()\n\nhangman()","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426628883","text":"import dataclasses\nimport random\nimport uuid\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, List\n\nfrom datahub.ingestion.source.bigquery_v2.bigquery_audit import (\n AuditEvent,\n BigqueryTableIdentifier,\n BigQueryTableRef,\n QueryEvent,\n ReadEvent,\n)\nfrom datahub.ingestion.source.bigquery_v2.bigquery_config import BigQueryV2Config\nfrom tests.performance.data_model import Query, Table\n\n# https://cloud.google.com/bigquery/docs/reference/auditlogs/rest/Shared.Types/BigQueryAuditMetadata.TableDataRead.Reason\nREAD_REASONS = [\n \"REASON_UNSPECIFIED\",\n \"JOB\",\n \"TABLEDATA_LIST_REQUEST\",\n \"GET_QUERY_RESULTS_REQUEST\",\n \"QUERY_REQUEST\",\n \"CREATE_READ_SESSION\",\n \"MATERIALIZED_VIEW_REFRESH\",\n]\n\n\ndef generate_events(\n queries: Iterable[Query],\n projects: List[str],\n table_to_project: Dict[str, str],\n config: BigQueryV2Config,\n) -> Iterable[AuditEvent]:\n for query in queries:\n project = ( # Most queries are run in the project of the tables they access\n table_to_project[\n query.object_modified.name\n if query.object_modified\n else query.fields_accessed[0].table.name\n ]\n if random.random() >= 0.1\n else random.choice(projects)\n )\n job_name = str(uuid.uuid4())\n yield AuditEvent.create(\n QueryEvent(\n job_name=job_name,\n timestamp=query.timestamp,\n actor_email=query.actor,\n query=query.text,\n statementType=query.type,\n project_id=project,\n destinationTable=ref_from_table(query.object_modified, table_to_project)\n if query.object_modified\n else None,\n referencedTables=[\n ref_from_table(field.table, table_to_project)\n for field in query.fields_accessed\n if not field.table.is_view()\n 
],\n referencedViews=[\n ref_from_table(field.table, table_to_project)\n for field in query.fields_accessed\n if field.table.is_view()\n ],\n payload=dataclasses.asdict(query)\n if config.debug_include_full_payloads\n else None,\n )\n )\n table_accesses = defaultdict(list)\n for field in query.fields_accessed:\n table_accesses[ref_from_table(field.table, table_to_project)].append(\n field.column\n )\n\n for ref, columns in table_accesses.items():\n yield AuditEvent.create(\n ReadEvent(\n jobName=job_name,\n timestamp=query.timestamp,\n actor_email=query.actor,\n resource=ref,\n fieldsRead=columns,\n readReason=random.choice(READ_REASONS),\n payload=dataclasses.asdict(query)\n if config.debug_include_full_payloads\n else None,\n )\n )\n\n\ndef ref_from_table(table: Table, table_to_project: Dict[str, str]) -> BigQueryTableRef:\n return BigQueryTableRef(\n BigqueryTableIdentifier(\n table_to_project[table.name], table.container.name, table.name\n )\n )\n","sub_path":"metadata-ingestion/tests/performance/bigquery.py","file_name":"bigquery.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"408414938","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom builtins import range\n\nfrom collections import defaultdict\n\nfrom .__init__ import seg_txt\nfrom .word2 import WORD2\n\nSMALLCHAR = set(\n('很', '则', '该', '次', '给', '又', '里', '号', '着', '名', '可', '更', '由', '下', '至', '或', '多', '大', '新', '并', '让', '她', '已', '向', '其', '股', '点', '们', '所', '会', '要', '于', '前', '来', '万', '比', '只', '及', '地', '队', '个', '不', '说', '第', '元', '人', '一', '分', '被', '我', '这', '到', '都', '从', '等', '时', '以', '上', '后', '就', '将', '而', '还', '他', '但', '对', '也', '与', '为', '中', '年', '月', '日', '有', '和', '是', '在', '了', '的', )\n)\n\nSTOPWORD = set(\"的了是在有而以但一我你他它个啊这\")\n\ndef seg_txt2(txt):\n for i in seg_txt(txt):\n i = i.lower()\n if len(i) > 3:\n yield i\n else:\n i = i.decode(\"utf-8\",\"ignore\")\n if len(i) == 1:\n if \"一\" <= i <= \"龥\" and i not in STOPWORD:\n yield i\n else:\n yield i \n\ndef seg_txt_2_dict(txt):\n result = defaultdict(int)\n for word in seg_txt_search(txt):\n result[word] += 1\n return result\n\ndef word_len2(s):\n tmp = [\"\"]\n for i in s:\n if \"一\" <= i <= \"龥\" and i not in STOPWORD:\n tmp[-1] += i\n elif tmp[-1]:\n tmp.append(\"\")\n result = []\n tmp_word = []\n for y in tmp:\n if y:\n for i in range(len(y)-1):\n w = y[i:i+2]\n if w in WORD2:\n # if len(tmp) >= 2:\n result.extend(tmp_word)\n result.append(w)\n tmp_word = []\n else:\n tmp_word.append(w)\n #if len(tmp_word) >= 2:\n result.extend(tmp_word)\n if len(y) <= 5:\n result.append(y)\n return result\n\ndef seg_title_search(txt):\n result = []\n buffer = []\n for word in seg_txt(txt):\n word = word.decode(\"utf-8\", \"ignore\")\n\n if len(word) == 1:\n buffer.append(word)\n else:\n for i in buffer:\n result.append(i)\n if len(buffer) > 1:\n result.extend(word_len2(\"\".join(buffer)))\n buffer = []\n if len(word) <= 16:\n word = word.lower()\n utf8_word = word.encode(\"utf-8\", \"ignore\")\n if utf8_word.isalnum():\n result.append(word)\n else:\n for i in word:\n result.append(i)\n if len(word) <= 2:\n result.append(utf8_word)\n else:\n result.extend(word_len2(word))\n\n if len(buffer) > 1:\n result.extend(word_len2(\"\".join(buffer)))\n elif buffer:\n if \"一\" <= buffer[0] <= \"龥\":\n if buffer[0] not in SMALLCHAR:\n result.append(buffer[0])\n\n\n 
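word_len2 above reduces a run of CJK characters to two-character candidates and keeps the ones present in the WORD2 bigram lexicon. The core of that scan as a self-contained sketch — the three-entry lexicon here is a made-up stand-in for the real WORD2 table:

    # Hypothetical two-character lexicon standing in for WORD2.
    LEXICON = {'文学', '网站', '作家'}

    def lexicon_bigrams(text):
        # slide a width-2 window over the run and keep the known bigrams
        return [text[i:i + 2] for i in range(len(text) - 1)
                if text[i:i + 2] in LEXICON]

    print(lexicon_bigrams('文学网站'))  # ['文学', '网站']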
result = [i.encode(\"utf-8\", \"ignore\") if type(i) is str else i for i in result]\n\n return result\n\ndef seg_keyword_search(txt):\n return sorted(seg_title_search(txt),key=lambda x:-len(x))\n\ndef seg_txt_search(txt):\n result = []\n buffer = []\n def _():\n if len(buffer) > 1:\n result.extend(word_len2(\"\".join(buffer)))\n elif buffer:\n if \"一\" <= buffer[0] <= \"龥\":\n if buffer[0] not in SMALLCHAR:\n result.append(buffer[0])\n\n for word in seg_txt(txt):\n word = word.decode(\"utf-8\", \"ignore\")\n if len(word) == 1:\n buffer.append(word)\n else:\n _()\n buffer = []\n if len(word) <= 16:\n word = word.lower()\n utf8_word = word.encode(\"utf-8\", \"ignore\")\n if utf8_word.isalnum():\n result.append(word)\n elif len(word) <= 2:\n result.append(utf8_word)\n else:\n result.extend(word_len2(word))\n\n _()\n\n result = [i.encode(\"utf-8\", \"ignore\") if type(i) is str else i for i in result]\n\n return result\n\n\n\nif __name__ == \"__main__\":\n for i in word_len2(\"是:张无忌\"):\n print(i)\n\n for i in word_len2(\"晋江文学网站驻站作家,已出版多部作品。\"):\n print(i)\n","sub_path":"mmseg/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"498309579","text":"# Partition list at node with value x\nfrom singly_linkedlist import LinkedList\n\n\n# partition list at value x\ndef partiton(root, x):\n left = []\n right = []\n\n current = root\n\n while current:\n if current.value >= x:\n right.append(current.value)\n else:\n left.append(current.value)\n\n current = current.next\n\n return combine(left, right)\n\n\ndef combine(left, right):\n combined_list = LinkedList()\n\n for node in right:\n combined_list.insert(node)\n\n for node in left:\n combined_list.insert(node)\n\n return combined_list\n\n\nex = LinkedList()\nex.insert(1)\nex.insert(2)\nex.insert(10)\nex.insert(5)\nex.insert(8)\nex.insert(5)\nex.insert(3)\nex.insert(6)\nex.insert(4)\nex.insert(3)\npart = partiton(ex.head, 5)\nprint(part)\npart = partiton(ex.head, 4)\nprint(part)\n","sub_path":"2-LinkedLists/linkedlist4.py","file_name":"linkedlist4.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"15581390","text":"from django.shortcuts import render\nfrom django.http import HttpRequest, HttpResponse, HttpResponseRedirect\nfrom .models import Flight\nfrom django import forms\nfrom django.template import loader\nfrom django.shortcuts import get_object_or_404, render, get_list_or_404\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\n\nclass SearchFlightForm(forms.Form):\n flight_number = forms.IntegerField(required=False)\n flight_date = forms.DateField(required=False)\n\n\ndef getFlights(req):\n\n flights = get_list_or_404(Flight)\n return render(req, 'flights/index.html', {\n 'flights': flights,\n })\n\n\ndef getFlight(req, flight_number):\n flight = get_object_or_404(Flight, pk=flight_number)\n return render(req, 'flights/flight.html', {'flight': flight})\n\n\ndef search(req, search_method):\n try:\n if search_method == 'flight_number':\n flight_number = req.POST['flight_number']\n if flight_number:\n flight = get_object_or_404(Flight, pk=flight_number)\n return HttpResponseRedirect(reverse('flights:details', args=(flight.flight_number,)))\n elif search_method == 'departure_date':\n departure_date = req.POST['departure_date']\n if departure_date:\n flights = get_list_or_404(\n Flight, 
departure_date=departure_date)\n return render(req, 'flights/index.html', {\n \"flights\": flights,\n })\n else:\n return render(req, 'flights/index.html', {\n 'error_message': \"Invalid search method\",\n })\n except KeyError:\n return render(req, 'flights/index.html', {\n 'error_message': \"Unspecified search terms\",\n })\n","sub_path":"flights/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565598523","text":"### John Dogan ###\n## STATE: COMPLETED ##\nfrom collections import Counter\n\n'''\nMAIN FUNCTION\n'''\ndef main():\n \n win = open(\"mbox_stats.txt\", \"w\")\n \n win.write(\"-- MBOX STATS --\")\n \n win.write(\"\\n\\nNumber of Lines: \" + countLines());\n \n win.write(\"\\n\\nNumber of Messages: \" + countMessage());\n \n win.write(\"\\n\\nIPs: \" + listIP());\n \n win.write(\"\\n\\nMail of Top 3: \" + mailofTop3());\n \n win.write(\"\\n\\nAverage Body of Messages: \" + avgBody());\n \n win.write(\"\\n\\nMonthly Stats (Month, Number of Messages): \" + monthStats());\n\n'''\nCOUNT LINES FUNCTION\n@param count The number of lines\n@return count Returns string value of total amount of lines\n''' \ndef countLines():\n \n fin = open(\"mbox.txt\", \"r\")\n \n count = 0;\n\n for line in fin:\n \n count = count + 1;\n \n return str(count);\n\n'''\nCOUNT MESSAGES FUNCTION\n@param count The number of messages\n@return count Returns string value of total amount of messages\n''' \ndef countMessage():\n \n fin = open(\"mbox.txt\", \"r\")\n \n count = 0;\n \n for line in fin:\n \n if \"From:\" in line:\n \n count = count + 1;\n \n fin.close()\n \n return str(count);\n\n'''\nLIST IPs FUNCTION\n@param ip The list of all IPs \n@return ip Returns the list of all IPs in mbox.txt\n''' \ndef listIP():\n \n fin = open(\"mbox.txt\", \"r\")\n \n ips = []\n \n for line in fin:\n \n if \"Received:\" in line:\n \n one = line.find(\"[\")\n \n two = line.find(\"]\")\n \n if(one & two != -1):\n \n if(line[one + 1:two] != \"unix socket\"):\n \n ips.append(line[one + 1:two])\n \n ips = list(set(ips))\n \n fin.close()\n \n return str(ips);\n\n'''\nLIST E-MAILS OF TOP 3 SENDERS FUNCTION\n@param top3 List of top 3 senders\n@return top3 Returns the top3 senders in mbox.txt\n@return recipients Returns the recipients in mbox.txt\n'''\ndef mailofTop3():\n \n fin = open(\"mbox.txt\", \"r\")\n \n mails = []\n \n top3 = []\n \n recipients = [];\n \n for line in fin:\n \n if \"Author: \" in line:\n \n mails.append(line[8:-1])\n \n c = Counter(mails)\n \n mails = c.most_common();\n \n mails = mails[0:3];\n \n for list in range(3):\n \n top3.append(mails[list][0])\n \n fin = open(\"mbox.txt\", \"r\")\n \n for line in fin:\n \n if \"To:\" in line:\n \n s = line.split();\n \n recipients.append(s[1])\n \n c = Counter(recipients)\n \n recipients = c.most_common(); \n \n fin.close();\n \n return str(top3) + \"\\nAll Recipients recieved emails from the same e-mail: \" + str(recipients[0][0])\n\n'''\nAVG SIZE OF MESSAGE BODY FUNCTION\n@param bodies The added bodies of the mbox.txt\n@return avgBody Returns the bodies of mbox.txt divided by total message amount.\n'''\ndef avgBody():\n \n fin = open(\"mbox.txt\", \"r\")\n \n file = fin.readlines();\n \n body = False;\n \n bodies = 0;\n \n messages = countMessage();\n \n for i in range(len(file)):\n \n if \"X-DSPAM-Probability: \" in file[i]:\n \n body = True;\n \n elif \"From\" in file[i]:\n \n if len(file[i].split()) == 7:\n \n body = False;\n \n elif 
body == True:\n \n bodies = len(file[i]) + bodies;\n \n avgBody = bodies/int(messages)\n \n return str(avgBody);\n'''\nNUMBER OF MESSAGES SENT FOR EACH MONTH FUNCTION\n@param date List of mails sent in the month and year\n@return date Returns the amount of mails sent in each month in mbox.txt\n'''\ndef monthStats():\n \n fin = open(\"mbox.txt\", \"r\")\n \n date = [];\n \n for line in fin:\n \n if \"From\" in line:\n \n if len(line.split()) == 7:\n \n s = line.split()\n \n date.append(s[3])\n \n c = Counter(date)\n \n date = c.most_common()\n \n fin.close();\n\n return str(date)\n\nmain()","sub_path":"MBOX Reader.py","file_name":"MBOX Reader.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387586329","text":"import os\nimport torch\nimport codecs\n\nUSE_CUDA = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\n\n####################################################################\n# dataset.py 相关参数\n####################################################################\ncorpus_name = \"cornell movie-dialogs corpus\" # 词汇文件夹名\ncorpus = os.path.join(\"data\", corpus_name) # 词汇文件夹路径\nconv_filepath = os.path.join(corpus, \"movie_conversations.txt\") # movie_conversations文件路径\nmovie_lines_file = os.path.join(corpus, \"movie_lines.txt\") # movie_lines文件路径\ndatafile = os.path.join(corpus, \"formatted_movie_lines.txt\") # 格式化电影对话数据文件路径,一行中有两句话,中间用\\t隔开\nsave_dir = os.path.join(\"data\", \"save\") # 保存文件夹路径\n\ntrimed_datafile = \"trimed_formatted_movie_lines.txt\" # 修剪过的格式化电影对话数据文件\n\ndelimiter = '\\t'\ndelimiter = str(codecs.decode(delimiter, \"unicode_escape\")) # 分隔符\n\nMOVIE_LINES_FIELDS = [\"lineID\", \"characterID\", \"movieID\", \"character\", \"text\"] # ��影字段信息\nMOVIE_CONVERSATIONS_FIELDS = [\"character1ID\", \"character2ID\", \"movieID\", \"utteranceIDs\"] # 对话字段信息\n\nvoc_file = \"voc.pkl\"\n\n# 默认词向量\nPAD_token = 0 # 填充符号\nSOS_token = 1 # 句首符号\nEOS_token = 2 # 句尾符号\nMAX_LENGTH = 10 # 句子最大长度\nMIN_COUNT = 3 # 修剪的最小字数阈值\nsmall_batch_size = 5 # 每批数据数量\n\n\n####################################################################\n# train.py 相关参数\n####################################################################\n# 配置模型\nmodel_name = 'cb_model' # 模型名称\nattn_model = 'dot' # 注意力机制类型\n# attn_model = 'general'\n# attn_model = 'concat'\nhidden_size = 500 # GRU网络中的输出维度,GRU网络中的第二个输入\nembedding_dim = 500 # 输入单词的特征维数,GRU网络中的第一个输入\nencoder_n_layers = 2 # encoder中网络层数\ndecoder_n_layers = 2 # decoder中网络层数\ndropout = 0.1 # 神经节点丢弃率\nbatch_size = 64 # 批大小\n\n# 设置检查点以加载; 如果从头开始,则设置为None\n# loadFilename = None\ncheckpoint_iter = 4000\nloadFilename = os.path.join(save_dir, model_name, corpus_name,\n '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),\n '{}_checkpoint.tar'.format(checkpoint_iter))\n\n# 配置训练/优化\nclip = 50.0\nteacher_forcing_ratio = 1.0 # 教师强迫率\nlearning_rate = 0.0001 # encoder中Adam优化器的学习率\ndecoder_learning_ratio = 5.0 # decoder中Adam优化器的学习率\nn_iteration = 4000 # 迭代次数,取多少批数据\nprint_every = 1 # 每次打印多少\nsave_every = 500 # 每次保存多少\n\n\n","sub_path":"2.cornell_movie_chatbot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"112936461","text":"# greeting.py\n#\n# Demonstration of the pyparsing module, on the prototypical \"Hello, World!\"\n# example\n#\n# Copyright 2003, by Paul McGuire\n#\nfrom pyparsing import Word, alphas\n\n# 
define grammar\ngreet = Word( alphas ) + \",\" + Word( alphas ) + \"!\"\n\n# input string\nhello = \"ganesh, ramesh!\"\n\ntry:\n    # parse input string\n    print(hello, \"->\", greet.parseString( hello ))\n\nexcept Exception as error:\n    print(error)\n","sub_path":"PyParser/greeting.py","file_name":"greeting.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"234652738","text":"from __future__ import absolute_import\n\nimport itertools\nimport logging\nimport os\n\nfrom populus.utils.compile import (\n    get_project_source_paths,\n    get_test_source_paths,\n)\n\n\ndef compile_project_contracts(project):\n    logger = logging.getLogger('populus.compilation.compile_project_contracts')\n\n    project_contract_source_paths = get_project_source_paths(project.contracts_source_dir)\n    logger.debug(\n        \"Found %s project source files: %s\",\n        len(project_contract_source_paths),\n        \", \".join(project_contract_source_paths),\n    )\n\n    test_contract_source_paths = get_test_source_paths(project.tests_dir)\n    logger.debug(\n        \"Found %s test source files: %s\",\n        len(test_contract_source_paths),\n        \", \".join(test_contract_source_paths),\n    )\n\n    all_source_paths = tuple(itertools.chain(\n        project_contract_source_paths,\n        test_contract_source_paths,\n    ))\n\n    compiler_backend = project.get_compiler_backend()\n    compiled_contract_data = compiler_backend.get_compiled_contract_data(\n        source_file_paths=all_source_paths,\n        import_remappings=None,\n    )\n\n    logger.info(\"> Found %s contract source files\", len(all_source_paths))\n    for path in all_source_paths:\n        logger.info(\"  - %s\", os.path.relpath(path))\n\n    logger.info(\"> Compiled %s contracts\", len(compiled_contract_data))\n    for contract_name in sorted(compiled_contract_data.keys()):\n        logger.info(\"  - %s\", contract_name)\n\n    return all_source_paths, compiled_contract_data\n","sub_path":"populus/compilation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"7783096","text":"# Handles the persistence of the original translation by saving to a JSON file\nimport json\nimport os.path\n\nfile_name = \"original.json\"\n\n# Save Json to file\ndef save(message_id:int, translation:str):\n    data = {}\n    if os.path.isfile(file_name):\n        with open(file_name) as json_file:\n            data = json.load(json_file)\n\n    # JSON object keys are always strings, so store by str(message_id)\n    data[str(message_id)] = translation\n    # overwrite ('w') rather than append ('a'): appending would leave several\n    # JSON documents in one file and break the next json.load\n    with open(file_name, 'w') as outfile:\n        json.dump(data, outfile)\n\n# Get Translation from file\ndef get(message_id:int) -> str:\n    with open(file_name) as json_file:\n        data = json.load(json_file)\n\n    return data[str(message_id)]","sub_path":"original.py","file_name":"original.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"500212681","text":"# -*- coding: utf-8 -*-\n\"\"\"Example for R-graph\n\"\"\"\n# Author: Michiel Bongaerts (but not author of the R-graph method)\n# License: BSD 2 clause\n\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\n# temporary solution for relative imports in case pyod is not installed\n# if pyod is installed, no need to use the following line\nsys.path.append(\n    os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), '..')))\n\nfrom pyod.models.rgraph import RGraph\nfrom pyod.utils.data import 
generate_data\nfrom pyod.utils.data import evaluate_print\n\n\nif __name__ == \"__main__\":\n\n contamination = 0.1 # percentage of outliers\n n_train = 100 # number of training points\n n_test = 100 # number of testing points\n\n # Generate sample data\n X_train, X_test, y_train, y_test = generate_data(\n n_train=n_train,\n n_test=n_test,\n n_features=70,\n contamination=contamination,\n behaviour=\"new\",\n random_state=42,\n )\n\n\n # train R-graph detector\n clf_name = 'R-graph'\n clf = RGraph(n_nonzero = 100, transition_steps = 20 , gamma = 50, blocksize_test_data = 20,\n tau = 1, preprocessing=True, active_support = False, gamma_nz = False,\n algorithm= 'lasso_lars', maxiter= 100, verbose =1 )\n\n clf.fit(X_train)\n\n\n # get the prediction labels and outlier scores of the training data\n y_train_pred = clf.labels_ # binary labels (0: inliers, 1: outliers)\n y_train_scores = clf.decision_scores_ # raw outlier scores\n\n # # get the prediction on the test data\n y_test_pred = clf.predict(X_test) # outlier labels (0 or 1)\n y_test_scores = clf.decision_function(X_test) # outlier scores\n\n # evaluate and print the results\n print(\"\\nOn Training Data:\")\n evaluate_print(clf_name, y_train, y_train_scores)\n print(\"\\nOn Test Data:\")\n evaluate_print(clf_name, y_test, y_test_scores)\n\n\n\n","sub_path":"examples/rgraph_example.py","file_name":"rgraph_example.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"476428102","text":"#!/usr/bin/python3\n\n# Look at this URL for list data schema\n# http://www.nasdaqtrader.com/trader.aspx?id=symboldirdefs\n\nimport company\nimport main\nimport json\nimport pickle\nimport requests\nfrom bs4 import BeautifulSoup\nimport pprint\nimport datetime\n\n\ndef get_data(row):\n sym, name, etf = row['SYM'], row['Name'], row['ETF']\n if etf:\n if etf == 'Y':\n etf = True\n return {}\n else:\n etf = False\n else:\n etf = False\n BASE_URL = main.HOME_URL + sym\n page = requests.get(f\"{BASE_URL}\")\n soup = BeautifulSoup(page.content, features='html.parser')\n results = soup.find_all('h1', {'class': 'company__name'})\n if results:\n name = results[0].get_text()\n results = soup.find_all('bg-quote', {'class': 'value'})\n price = None\n try:\n price = results[0].get_text()\n price = float(price.replace(',', ''))\n except IndexError:\n price = None\n except ValueError:\n print(f\"Value Error {price}\")\n price = None\n results = soup.find_all('li', {'class': 'kv__item'})\n volume = None\n avg_volume = None\n peratio = None\n market_cap = None\n eps = None\n yield_data = None\n dividend = None\n revperemployee = None\n i = 0\n for result in results:\n data = result.get_text()\n data = data.strip().split()\n data = data[len(data) - 1]\n if data == 'N/A':\n data = None\n if i == 3:\n market_cap = data\n if market_cap:\n market_cap = convert_multiplier(market_cap)\n elif i == 7:\n revperemployee = data\n if revperemployee:\n revperemployee = convert_multiplier(revperemployee)\n elif i == 8:\n peratio = data\n if peratio:\n peratio = float(peratio.replace(',', ''))\n elif i == 9:\n eps = data\n if eps:\n eps = float(eps.replace(',', '').replace('$', ''))\n elif i == 10:\n yield_data = data\n if yield_data:\n yield_data = float(yield_data.replace(',', '').replace('%', ''))\n elif i == 11:\n dividend = data\n if dividend:\n dividend = float(dividend.replace(',', '').replace('$', ''))\n elif i == 15:\n avg_volume = data\n if avg_volume:\n avg_volume = 
convert_multiplier(avg_volume)\n i += 1\n\n profile_page = requests.get(f\"{BASE_URL}{main.PROFILE}\")\n soup = BeautifulSoup(profile_page.content, features='html.parser')\n results = soup.find_all('li', {'class': 'kv__item w100'})\n\n i = 0\n sector = None\n industry = None\n for result in results:\n data = result.get_text().strip().split('\\n')\n data = data[len(data) - 1]\n if i == 0:\n sector = data\n elif i == 1:\n industry = data\n i += 1\n\n financial_page = requests.get(f\"{BASE_URL}{main.FINANCIALS}\")\n soup = BeautifulSoup(financial_page.content, features='html.parser')\n\n i = 0\n results = soup.find_all('th', {'class': 'overflow__heading'})\n year_list = []\n revenue_history = {}\n interest_expense_history = {}\n cogs_incl_da = {}\n cogs_excl_da = {}\n gross_profit_margin = {}\n sga_expense = {}\n unusual_expense = {}\n ebit_after_unusual_expense = {}\n net_income = {}\n eps_historical = {}\n ebitda = {}\n ebitda_margin = {}\n for result in results:\n if i > 0:\n data = result.get_text().strip()\n if data != '5-year trend':\n year_list.append(int(data))\n i += 1\n i = 1\n j = 1\n col_count = len(year_list)\n results = soup.find_all('td', {'class': 'overflow__cell'})\n row_data = {}\n for result in results:\n data = result.get_text().strip()\n if 1 < i < len(year_list) + 1:\n if data and data != '-':\n if '(' in data:\n data = data.replace('(', '').replace(')', '')\n data = '-' + data\n data = data.replace('%', '')\n data = convert_multiplier(data)\n row_data[year_list[i - 2]] = data\n else:\n row_data[year_list[i - 2]] = None\n\n if not i % (col_count + 2):\n if j == 1:\n revenue_history = row_data\n elif j == 3:\n cogs_incl_da = row_data\n elif j == 5:\n cogs_excl_da = row_data\n elif j == 11:\n gross_profit_margin = row_data\n elif j == 12:\n sga_expense = row_data\n elif j == 17:\n unusual_expense = row_data\n elif j == 18:\n ebit_after_unusual_expense = row_data\n elif j == 22:\n interest_expense_history = row_data\n elif j == 46:\n net_income = row_data\n elif j == 49:\n eps_historical = row_data\n elif j == 55:\n ebitda = row_data\n elif j == 57:\n ebitda_margin = row_data\n j += 1\n i = 1\n row_data = {}\n else:\n i += 1\n\n # balance_page = requests.get(f\"{BASE_URL}{main.BALANCE_SHEET}\")\n # soup = BeautifulSoup(balance_page.content, features='html.parser')\n # results = soup.find_all('div', {\n # 'class': [\n # 'Ta(c) Py(6px) Bxz(bb) BdB Bdc($seperatorColor) Miw(120px) Miw(140px)--pnclg D(tbc)',\n # 'Ta(c) Py(6px) Bxz(bb) BdB Bdc($seperatorColor) Miw(120px) Miw(140px)--pnclg Bgc($lv1BgColor)'\n # ' fi-row:h_Bgc($hoverBgColor) D(tbc)'\n # ]\n # })\n liabilities = []\n # i = 0\n # for result in results:\n # if i in [128, 129, 130, 131]:\n # data = result.get_text()\n # if data:\n # data = float(data.replace(',', ''))\n # else:\n # data = None\n # liabilities.append(data)\n # i += 1\n company_dict = {\n 'SYM': sym,\n 'Name': name,\n 'ETF': etf,\n 'Price': price,\n 'Volume': volume,\n 'Average Volume': avg_volume,\n 'P/E Ratio': peratio,\n 'Market Cap': market_cap,\n 'EPS': eps,\n 'Industry': industry,\n 'Sector': sector,\n 'Revenue History': revenue_history,\n 'Profit History': net_income,\n 'Interest Expense History': interest_expense_history,\n 'Liabilities': liabilities,\n 'RevperEmployee': revperemployee,\n 'Dividend': dividend,\n 'Yield': yield_data,\n 'Cogs_incl_DA': cogs_incl_da,\n 'Cogs_excl_DA': cogs_excl_da,\n 'GrossProfitMargin': gross_profit_margin,\n 'SGAExpense': sga_expense,\n 'UnusualExpense': unusual_expense,\n 'EBITAfterUnusualExpense': 
ebit_after_unusual_expense,\n        'EPSHistorical': eps_historical,\n        'EBITDA': ebitda,\n        'EBITDAMargin': ebitda_margin\n    }\n    return company_dict\n\n\ndef convert_multiplier(value) -> float:\n    value = value.replace('$', '').replace(',', '')\n    if 'T' in value:\n        value = float(value.replace('T', ''))\n        multiplier = 1000000000000\n        value = value * multiplier\n    elif 'B' in value:\n        value = float(value.replace('B', ''))\n        multiplier = 1000000000\n        value = value * multiplier\n    elif 'M' in value:\n        value = float(value.replace('M', ''))\n        multiplier = 1000000\n        value = value * multiplier\n    elif 'K' in value:\n        value = float(value.replace('K', ''))\n        multiplier = 1000\n        value = value * multiplier\n    else:\n        value = float(value)\n    return value\n\n\nCOMPANIES = []\nPREPROCESSED = []\n\n\n# Reading NASDAQ file and adding data to list\ndef nasdaq_list_loader():\n    with open(\"nasdaqlisted.txt\") as f:\n        f.readline()\n        row = f.readline()\n        while row:\n            row = row.split('|')\n            entry = {\n                \"SYM\": row[0],\n                \"Name\": row[1],\n                \"ETF\": row[6]\n            }\n            PREPROCESSED.append(entry)\n            row = f.readline()\n        PREPROCESSED.remove(PREPROCESSED[len(PREPROCESSED) - 1])\n\n\ndef other_list_loader():\n    with open(\"otherlisted.txt\") as f:\n        f.readline()\n        row = f.readline()\n        while row:\n            row = row.split('|')\n            entry = {\n                \"SYM\": row[6],\n                \"Name\": row[1],\n                \"ETF\": row[4]\n            }\n            PREPROCESSED.append(entry)\n            row = f.readline()\n        PREPROCESSED.remove(PREPROCESSED[len(PREPROCESSED) - 1])\n\n\npp = pprint.PrettyPrinter(indent=3)\nmain_counter = 0\nnasdaq_list_loader()\nother_list_loader()\nfailure_count = 0\nfor entity in PREPROCESSED:\n    try:\n        company_dict = get_data(entity)\n    except Exception as e:\n        print(entity, e)  # log the failing entry together with the error\n        failure_count += 1\n        continue\n    # pp.pprint(company_dict)\n    if company_dict:\n        cmpny = company.Company(**company_dict)\n        COMPANIES.append(cmpny)\n    main_counter += 1\n    if main_counter % 100 == 0:\n        print(main_counter)\nfor c in COMPANIES:\n    if not c.Price:\n        COMPANIES.remove(c)\nwith open(main.DATA_FILE, \"wb\") as f:\n    pickle.dump(COMPANIES, f)\nprint(f\"Complete: {datetime.datetime.now()}\")\nprint(f\"Failed: {failure_count}\")","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":9357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"265533887","text":"\"\"\" Classes that store and analyze family relationships\n\n:Author: Arthur Goldberg \n:Date: 2017-12-09\n:Copyright: 2017-2018, Arthur Goldberg\n:License: MIT\n\"\"\"\n\nclass Error( Exception ):\n    \"\"\" Base class for exceptions in this module\n\n    Attributes:\n        message ( :obj:`str` ): the exception's message\n    \"\"\"\n    def __init__( self, message=None ):\n        super( ).__init__( message )\n\n\nclass RelatedPersonError( Error ):\n    \"\"\" Exception raised for errors in this module\n\n    Attributes:\n        message ( :obj:`str` ): the exception's message\n    \"\"\"\n    def __init__( self, message=None ):\n        super( ).__init__( message )\n\n\nclass Gender( object ):\n    \"\"\" Gender for a related person\n    \"\"\"\n    # gender constants to store in data\n    MALE = 'M'\n    FEMALE = 'F'\n    UNKNOWN = 'unknown'\n\n    GENDER_MAP = {\n        MALE:set( ['male', 'm', '1'] ),\n        FEMALE:set( ['female', 'f', '2'] ),\n        UNKNOWN:set( ['unknown', 'na', 'not specified', '-9', '0'] )\n    }\n\n    def genders_string_mappings( self ):\n        \"\"\" Report the mappings from strings to gender constants\n\n        Returns:\n            :obj:`str`: a description of the mappings from strings to gender constants\n        \"\"\"\n        rv = \"Legal genders, which are case 
insensitive, map to gender constants:\\n\"\n for gender_constant,synonyms in self.GENDER_MAP.items( ):\n rv += \"{} -> '{}'\\n\".format( synonyms, gender_constant )\n return rv\n\n def get_gender( self, gender ):\n \"\"\" Obtain a gender constant\n\n Convert a string into a gender constant, or, if that fails, raise an exception.\n\n Args:\n gender ( :obj:`str` ): a gender value\n\n Returns:\n :obj:`str`: a reference gender value, stored in constant value in this class\n\n Raises:\n :obj:`RelatedPersonError`: if `gender` does not map to a reference gender value\n \"\"\"\n for gender_constant,synonyms in Gender.GENDER_MAP.items( ):\n if gender.lower( ) in synonyms:\n return gender_constant\n raise RelatedPersonError( \"Illegal gender '{}'\".format( gender ) )\n\n\nclass RelatedPerson( object ):\n \"\"\" RelatedPerson\n\n Attributes:\n name ( :obj:`str` ): a related person's name\n gender ( :obj:`str` ): a related person's gender, which must be an attribute of `Gender`\n father ( :obj:`RelatedPerson` ): a related person's father\n mother ( :obj:`RelatedPerson` ): a related person's mother\n children ( :obj:`set` of `RelatedPerson` ): a related person's children\n \"\"\"\n\n def __init__( self, name, gender, mother=None, father=None ):\n \"\"\" Create a RelatedPerson instance\n\n Create a RelatedPerson instance. This is used by the expression RelatedPerson( ).\n The parameters name and gender are required, while other parameters are optional.\n\n Args:\n name ( :obj:`str` ): the related person's name\n gender ( :obj:`str` ): the related person's gender\n father ( :obj:`RelatedPerson`, optional ): the related person's father\n mother ( :obj:`RelatedPerson`, optional ): the related person's mother\n\n Raises:\n :obj:`RelatedPersonError`: if `gender` does not map to a reference gender value\n \"\"\"\n self.name = name\n self.gender = Gender( ).get_gender( gender )\n self.father = father\n self.mother = mother\n self.children = set( )\n\n def __repr__( self ):\n \"\"\" Provide a string representation of this related person\"\"\"\n return \"\".format( \n str( id( self ) ),\n self.name,\n self.gender\n )\n\n def __str__( self ):\n '''A representation of a RelatedPerson object'''\n return self.__repr__( )\n\n @staticmethod\n def get_related_persons_name( related_person ):\n \"\"\" Get a related person's name; if the person is not known, return 'NA'\n\n Args:\n related_person ( :obj:`RelatedPerson` ): a related person\n\n Returns:\n :obj:`str`: the related person's name, or 'NA' if they're not known\n \"\"\"\n if related_person is None:\n return 'NA'\n return related_person.name\n\n def set_father( self, father ):\n \"\"\" Set the father of this related person\n\n Args:\n father ( :obj:`RelatedPerson` ): this related person's father\n\n Raises:\n :obj:`RelatedPersonError`: if `father` is not male, or if a cycle in the ancestors\n graph would be created\n \"\"\"\n if father.gender != Gender.MALE:\n raise RelatedPersonError( \"father named '{}' is not male\".format( father.name ) )\n father.children.add( self )\n self.father = father\n\n def set_mother( self, mother ):\n \"\"\" Set the mother of this related person\n\n Args:\n mother ( :obj:`RelatedPerson` ): this related person's mother\n\n Raises:\n :obj:`RelatedPersonError`: if `mother` is not female, or if a cycle in the ancestors\n graph would be created\n \"\"\"\n if mother.gender != Gender.FEMALE:\n raise RelatedPersonError( \"mother named '{}' is not female\".format( mother.name ) )\n mother.children.add( self )\n self.mother = mother\n\n def 
remove_mother( self ):\n        \"\"\" Remove this related person's mother\n\n        Raises:\n            :obj:`RelatedPersonError`: if this related person does not have a mother or this related person is not one\n                of their mother's children\n        \"\"\"\n        if not isinstance( self.mother, RelatedPerson ):\n            raise RelatedPersonError( \"mother of '{}' is not set and cannot be removed\".format( self.name ) )\n        if not self in self.mother.children:\n            raise RelatedPersonError( \"cannot remove mother of '{}', not one of her children\".format( self.name ) )\n        self.mother.children.remove( self )\n        self.mother = None\n\n    def remove_father( self ):\n        \"\"\" Remove this related person's father\n\n        Raises:\n            :obj:`RelatedPersonError`: if this related person does not have a father or this related person is not one\n                of their father's children\n        \"\"\"\n        if not isinstance( self.father, RelatedPerson ):\n            raise RelatedPersonError( \"cannot remove father of '{}', as it is not set\".format( self.name ) )\n        if not self in self.father.children:\n            raise RelatedPersonError( \"cannot remove father of '{}', not one of his children\".format( self.name ) )\n        self.father.children.remove( self )\n        self.father = None\n\n    def add_child( self, child ):\n        \"\"\" Add a child to this related person's children, and set this related person as the child's father or mother\n\n        Args:\n            child ( :obj:`RelatedPerson` ): a child of `self`\n\n        Raises:\n            :obj:`RelatedPersonError`: if this related person does not have a known gender, or if a cycle in the\n                ancestors graph would be created\n        \"\"\"\n        if self.gender not in [Gender.FEMALE, Gender.MALE]:\n            raise RelatedPersonError( \"cannot add child to related person named '{}' with unknown gender\".format( \n                self.name ) )\n        if child in self.all_ancestors( ):\n            raise RelatedPersonError( \"making '{}' a child of '{}', would create ancestor cycle\".format( \n                child.name, self.name ) )\n        if self.gender == Gender.FEMALE:\n            child.set_mother( self )\n        if self.gender == Gender.MALE:\n            child.set_father( self )\n\n    def ancestors( self, min_depth, max_depth=None ):\n        \"\"\" Return this related person's ancestors within a generational depth range\n\n        Obtain ancestors whose generational depth satisfies `min_depth` <= depth <= `max_depth`. 
\n\n def ancestors( self, min_depth, max_depth=None ):\n \"\"\" Return this related person's ancestors within a generational depth range\n\n Obtain ancestors whose generational depth satisfies `min_depth` <= depth <= `max_depth`. E.g.,\n a related person's parents would be obtained with `min_depth` = 1, and this related person's parents and\n grandparents would be obtained with `min_depth` = 1 and `max_depth` = 2.\n\n Args:\n min_depth ( :obj:`int` ): the minimum depth of ancestors which should be provided;\n this related person's depth is 0, their parents' depth is 1, etc.\n max_depth ( :obj:`int`, optional ): the maximum depth of ancestors which should be\n provided; if `max_depth` is not provided, then `max_depth` == `min_depth` so that only\n ancestors at depth == `min_depth` will be provided; a `max_depth` of infinity will obtain\n all ancestors at depth >= `min_depth`.\n\n Returns:\n :obj:`set` of `RelatedPerson`: this related person's ancestors\n\n Raises:\n :obj:`RelatedPersonError`: if `max_depth` < `min_depth`\n \"\"\"\n if max_depth is not None:\n if max_depth < min_depth:\n raise RelatedPersonError( \"max_depth ( {} ) cannot be less than min_depth ( {} )\".format( \n max_depth, min_depth ) )\n else:\n # collect just one depth\n max_depth = min_depth\n collected_ancestors = set( )\n return self._ancestors( collected_ancestors, min_depth, max_depth )\n\n def _ancestors( self, collected_ancestors, min_depth, max_depth ):\n \"\"\" Obtain this related person's ancestors who lie within the generational depth [min_depth, max_depth]\n\n This is a private, recursive method that recurses through the ancestry via parent references.\n\n Args:\n collected_ancestors ( :obj:`set` ): ancestors collected thus far by this method\n min_depth ( :obj:`int` ): see `ancestors( )`\n max_depth ( :obj:`int` ): see `ancestors( )`\n\n Returns:\n :obj:`set` of `RelatedPerson`: this related person's ancestors\n\n Raises:\n :obj:`RelatedPersonError`: if `max_depth` < `min_depth`\n \"\"\"\n if min_depth <= 0:\n collected_ancestors.add( self )\n if 0 < max_depth:\n for parent in [self.mother, self.father]:\n if parent is not None:\n parent._ancestors( collected_ancestors, min_depth-1, max_depth-1 )\n return collected_ancestors\n\n def parents( self ):\n ''' Provide this related person's parents\n\n Returns:\n :obj:`set`: this related person's known parents\n '''\n return self.ancestors( 1 )\n\n def grandparents( self ):\n ''' Provide this related person's known grandparents\n\n Returns:\n :obj:`set`: this related person's known grandparents\n '''\n return self.ancestors( 2 )\n\n def all_ancestors( self ):\n ''' Provide all of this related person's known ancestors\n\n Returns:\n :obj:`set`: all of this related person's known ancestors\n '''\n return self.ancestors( 1, max_depth=float( 'inf' ) )\n\n def grandparents_and_earlier( self ):\n ''' Provide this related person's known grandparents, and all of their ancestors\n\n Returns:\n :obj:`set`: this related person's known grandparents and all of their ancestors\n '''\n return self.ancestors( 2, max_depth=float( 'inf' ) )\n","sub_path":"assignments/AY_2017_2018/semester_2/3/related_person.py","file_name":"related_person.py","file_ext":"py","file_size_in_byte":11292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24426852","text":"import os\r\nfrom flask import Flask, render_template, redirect, request, url_for, session\r\nfrom law_manager import collect_law_data\r\nimport json\r\nfrom functools import cmp_to_key\r\nfrom YAP_Wrapper.yap_api import YapApi\r\nimport compress_json\r\nfrom distance import levenshtein\r\n\r\napp = Flask(__name__)\r\napp.secret_key = \"efrat and yuval\"\r\n\r\nsearch_word = \"\"\r\ndata = []\r\nresult = 
[]\r\ncount = 1\r\nip = '127.0.0.1:8000'\r\nyap = YapApi()\r\n\r\n\r\ndef collect_yap_data():\r\n global ip\r\n global yap\r\n for law in data:\r\n for point in law[\"points\"]:\r\n if point:\r\n point_headline = point[\"point headline\"]\r\n tokenized_text, segmented_text, lemmas, dep_tree, md_lattice, ma_lattice = yap.run(point_headline, ip)\r\n new_segmented = clean_prefix(segmented_text)\r\n new_lemmas = clean_prefix(lemmas)\r\n point[\"yap\"] = [tokenized_text, new_segmented, new_lemmas]\r\n\r\n\r\ndef clean_prefix(yap_text):\r\n new_text = yap_text.split(' ')\r\n for i in range(len(new_text)):\r\n if len(new_text[i]) == 1:\r\n new_text[i] = ''\r\n new_text = \" \".join(new_text).replace(\" \", ' ') # with no prefix char\r\n return new_text\r\n\r\n\r\ndef clean(list):\r\n if '' in list:\r\n list.remove('')\r\n\r\n\r\ndef similar(point, search_word, search_segmented, search_lemmas):\r\n headline = point[\"point headline\"]\r\n point_segmented_text = point[\"yap\"][1]\r\n point_lemmas = point[\"yap\"][2]\r\n\r\n split_search = search_word.split(\" \")\r\n split_search_segment = search_segmented.split(\" \")\r\n split_search_lemmas = search_lemmas.split(\" \")\r\n\r\n split_lemmas_point = point_lemmas.split(\" \")\r\n split_headline = headline.split(\" \")\r\n split_segment_point = point_segmented_text.split(\" \")\r\n\r\n split_headline += split_segment_point + split_lemmas_point\r\n clean(split_search)\r\n clean(split_search_segment)\r\n clean(split_search_lemmas)\r\n\r\n for i in range(len(split_search)):\r\n if split_search[i] not in split_headline:\r\n if split_search_segment[i] not in split_headline:\r\n if split_search_lemmas[i] not in split_headline:\r\n return False\r\n return True\r\n\r\n\r\ndef search(search_word):\r\n global ip\r\n global yap\r\n global data\r\n points_list = []\r\n index = 1\r\n tokenized_text, segmented_text, lemmas, dep_tree, md_lattice, ma_lattice = yap.run(search_word, ip)\r\n search_segmented = clean_prefix(segmented_text)\r\n search_lemmas = clean_prefix(lemmas)\r\n for law in data:\r\n if law and law[\"points\"]:\r\n for point in law[\"points\"]:\r\n if point:\r\n is_short_content = True\r\n # if search_word == point[\"point headline\"]:\r\n if similar(point, search_word, search_segmented, search_lemmas):\r\n content = point[\"content\"].split(\"\\n\")\r\n clean(content)\r\n if len(content) > 3:\r\n is_short_content = False\r\n points_list.append({\"law name\": law[\"law_name\"],\r\n \"date\": law[\"date\"],\r\n \"point headline\": point[\"point headline\"],\r\n \"content\": content,\r\n \"index\": str(index),\r\n \"short\": is_short_content,\r\n \"link\": \"https://he.wikisource.org/wiki/\"+law[\"law_name\"].replace(\" \", \"_\")})\r\n index += 1\r\n\r\n return points_list\r\n\r\n\r\ndef open_files_and_collect_data():\r\n path = \"\".join(os.getcwd())\r\n for root, subFolder, files in os.walk(path):\r\n for item in files:\r\n if item.endswith(\"main.xml\"):\r\n file_name_path = str(root) + \"\\main.xml\"\r\n law_data = collect_law_data(file_name_path)\r\n if law_data is not None:\r\n data.append(law_data)\r\n\r\n\r\ndef compare(item1, item2):\r\n if item1[\"date\"] == '0' or item2[\"date\"] == '0':\r\n return -1\r\n date1 = item1[\"date\"]\r\n date2 = item2[\"date\"]\r\n split_date1 = date1.split(\"-\")\r\n split_date2 = date2.split(\"-\")\r\n if split_date1[0] == split_date2[0]:\r\n if split_date1[1] == split_date2[1]:\r\n if split_date1[2] == split_date2[2]:\r\n return 0\r\n else:\r\n return int(split_date1[2]) - int(split_date2[2])\r\n else:\r\n 
return int(split_date1[1]) - int(split_date2[1])\r\n else:\r\n return int(split_date1[0]) - int(split_date2[0])\r\n\r\n\r\ndef compare_lex(item1, item2):\r\n global search_word\r\n if search_word == item1[\"point headline\"]:\r\n return 1\r\n if search_word == item2[\"point headline\"]:\r\n return -1\r\n else:\r\n if levenshtein(item1[\"point headline\"], search_word) < levenshtein(item2[\"point headline\"], search_word):\r\n return 1\r\n else:\r\n return -1\r\n\r\n\r\ndef sort_by_date(results):\r\n return sorted(results, reverse=True, key=cmp_to_key(compare))\r\n\r\n\r\ndef sort_results(result):\r\n return sorted(result, reverse=True, key=cmp_to_key(compare_lex))\r\n\r\n\r\n@app.route(\"/result\", methods=['GET', 'POST'])\r\ndef result_found():\r\n global count\r\n global result\r\n res_len = len(result)\r\n if request.method == 'POST':\r\n if request.form[\"submit_button\"] == \"new search\":\r\n return redirect(url_for(\"home\"))\r\n elif request.form[\"submit_button\"] == \"load more\":\r\n count += 1\r\n elif request.form[\"submit_button\"] == \"sort\":\r\n result = sort_by_date(result)\r\n result = sort_results(result)\r\n return render_template(\"result.html\", results=result[:count * 20], count=count * 20, result_len=res_len)\r\n\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef home():\r\n global result\r\n global count\r\n global search_word\r\n if request.method == 'POST':\r\n search_word = request.form[\"search bar\"]\r\n result = search(search_word)\r\n count = 1\r\n return redirect(url_for(\"result_found\"))\r\n else:\r\n return render_template(\"search.html\")\r\n\r\n\r\nif __name__ == '__main__':\r\n if not (os.path.isfile('comp_data.json.gz') and os.access('comp_data.json.gz', os.R_OK)):\r\n if not (os.path.isfile('data.txt') and os.access('data.txt', os.R_OK)):\r\n with open(\"data.txt\", \"w\", encoding='utf8') as jsonfile:\r\n open_files_and_collect_data()\r\n collect_yap_data()\r\n json.dump(data, jsonfile, ensure_ascii=False)\r\n\r\n with open('data.txt', encoding='utf8') as data_json:\r\n data_load = json.load(data_json)\r\n compress_json.dump(data_load, \"comp_data.json.gz\")\r\n\r\n data = compress_json.load(\"comp_data.json.gz\")\r\n app.run()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"367483209","text":"import sys\r\nimport vtk\r\nimport csv\r\n# import xlrd\r\nfrom PyQt4 import QtCore, QtGui\r\nfrom vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor\r\n\r\n\r\nclass UI(QtGui.QMainWindow):\r\n solid = 1\r\n # Used to toggle between wireframe and solid mode\r\n showANT = 0\r\n # Used to show an hide antennas\r\n assemblyMade = False\r\n\r\n # denotes if the assembly has been made yet\r\n\r\n def __init__(self, parent=None):\r\n QtGui.QMainWindow.__init__(self, parent)\r\n\r\n # Holds all active antennas with keys being a tuple of an antenna's coordinates\r\n self.antennas = {}\r\n\r\n # Create Gui Frame\r\n self.frame = QtGui.QFrame()\r\n\r\n # Create and add VTK render window to QT for display of VTK objects\r\n self.layout = QtGui.QVBoxLayout()\r\n self.vtkWidget = QVTKRenderWindowInteractor(self.frame)\r\n self.layout.addWidget(self.vtkWidget)\r\n\r\n # Create renderer to render actor\r\n self.render = vtk.vtkRenderer()\r\n self.vtkWidget.GetRenderWindow().AddRenderer(self.render)\r\n self.interactor = self.vtkWidget.GetRenderWindow().GetInteractor()\r\n\r\n # Create mapper to draw 
actors\r\n self.mapper = vtk.vtkPolyDataMapper()\r\n\r\n # Create an actor\r\n self.actor = vtk.vtkActor()\r\n self.actor.SetMapper(self.mapper)\r\n\r\n # Add the actor to the renderer and reset camera so it is centered on the actor\r\n self.render.AddActor(self.actor)\r\n self.render.ResetCamera()\r\n\r\n self.frame.setLayout(self.layout)\r\n self.setCentralWidget(self.frame)\r\n\r\n self.show()\r\n self.interactor.Initialize()\r\n\r\n # Import button, used to import plane models from various 3D filetypes\r\n self.planes = QtGui.QPushButton('Import Plane Model', self)\r\n self.layout.addWidget(self.planes)\r\n self.planes.clicked.connect(self.readfiles)\r\n\r\n # Wireframe button, used to toggle between solid and wireframe mode\r\n self.wireframe = QtGui.QPushButton('Toggle Wireframe Mode', self)\r\n self.layout.addWidget(self.wireframe)\r\n self.wireframe.clicked.connect(self.toggleWireframe)\r\n\r\n # Import Antennas, used to import antennas from a csv\r\n self.antennaImport = QtGui.QPushButton('Import Antennas From CSV', self)\r\n self.layout.addWidget(self.antennaImport)\r\n self.antennaImport.clicked.connect(self.importCSV)\r\n\r\n # x, y, and z text fields for user to enter coordinates\r\n self.xInput = QtGui.QLineEdit()\r\n self.yInput = QtGui.QLineEdit()\r\n self.zInput = QtGui.QLineEdit()\r\n self.layout.addWidget(self.xInput)\r\n self.layout.addWidget(self.yInput)\r\n self.layout.addWidget(self.zInput)\r\n\r\n # Enter Antennas, used to enter the x, y, and z coordinates for an antenna\r\n self.addAntenna = QtGui.QPushButton('Enter Antenna Coordinates', self)\r\n self.layout.addWidget(self.addAntenna)\r\n self.addAntenna.clicked.connect(self.enterAntennaCoordinates)\r\n\r\n # Toggle Antennas, used to show/hide the antenna spheres\r\n self.antenna = QtGui.QPushButton('Toggle Antennas', self)\r\n self.layout.addWidget(self.antenna)\r\n self.antenna.clicked.connect(self.showAntenna)\r\n\r\n # Currently using CSV in place of XLS\r\n # read sample XLS sent\r\n ## def readXLS(self):\r\n ## fileXLS = xlrd.open_workbook('F16.xls')\r\n ## coordinates = fileXLS.sheet_by_index(0)\r\n ##\r\n ## xyz = []\r\n ## #xyz =[] #list of xyz in each row\r\n ## coords = [] #list of all coordinates\r\n ## for i in range(0, 9):\r\n ## xyz.append([])\r\n ## for j in range(0, 3):\r\n ## xyz[i].append(coordinates.cell(i , j))\r\n ## print \"coordinate list: \", xyz\r\n\r\n # read and print CSV file\r\n def readCSV(self, antennas):\r\n xyz = []\r\n with open(antennas, 'rb') as csvfile:\r\n coordinates = csv.reader(csvfile, delimiter=',')\r\n for row in coordinates:\r\n # convert list of strings to float\r\n row = map(float, row)\r\n xyz.append(row)\r\n global antennaLocs\r\n antennaLocs = xyz\r\n if (not self.assemblyMade):\r\n self.assembly = vtk.vtkAssembly()\r\n self.assemblyMade = True\r\n for antenna in antennaLocs:\r\n self.convertDimensions(antenna[0], antenna[1], antenna[2], antenna[3])\r\n\r\n # import CSV files for antenna coordinates\r\n def importCSV(self):\r\n filename = QtGui.QFileDialog.getOpenFileName(self, \"Import CSV files\")\r\n file = open(filename, 'r')\r\n self.readCSV(filename)\r\n\r\n # user enters x, y, and z antenna coordinates\r\n def enterAntennaCoordinates(self):\r\n x = self.xInput.text()\r\n y = self.yInput.text()\r\n z = 
self.zInput.text()\r\n self.convertDimensions(float(x), float(y), float(z), 0)\r\n \r\n # adds models to the renderer\r\n def addModel(self, reader):\r\n if vtk.VTK_MAJOR_VERSION <= 5:\r\n self.mapper.SetInput(reader.GetOutput())\r\n else:\r\n self.mapper.SetInputConnection(reader.GetOutputPort())\r\n # Check if assembly has been made yet\r\n if (not self.assemblyMade):\r\n self.assembly = vtk.vtkAssembly()\r\n self.assemblyMade = True\r\n # Add the main actor(plane model) to the assembly then call render and reset the camera\r\n self.assembly.AddPart(self.actor)\r\n self.render.AddActor(self.assembly)\r\n self.render.ResetCamera()\r\n self.interactor.Render()\r\n\r\n def convertDimensions(self, ngx, ngy, ngz, ngo):\r\n '''It's important to note that NG uses different x,y, and z coordinates than VTK. NG's x\r\n plane starts at 0 at the tip of the nose and extends to the back of the plane. VTK's x\r\n starts at the tip of the plane's right wing and extends to the tip of the plane's left wing.\r\n\r\n NG's y starts at 0 in the center of the plane and extends to each wing tip. Extending to\r\n the plane's right wing increases the value of y while extending to the plane's left wing\r\n decreases the value of y. VTK's y starts at the bottom-most point of the plane and extends\r\n to the top-most point. The bottom-most point is 0 and increases as you extend towards\r\n the top-most point.\r\n\r\n NG's z starts just on top of the nose of the plane and extends to the bottom-most and\r\n top-most points of the plane. It increases as you move towards the bottom-most point\r\n and decreases as you move towards the top-most point. VTK's z begins at the tip of the\r\n nose of the plane and extends to the tip of the back of the plane. The tip of the nose\r\n is 0 and increases as you extend towards the back.\r\n\r\n Orientation is not currently implemented and thus ngo is not used.\r\n '''\r\n\r\n xmid = 226.6690063476525 / 2\r\n # Finds the middle of the plane in terms of VTK's x plane, used in conjunction with NG's y\r\n # to determine antenna placement on VTK's x plane. Currently hardcoded until support for\r\n # more models is added\r\n\r\n ymid = 100.79100036621094 / 2\r\n # Finds the middle of the plane in terms of VTK's y plane, used in conjunction with NG's z\r\n # to determine antenna placement on VTK's y plane. Currently hardcoded until support for\r\n # more models is added\r\n\r\n mod = 20.80\r\n # Used to convert meters to VTK units. This is specific to each plane model, but since MVP only\r\n # deals with the F16, this will remain hardcoded until more models are supported\r\n
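(Editor's note: a self-contained sketch of the NG-to-VTK mapping implemented in this method, using the hard-coded F16 constants above; the sample NG point is made up and runs through the general-case offset branch.)

xmid, ymid, mod = 226.6690063476525 / 2, 100.79100036621094 / 2, 20.80
ngx, ngy, ngz = -2.0, 1.5, -0.4  # hypothetical NG coordinates
x = xmid + (ngy * mod)  # NG's y drives VTK's x
y = ymid - 13 + (-ngz * mod)  # NG's z, inverted, drives VTK's y
z = (ngx + 15.09) * mod  # NG's x, offset from the nose, drives VTK's z
print(round(x, 2), round(y, 2), round(z, 2))  # 144.53 45.72 272.27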
\r\n # Outdated, we have decided to stick with one conversion unit as the slight variations are likely\r\n # due to additional parts on the model not accounted for by the specs found on wikipedia.\r\n # xmod = 22.7579323642\r\n # ymod = 20.6538935177\r\n # zmod = 20.957569867\r\n\r\n x = xmid + (ngy * mod)\r\n # Since NG's y is our x and starts in the middle of the plane, we find the middle and apply\r\n # NG's y with the unit conversion factor\r\n\r\n y = ymid - 13 + (-ngz * mod)\r\n # Since NG's z is VTK's y, is inverted, and starts in the middle of the plane, we find the\r\n # middle and apply the inverted value of NG's z with the unit conversion factor\r\n\r\n if -9.0 < ngx < -6.5:\r\n z = (ngx + 13.8) * mod\r\n else:\r\n z = (ngx + 15.09) * mod\r\n # Since NG's x is VTK's z and starts from on top of the nose rather than the center of the plane,\r\n # we apply an offset to NG's x before converting it with the unit conversion factor. The different\r\n # offsets are a result of botched coordinates supplied to us by NG for the antennas located on\r\n # the wings. In the future, there will be one offset for every point\r\n\r\n # Check if the assembly already exists\r\n if (not self.assemblyMade):\r\n self.assembly = vtk.vtkAssembly()\r\n self.assemblyMade = True\r\n # Add the main actor(the plane) to the assembly\r\n self.assembly.AddPart(self.actor)\r\n\r\n # Create a sphere of radius 3 and assign it an actor and mapper\r\n sphere = vtk.vtkSphereSource()\r\n sphere.SetRadius(3)\r\n sphereMapper = vtk.vtkPolyDataMapper()\r\n sphereMapper.SetInputConnection(sphere.GetOutputPort())\r\n sphereActor = vtk.vtkActor()\r\n sphereActor.SetMapper(sphereMapper)\r\n\r\n # Set the sphere to the location calculated by the method and set its color to red before\r\n # adding it to the assembly\r\n sphereActor.SetPosition(x, y, z)\r\n sphereActor.GetProperty().SetColor(255, 0, 0)\r\n self.assembly.AddPart(sphereActor)\r\n self.antennas[(ngx, ngy, ngz)] = sphereActor\r\n # Tolerance will have to be changed; currently you need the exact location, to the decimal,\r\n # in order to locate an antenna\r\n\r\n # Show or hide the antenna spheres on the model\r\n def showAntenna(self):\r\n if self.showANT == 1:\r\n # antennas should be shown, add all from dictionary to assembly\r\n for antenna in self.antennas:\r\n self.assembly.AddPart(self.antennas[antenna])\r\n self.render.ResetCamera()\r\n self.interactor.Render()\r\n self.showANT = 0\r\n else:\r\n # antennas should be hidden, remove all in dictionary from assembly\r\n for antenna in self.antennas:\r\n self.assembly.RemovePart(self.antennas[antenna])\r\n self.render.ResetCamera()\r\n self.interactor.Render()\r\n self.showANT = 1\r\n\r\n # wireframe function\r\n def toggleWireframe(self):\r\n if self.solid == 1:\r\n self.actor.GetProperty().SetRepresentationToWireframe()\r\n self.interactor.Render()\r\n self.solid = 0\r\n else:\r\n self.actor.GetProperty().SetRepresentationToSurface()\r\n self.interactor.Render()\r\n self.solid = 1\r\n\r\n # importing OBJ/STL/PLY currently\r\n def readfiles(self):\r\n filename = QtGui.QFileDialog.getOpenFileName(self, \"Import Models\")\r\n file = open(filename, \"r\")\r\n extension = QtCore.QFileInfo(filename).suffix()\r\n\r\n # create an obj, stl, ply reader based on the extension of the filename selected\r\n # we add the selected 3D model to our renderer\r\n if extension == 'obj':\r\n with file:\r\n reader = 
vtk.vtkOBJReader()\r\n reader.SetFileName(str(filename))\r\n self.addModel(reader)\r\n\r\n if extension == 'stl':\r\n with file:\r\n reader = vtk.vtkSTLReader()\r\n reader.SetFileName(str(filename))\r\n self.addModel(reader)\r\n\r\n if extension == 'ply':\r\n with file:\r\n reader = vtk.vtkPLYReader()\r\n reader.SetFileName(str(filename))\r\n self.addModel(reader)\r\n\r\n\r\n# this creates the window and all the renderers\r\nif __name__ == \"__main__\":\r\n app = QtGui.QApplication(sys.argv)\r\n window = UI()\r\n window.setWindowTitle('CEESIM Visualizer')\r\n sys.exit(app.exec_())\r\n","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":12614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"551322516","text":"# Script to store model architectures and hyperparameter combinations\n__all__ = ['model_archs', 'training_sets', 'hyperparameters', 'cv_weights', 'model_defs', 'dataloaders',\n 'loss_functions']\n\n\nfrom utils.custom_architectures import *\nfrom utils.dataloaders import *\nfrom torchvision import models, datasets\nimport torch\nimport torch.nn as nn\n\n# architecture definitions with input size and whether the model is used at the haulout level or single seal level\nmodel_archs = {'NasnetA': {'input_size': 299},\n 'Resnet18': {'input_size': 224},\n 'Resnet34': {'input_size': 224},\n 'Resnet50': {'input_size': 224},\n 'WideResnetA': {'input_size': 28},\n 'WideResnetCount': {'input_size': 28},\n 'Resnet18count': {'input_size': 224},\n 'Resnet34count': {'input_size': 224},\n 'Resnet50count': {'input_size': 224},\n 'NasnetAcount': {'input_size': 224},\n 'NasnetAe2e': {'input_size': 299},\n 'CountCeption': {'input_size': 100},\n 'Squeezenet11': {'input_size': 224},\n 'Densenet121': {'input_size': 224},\n 'Densenet169': {'input_size': 224},\n 'Alexnet': {'input_size': 224},\n 'VGG16': {'input_size': 224}\n }\n\n# model definitions\nmodel_defs = {'Pipeline1': {'NasnetA': lambda num_classes: NASNetA(in_channels_0=48, out_channels_0=24,\n out_channels_1=32, out_channels_2=64,\n out_channels_3=128, num_classes=num_classes),\n 'Resnet18': lambda num_classes: models.resnet18(pretrained=False, num_classes=num_classes),\n 'Resnet34': lambda num_classes: models.resnet34(pretrained=False, num_classes=num_classes),\n 'Resnet50': lambda num_classes: models.resnet50(pretrained=False, num_classes=num_classes),\n 'Densenet121': lambda num_classes: models.densenet121(pretrained=False,\n num_classes=num_classes),\n 'Densenet169': lambda num_classes: models.densenet169(pretrained=False,\n num_classes=num_classes),\n 'Alexnet': lambda num_classes: models.alexnet(pretrained=False,\n num_classes=num_classes),\n 'VGG16': lambda num_classes: models.vgg16(pretrained=False, num_classes=num_classes),\n 'Squeezenet11': lambda num_classes: models.squeezenet1_1(pretrained=False,\n num_classes=num_classes)\n },\n 'Pipeline1.1': {'Resnet18count': resnet18_count(),\n 'Resnet34count': resnet34_count(),\n 'Resnet50count': resnet50_count(),\n 'NasnetAcount': NASNetA_count(),\n 'WideResnetCount': wrn_count(depth=28),\n 'CountCeption': ModelCountception()}}\n\n# model dataloaders\ndataloaders = {'Pipeline1': lambda dataset, transforms: datasets.ImageFolder(dataset, transforms),\n 'Pipeline1.1': lambda dataset, shp_trans, int_trans: ImageFolderTrainDet(dataset, shp_trans, int_trans)}\n\n# model loss functions\nloss_functions = {'Pipeline1': lambda weight: nn.CrossEntropyLoss(weight=torch.FloatTensor(weight)),\n 'Pipeline1.1': lambda _: 
nn.MSELoss()}\n\n# training sets with number of classes and size of scale bands\ntraining_sets = {'training_set_vanilla': {'num_classes': 11, 'scale_bands': [450, 450, 450]},\n 'training_set_multiscale_A': {'num_classes': 11, 'scale_bands': [450, 1350, 4000]}\n }\n\n# hyperparameter sets\nhyperparameters = {'A': {'learning_rate': 1E-3, 'batch_size_train': 64, 'batch_size_val': 8, 'batch_size_test': 64,\n 'step_size': 1, 'gamma': 0.95, 'epochs': 5, 'num_workers_train': 16, 'num_workers_val': 1},\n 'B': {'learning_rate': 1E-3, 'batch_size_train': 16, 'batch_size_val': 1, 'batch_size_test': 8,\n 'step_size': 1, 'gamma': 0.95, 'epochs': 5, 'num_workers_train': 8, 'num_workers_val': 1},\n 'C': {'learning_rate': 1E-3, 'batch_size_train': 64, 'batch_size_val': 8, 'batch_size_test': 64,\n 'step_size': 1, 'gamma': 0.95, 'epochs': 30, 'num_workers_train': 16, 'num_workers_val': 8},\n 'D': {'learning_rate': 1E-3, 'batch_size_train': 16, 'batch_size_val': 8, 'batch_size_test': 32,\n 'step_size': 1, 'gamma': 0.95, 'epochs': 5, 'num_workers_train': 8, 'num_workers_val': 8},\n 'E': {'learning_rate': 1E-3, 'batch_size_train': 16, 'batch_size_val': 1, 'batch_size_test': 4,\n 'step_size': 1, 'gamma': 0.95, 'epochs': 10, 'num_workers_train': 4, 'num_workers_val': 1}\n }\n\n# cross-validation weights\ncv_weights = {'NO': lambda x: [1] * x,\n 'WCV': lambda x: [5] + [1] * (x-2) + [5]}\n\n\n\n\n\n","sub_path":"utils/model_library.py","file_name":"model_library.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"629991655","text":"import random\nfrom Cell import Cell\n\nclass Table(object):\n\tdef __init__(self, file_path = None, height = 9, weight = 9):\n\t\tself.cells = []\n\t\tself.height = height\n\t\tself.weight = weight\n\t\tif file_path:\n\t\t\tflatten_table = self.get_file_input(file_path)\n\t\telse:\n\t\t\tflatten_table = self.get_random_input()\n\t\tself.generate_table(flatten_table)\n\n\tdef get_random_input(self):\n\t\tflatten_table = []\n\t\tfor x in range(self.height * self.weight):\n\t\t\tflatten_table.append(bool(random.getrandbits(1)))\n\t\treturn flatten_table\n\n\tdef get_file_input(self, file_path):\n\t\tself.height = 0\n\t\tself.weight = 0\n\t\tflatten_table = []\n\t\twith open(file_path) as file:\n\t\t\tfor line in file:\n\t\t\t\tself.height += 1\n\t\t\t\tself.weight = 0\n\t\t\t\tvalue_list = line.split()\n\t\t\t\tfor val in value_list:\n\t\t\t\t\tself.weight += 1\n\t\t\t\t\tflatten_table.append(int(val) == 1)\n\t\treturn flatten_table\n\n\tdef generate_table(self, flatten_table):\n\t\tself.cells = []\n\t\tfor h in range(self.height):\n\t\t\ttemp = []\n\t\t\tfor w in range(self.weight):\n\t\t\t\ttemp.append(Cell(flatten_table[h * self.weight + w]))\n\t\t\tself.cells.append(temp)\n\n\tdef check_cell_is_in_table(self, h, w):\n\t\treturn h < self.height and w < self.weight and h >= 0 and w >= 0\n\n\tdef get_number_of_alive_neighbour(self, h, w):\n\t\tnumber_of_alive_neighbour = 0\n\t\toffset = [[-1, -1], [-1, 0], [-1, 1],\n\t\t\t\t [0, -1], [0, 1],\n\t\t\t\t [1, -1], [1, 0], [1, 1]]\n\t\tfor i in range(8):\n\t\t\tif self.check_cell_is_in_table(h + offset[i][0], w + offset[i][1]) and \\\n\t\t\t\t\t\tself.cells[h + offset[i][0]][w + offset[i][1]].state:\n\t\t\t\tnumber_of_alive_neighbour += 1\t\n\n\t\treturn number_of_alive_neighbour\n\n\tdef get_next_state(self):\n\t\tfor h in range(self.height):\n\t\t\tfor w in range(self.weight):\n\t\t\t\tnumber_of_alive_neighbour = 
self.get_number_of_alive_neighbour(h, w)\n\t\t\t\tself.cells[h][w].get_next_state(number_of_alive_neighbour)\n\n\tdef set_state(self):\n\t\tfor h in range(self.height):\n\t\t\tfor w in range(self.weight):\n\t\t\t\tself.cells[h][w].turn()\n\n\tdef next_generation(self):\n\t\tself.get_next_state()\n\t\tself.set_state()\n\n\n\n\tdef show_table(self):\n\t\tshow = \"\"\n\t\tfor i in range(self.height):\n\t\t\tfor j in range(self.weight):\n\t\t\t\tshow = show + '*' if self.cells[i][j].state else show + 'o'\n\t\t\tshow += '\\n'\n\t\treturn show[: len(show) - 1]\n\n\n\t\t\n\n","sub_path":"src/Table.py","file_name":"Table.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248286283","text":"from django.shortcuts import get_object_or_404, render, redirect\n\n# Create your views here.\nfrom store.models import Product, Variation\nfrom .models import Cart, CartItem\n\n\ndef _cart_id(request):\n cart = request.session.session_key\n if not cart:\n cart = request.session.create()\n return cart\n\ndef remove_cart_item(request, product_id, cart_item_id):\n cart = Cart.objects.get(cart_id = _cart_id(request))\n product = get_object_or_404(Product, id = product_id)\n cart_item = CartItem.objects.get(product = product, cart = cart, id = cart_item_id)\n cart_item.delete()\n return redirect('cart')\n\n\n\ndef remove_cart(request, product_id, cart_item_id):\n cart = Cart.objects.get(cart_id = _cart_id(request))\n product = get_object_or_404(Product, id = product_id)\n try:\n cart_item = CartItem.objects.get(product = product, cart = cart, id = cart_item_id)\n if cart_item.quantity > 1:\n cart_item.quantity -= 1\n cart_item.save()\n else:\n cart_item.delete()\n except:\n pass\n\n return redirect('cart')\n\ndef add_cart(request, product_id):\n\n product = Product.objects.get(id = product_id)\n\n #### collect the selected variations into a list\n product_variation = []\n if request.method == 'POST':\n for item in request.POST:\n key = item\n value = request.POST[key]\n\n try:\n variation = Variation.objects.get(product = product,variation_category__iexact = key,variation_value__iexact = value)\n product_variation.append(variation)\n\n except:\n pass\n \n\n ##### get or create a cart \n try: \n cart = Cart.objects.get(cart_id = _cart_id(request))\n except Cart.DoesNotExist:\n cart = Cart.objects.create(\n cart_id = _cart_id(request)\n )\n cart.save()\n\n ############### create the cart item\n\n is_cart_item_exists = CartItem.objects.filter(product = product, cart = cart).exists()\n\n if is_cart_item_exists:\n cart_item = CartItem.objects.filter(product = product, cart = cart) \n ex_var_list = []\n id = [] # ids of the matching cart items\n for item in cart_item:\n existing_variation = item.variations.all()\n ex_var_list.append(list(existing_variation))\n id.append(item.id)\n\n if product_variation in ex_var_list:\n # the same variation combination already exists: increase its quantity by one\n index = ex_var_list.index(product_variation)\n item_id = id[index]\n item = CartItem.objects.get(product = product, id = item_id)\n item.quantity += 1\n item.save()\n \n else:\n # create a new cart item for this variation combination\n item = CartItem.objects.create(\n product = product,\n quantity = 1,\n cart = cart\n \n )\n item.variations.add(*product_variation)\n item.save()\n\n else :\n \n cart_item = CartItem.objects.create(\n product = product,\n quantity = 1,\n cart = cart,\n )\n\n if len(product_variation) > 0:\n cart_item.variations.clear()\n cart_item.variations.add(*product_variation) # the * unpacking is important here\n 
cart_item.save()\n \n return redirect('cart')\n\n\n\ndef cart(request, total =0, quantity = 0, cart_items = None):\n \n cart = Cart.objects.get(cart_id = _cart_id(request))\n cart_items = CartItem.objects.filter(cart = cart, is_active = True)\n total = 0\n quantity = 0\n for cart_item in cart_items:\n\n total += (cart_item.product.price * cart_item.quantity)\n quantity += cart_item.quantity\n \n tax = (2*total)/100\n grand_total = total + tax\n context = {\n 'total': total,\n 'quantity':quantity,\n 'cart_items':cart_items,\n 'tax': tax,\n 'grand_total': grand_total\n }\n\n\n\n return render(request, 'store/cart.html',context)","sub_path":"carts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246536195","text":"#!/usr/bin/python\nfrom __future__ import print_function\n\nimport xbmc\nimport xbmcaddon\nimport subprocess\nimport re\nfrom os import path, popen\nglobal sleep_time, watched_local, watched_remote, counterdefault, addon\nglobal lockfilepaths, transmisionMinimalSpeed , transmissionCmd, debugMe\n\nclass MyMonitor( xbmc.Monitor ):\n def __init__( self, *args, **kwargs ):\n xbmc.Monitor.__init__( self )\n def onSettingsChanged( self ):\n load_settings()\n\ndef port_set(string):\n ret = set()\n for port in re.findall(\"[0-9]+\", string):\n try:\n port = int(port)\n except ValueError:\n continue\n ret.add(port)\n return ret\n\ndef mylog(msg):\n if debugMe:\n print(\"{}: {}\".format(addon.getAddonInfo('id'), msg))\n\ndef check_services(watchlocal, watchremote):\n \"\"\" Check if any of the watched services is running. \"\"\"\n\n netstat = subprocess.check_output(['/bin/netstat', '-t', '-n'], universal_newlines=True)\n\n for line in netstat.split('\\n')[2:]:\n items = line.split()\n if len(items) < 4: continue\n if not (\"udp\" in items[0] or \"tcp\" in items[0] or \"raw\" in items[0]): continue\n\n local_addr, local_port = items[3].rsplit(':', 1)\n remote_addr, remote_port = items[4].rsplit(':', 1)\n\n if local_addr[0] == '[' and local_addr[-1] == ']':\n local_addr = local_addr[1:-1]\n\n if remote_addr[0] == '[' and remote_addr[-1] == ']':\n remote_addr = remote_addr[1:-1]\n\n local_port = int(local_port)\n\n if ((local_addr != remote_addr) and (local_port in watchremote)) or \\\n ((local_addr == remote_addr) and (local_port in watchlocal)):\n mylog(\"Found connection from {} to {}:{}\".format(remote_addr, local_addr, local_port))\n return True\n\n mylog(\"No connection found.\")\n return False\n\ndef check_lockfiles(lockfilelist=[]):\n for i in lockfilelist:\n if path.exists(i):\n mylog(\"lockfile(s) found: %s\" %(i))\n return True\n return False\n\n# function to check the status of transmission. if running, it checks if the downloads are going fast enough to keep the system awake\ndef check_transmission(transmissioncommand,transmissionminimalspeed=10.0):\n try: transmissioninfo = popen(transmissioncommand).read()\n except:\n mylog(\"transmission-remote error. 
is transmission-remote installed on this system?\")\n return False\n else: \n if transmissioninfo == '':\n mylog(\"transmission not running or not giving any response\")\n return False\n elif float(transmissioninfo.split()[-1]) >= transmissionminimalspeed:\n mylog(\"transmission downloading: %s kb/s\" %(transmissioninfo.split()[-1]))\n return True\n else:\n mylog(\"transmission downloading too slow: %s kb/s\" %(transmissioninfo.split()[-1]))\n return False\n return False\n\ndef check_all(watchlocal=set(), watchremote=set(), lockfilelist=[], transmissioncommand=\"\", transmissionminimalspeed=0.0):\n # this routine checks if the system is active. If it finds an activity, it will return True and not check other activities.\n if lockfilelist:\n if check_lockfiles(lockfilelist):\n return True\n if watchlocal or watchremote:\n if check_services(watchlocal,watchremote):\n return True\n if transmissioncommand:\n if check_transmission(transmissioncommand, transmissionminimalspeed):\n return True\n return False\n\ndef load_settings():\n global sleep_time, watched_local, watched_remote, counterdefault, addon\n global lockfilepaths, transmisionMinimalSpeed , transmissionCmd, debugMe\n \n addon = xbmcaddon.Addon()\n s = addon.getSetting\n try:\n sleep_time = int(s('sleep'))\n except ValueError:\n sleep_time = 60\n watched_local = port_set(s('localports'))\n watched_remote = port_set(s('remoteports'))\n counterdefault = int(s('idlecount'))\n lockfilepaths=s('lockfilepaths')\n if lockfilepaths:\n lockfilepaths=lockfilepaths.split(\";\")\n else:\n lockfilepaths=[]\n checkTransmission = s('checktransmission') == 'true'\n transmisionMinimalSpeed = float(s('transmissionminspeed'))\n transmissionUsername = s('transmissionuser')\n transmissionPasswd = s('transmissionpass')\n if not checkTransmission:\n transmissionCmd = \"\"\n elif transmissionUsername and transmissionPasswd:\n transmissionCmd = \"transmission-remote -n %s:%s -l\" %(transmissionUsername, transmissionPasswd)\n else:\n transmissionCmd = \"transmission-remote -l\"\n debugMe = s('debugme') == 'true'\n #FIXME: I need to check if transmission-remote always returns kb/s\n\nload_settings()\ncounter = counterdefault\n\nmylog(\"Watching for remote connections to ports {} and for local connections to ports {}, sleep time is {} s.\".format(\n ', '.join(str(x) for x in watched_remote),\n ', '.join(str(x) for x in watched_local),\n sleep_time))\n\nwhile not xbmc.abortRequested:\n for i in range(sleep_time):\n if xbmc.abortRequested: break\n load_settings()\n if i==0:\n if check_all(watched_local, watched_remote, lockfilepaths, transmissionCmd, transmisionMinimalSpeed):\n mylog(\"Setting InhibitIdleShutdown to true\")\n xbmc.executebuiltin('InhibitIdleShutdown(true)')\n counter = counterdefault\n elif counter > 0: # if we have a value on the counter, we leave shutdown as it is.\n counter = counter -1\n mylog(\"Delay counter is: %i. 
Not changing InhibitIdleShutdown.\" %(counter))\n else:\n mylog(\"Setting InhibitIdleShutdown to false\")\n xbmc.executebuiltin('InhibitIdleShutdown(false)')\n xbmc.sleep(1000)\n","sub_path":"inhibit_shutdown.py","file_name":"inhibit_shutdown.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603897630","text":"import json\nimport urllib.parse\n\n\ndef to_json(response):\n return json.loads(response.get_data(as_text=True))\n\n\ndef post_json(client, url, obj):\n return client.post(url,\n content_type='application/json',\n data=json.dumps(obj))\n\n\ndef put_json(client, url, obj):\n return client.put(url,\n content_type='application/json',\n data=json.dumps(obj))\n\n\ndef create_user(client, userid, first, last, groups=None):\n message = {\n 'first_name': first,\n 'last_name': last,\n 'userid': userid\n }\n\n if groups:\n message['groups'] = groups\n\n with post_json(client, '/users/', message) as r:\n assert r.status_code == 201\n\n j = to_json(r)\n assert j['userid']\n return j['userid']\n\n\ndef update_user(client, userid, first, last, groups=None):\n url = '/users/' + urllib.parse.quote(userid)\n message = {\n 'first_name': first,\n 'last_name': last,\n 'userid': userid\n }\n\n if groups:\n message['groups'] = groups\n\n with put_json(client, url, message) as r:\n assert r.status_code == 200\n\n j = to_json(r)\n assert j['userid']\n return j['userid']\n\n\ndef get_user(client, userid):\n url = '/users/' + urllib.parse.quote(userid)\n with client.get(url) as r:\n assert r.status_code == 200\n j = to_json(r)\n assert j['userid'] == userid\n return j\n\n\ndef get_group(client, groupid):\n url = '/groups/' + urllib.parse.quote(groupid)\n\n with client.get(url) as r:\n assert r.status_code == 200\n return to_json(r)\n\n\ndef create_group(client, groupid):\n message = {'name': groupid}\n with post_json(client, '/groups/', message) as r:\n assert r.status_code == 201\n\n j = to_json(r)\n assert j['name'] == message['name']\n return j['name']\n\n\ndef update_group(client, groupid, user_ids):\n url = '/groups/' + urllib.parse.quote(groupid)\n with put_json(client, url, user_ids) as r:\n assert r.status_code == 200\n\n j = to_json(r)\n assert j['name']\n return j['name']\n","sub_path":"test/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"546173610","text":"import urllib.request\r\ngive = input(\"Give link\")\r\ntry:\r\n url = urllib.request.urlopen(give)\r\n content = url.read()\r\nexcept urllib.error.HTTPError:\r\n print(\"Webpage not found\")\r\n exit()\r\n\r\nf = open('reader.html', 'wb')\r\nf.write(content)\r\nf.close()\r\n","sub_path":"download_webpage_html.py","file_name":"download_webpage_html.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628459579","text":"import torch\n#import fairseq\nimport argparse\nimport pickle\nimport os\nimport logging\nfrom tqdm import tqdm\n\nlogger = logging.getLogger(__name__)\nparser = argparse.ArgumentParser(description='AAAI CLF')\nparser.add_argument('--gpu', default='6', type=str,\n help='id(s) for CUDA_VISIBLE_DEVICES')\nparser.add_argument('--data_path', type=str, default='./processed_data/',\n help='path to data folders')\n\nargs = parser.parse_args()\n# List available models\nprint(torch.hub.list('pytorch/fairseq')) # [..., 
'transformer.wmt16.en-de', ... ]\n\n# Load a transformer trained on WMT'16 En-De\nen2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de', checkpoint_file='model1.pt',\n tokenizer='moses', bpe='fastbpe')\nde2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.de-en', checkpoint_file='model1.pt',\n tokenizer='moses', bpe='fastbpe')\n\n#assert isinstance(en2de.models[0], fairseq.models.transformer.TransformerModel)\n\ndata_path = args.data_path\n\nwith open(data_path + 'train_unlabeled_data.pkl', 'rb') as f:\n train_unlabeled_data = pickle.load(f)\n\nnum_sample_sen = 2\ncnt = 0\ntrain_unlabeled_data_aug = {}\ngpu = args.gpu\nos.environ['CUDA_VISIBLE_DEVICES'] = gpu\nen2de = en2de.cuda()\nde2en = de2en.cuda()\n\nfor key, value in tqdm(train_unlabeled_data.items(), ncols=50, desc=\"Iteration:\"):\n new_value = []\n\n for i in range(num_sample_sen):\n v = de2en.translate(en2de.translate(value, sampling = True, temperature = 0.8),\n sampling = True, temperature = 0.8)\n if cnt % 100 == 0:\n print(\"***************\")\n print(\"org: \", value)\n print(\"new: \", v)\n new_value.append(v)\n train_unlabeled_data_aug[key] = new_value\n if cnt % 1000 == 0:\n with open(data_path + 'train_unlabeled_data_bt.pkl', 'wb') as f:\n assert len(train_unlabeled_data_aug[key]) == num_sample_sen\n pickle.dump(train_unlabeled_data_aug, f)\n cnt += 1\n\nwith open(data_path + 'train_unlabeled_data_bt.pkl', 'wb') as f:\n pickle.dump(train_unlabeled_data_aug, f)\n\n\n","sub_path":"code/back_tanslation.py","file_name":"back_tanslation.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491129169","text":"# -*- coding: utf-8 -*-\n\"\"\"\n#------------------------------------------------------------------------------\n# Revised: 2016-09-22\n# Authors: Theja Putta\n# Email: putta.v@husky.neu.edu\n# Prerequisites: ArcGIS 10.1 or higher and Python 2.7\n#\n# -----------------------------------------------------------------------------\n\"\"\"\n\n########################################################################################################################\n# Libraries\n########################################################################################################################\n# Import arcpy and Set Environments\nimport arcpy\narcpy.env.overwriteOutput = True\n# Import Standard Libraries\nimport os\nimport sys\nimport shutil\n\n# Specify Toolbox file paths\nToolDataPath = os.path.dirname(__file__)[:-7]\nLibDataPath = os.path.join(ToolDataPath, \"LibData\")\nFilesPath = os.path.join(ToolDataPath, \"Files\")\nScratchPath = os.path.join(ToolDataPath, \"Scratch\")\nResultsPath = os.path.join(ToolDataPath, \"Results\")\n\n# Import other libraries in ToolData/LibData\nsys.path.insert(0, LibDataPath)\nimport networkx as nx\n\n########################################################################################################################\n# Get Input from the toolbox GUI\n########################################################################################################################\n\nstreets = arcpy.GetParameterAsText(0)\nlts_field = arcpy.GetParameterAsText(1)\nstress_level = arcpy.GetParameterAsText(2)\noutput_folder = arcpy.GetParameterAsText(3)\n\nstress_level = int(stress_level)\nassert stress_level in {1,2,3,4}, \"Stress level must be 1, 2, 3 or 4\"\nassert arcpy.Describe(streets).shapeType == \"Polyline\", \"Input Street should be of type: POLYLINE\"\n\ncreate_lts_shp = 
1\nif create_lts_shp == 1:\n\t# Create a shapefile to be used by networkx.\n\ttry:\n\t\tshutil.rmtree(ScratchPath+\"/temp_islands\", ignore_errors=True)\n\texcept:\n\t\tpass\n\tarcpy.env.overwriteOutput = True\n\tos.mkdir(ScratchPath+\"/temp_islands\")\n\tarcpy.FeatureClassToShapefile_conversion(streets,ScratchPath+\"/temp_islands\")\n\tstreets_shp = ScratchPath+\"/temp_islands/{0}.shp\".format(streets)\n\tlts_shp = ScratchPath+\"/temp_islands/lts{0}.shp\".format(str(stress_level))\n\twhere_clause = \"{0} <= {1}\".format(lts_field, stress_level)\n\tarcpy.Select_analysis(streets_shp, lts_shp, where_clause)\n\tarcpy.AddMessage(streets_shp)\n\tarcpy.AddMessage(lts_shp)\n\tarcpy.AddMessage(where_clause)\n\n\tlts_field = lts_field[:10]\n\n\tfields = arcpy.ListFields(lts_shp)\n\tdrop_fields = []\n\tfor field in fields:\n\t\tif field.name not in {lts_field, \"FID\", \"Shape\"}:\n\t\t\tdrop_fields.append(field.name)\n\tarcpy.DeleteField_management(lts_shp,drop_fields)\n\tarcpy.RepairGeometry_management(lts_shp)\n\t# arcpy.AddGeometryAttributes_management(lts_shp,Geometry_Properties=\"LENGTH\",Length_Unit=\"METERS\")\n\n\ndef make_islands(shapefile, output):\n\tlts_graph = nx.read_shp(shapefile).to_undirected(reciprocal=False)\n\tarcpy.AddMessage(lts_shp)\n\tarcpy.AddMessage(str(len(lts_graph.nodes()))+\" nodes\")\n\tlts_graph_components = nx.connected_component_subgraphs(lts_graph)\n\tnumber_of_components = len(lts_graph_components)\n\td = dict()\n\tfor j in range(number_of_components):\n\t\tfor edge in lts_graph_components[j].edges():\n\t\t\td[edge] = j+1\n\tnx.set_edge_attributes(lts_graph,\"Isl_Rank\",d)\n\tnx.write_shp(lts_graph,str(ScratchPath+\"/temp_islands\"))\n\tarcpy.Dissolve_management(str(ScratchPath+\"/temp_islands/edges.shp\"),output,\"Isl_Rank\")\n\tprj_file = shapefile[:-3]+\"prj\"\n\tarcpy.DefineProjection_management(output,prj_file)\n\n\nlts_shp = str(ScratchPath+\"/temp_islands/lts{0}.shp\".format(str(stress_level)))\noutput = str(output_folder+\"/lts{0}_islands.shp\").format(str(stress_level))\nmake_islands(lts_shp, output)\n# try:\n# \tshutil.rmtree(ScratchPath+\"/temp\", ignore_errors=True)\n# except:\n# \tpass","sub_path":"Scripts/CreateIslands.py","file_name":"CreateIslands.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515342388","text":"import torch, os\nimport numpy as np\nfrom torch import optim\nfrom torch import nn\nfrom omniglotNShot import OmniglotNShot\nimport scipy.stats\nfrom torch.utils.data import DataLoader\nfrom torch.optim import lr_scheduler\nimport random, sys, pickle\nimport argparse\n\nfrom maml import MAML\n\n\n\ndef main():\n\targparser = argparse.ArgumentParser()\n\targparser.add_argument('-n', help='n way', default=5)\n\targparser.add_argument('-k', help='k shot', default=1)\n\targparser.add_argument('-b', help='batch size', default=32)\n\targparser.add_argument('-l', help='meta learning rate', default=1e-3)\n\targs = argparser.parse_args()\n\tn_way = int(args.n)\n\tk_shot = int(args.k)\n\tmeta_batchsz = int(args.b)\n\tmeta_lr = float(args.l)\n\ttrain_lr = 0.4\n\n\tk_query = 15\n\timgsz = 84\n\tmdl_file = 'ckpt/omniglot%d%d.mdl'%(n_way, k_shot)\n\tprint('omniglot: %d-way %d-shot meta-lr:%f, train-lr:%f' % (n_way, k_shot, meta_lr, train_lr))\n\n\n\n\tdevice = torch.device('cuda:0')\n\tnet = MAML(n_way, k_shot, k_query, meta_batchsz, 5, meta_lr, train_lr, device)\n\tprint(net)\n\n\n\t# batchsz here means total episode number\n\tdb = 
OmniglotNShot('omniglot', batchsz=meta_batchsz, n_way=n_way, k_shot=k_shot, k_query=k_query, imgsz=imgsz)\n\n\tfor step in range(10000000):\n\n\t\t# train\n\t\tsupport_x, support_y, query_x, query_y = db.get_batch('train')\n\t\tsupport_x = torch.from_numpy(support_x).float().transpose(2, 4).transpose(3, 4).repeat(1, 1, 3, 1, 1).to(device)\n\t\tquery_x = torch.from_numpy(query_x).float().transpose(2, 4).transpose(3, 4).repeat(1, 1, 3, 1, 1).to(device)\n\t\tsupport_y = torch.from_numpy(support_y).long().to(device)\n\t\tquery_y = torch.from_numpy(query_y).long().to(device)\n\n\t\taccs = net(support_x, support_y, query_x, query_y, training = True)\n\n\t\tif step % 20 == 0:\n\t\t\tprint(step, '\\t', accs)\n\n\n\n\t\tif step % 1000 == 0:\n\t\t\t# test\n\t\t\tpass\n\n\n\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"omniglot_train.py","file_name":"omniglot_train.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"422220348","text":"def func(x):\n from math import sin\n return sin(x / 2) + 1 - x ** 2\n\n\ndef derivation1(x):\n from math import cos\n return 0.5 * cos(x / 2) - 2 * x\n\n\ndef derivation2(x):\n from math import sin\n return 0.25 * sin(x / 2) - 2\n\n\ndef applicator(left, right, eps, functor, applicable):\n res = abs(functor(left))\n while left < right:\n left += eps / 2\n res = applicable(res, abs(functor(left)))\n\n return res\n\n\ndef iterative_method(left, right, eps, functor, deriv1):\n sign = lambda x: 1 if x >= 0 else -1\n ceil = lambda x: x if int(x) == x else int(x) + 1\n N = max(deriv1(left), deriv1(right))\n k = sign(deriv1(left)) * (ceil(N / 2) + 1)\n\n xn = left\n n = 0\n\n while True:\n n += 1\n\n xn1 = xn - functor(xn) / k\n y = functor(xn)\n delta = xn - xn1\n xn = xn1\n\n yield n, xn, xn1, y, delta\n\n if abs(delta) < eps:\n break\n\n\ndef combined_method(left, right, eps, functor, deriv1, deriv2):\n def hord1(xn, xi):\n return xi - functor(xi) * (xi - xn) / (functor(xi) - functor(xn))\n\n fixed = functor(left) * deriv2(left) > 0\n if fixed:\n xn, xi = left, right\n else:\n xn, xi = right, left\n\n n = 0\n\n while True:\n n += 1\n\n delta = xi - xn\n if fixed:\n xi = hord1(xn, xi)\n xn = xn - functor(xn) / deriv1(xn)\n else:\n xn = hord1(xi, xn)\n xi = xi - functor(xi) / deriv1(xi)\n\n y = functor(xn)\n\n yield n, xn, xi, y, delta\n\n if abs(delta) < eps:\n break\n\n\ndef isolate(acc, left, right, functor):\n while functor(left + acc) * functor(right - acc) < 0:\n left += acc\n right -= acc\n\n while functor(left) * functor(left + acc) > 0:\n left += acc\n\n while functor(right) * functor(right - acc) > 0:\n right -= acc\n\n return left, right\n\n\ndef check_isolated(left, right, eps, deriv1, deriv2):\n mx = applicator(left, right, eps, deriv2, max)\n mn = applicator(left, right, eps, deriv1, min)\n\n return mx < 2 * mn\n\nfunc_repr = 'sin(x / 2) + 1 - x ^ 2'\nderiv1_repr = '0.5 * cos(x / 2) - 2 * x'\nderiv2_repr = '0.25 * sin(x / 2) - 2'\n\n\ndef show_iterative_method():\n print('The program refines a root of the equation {} = 0'.format(func_repr))\n print('Range bounds: [{}, {}]'.format(1.0, 2.8))\n print('Refined isolation interval: [{:.3f}, {:.3f}]'.format(*isolate(0.01, 1.0, 2.8, func)))\n condition = check_isolated(1.0, 2.8, 0.01, derivation1, derivation2)\n print('Condition M <= 2m check: {}'.format('holds' if condition else 'does not hold'))\n\n for t in iterative_method(1.0, 2.8, 0.00001, func, derivation1):\n print('{}\\t{:.5f}\\t{:.5f}\\t{:+.6f}\\t{:+.6f}'.format(*t))
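(Editor's note: both solvers above are generators, so a caller can consume them directly; this small driver is illustrative and not part of the original record.)

root = None
for n, xn, xi, y, delta in combined_method(1.0, 2.8, 1e-5, func, derivation1, derivation2):
    root = xn  # keep the latest refinement
print('root ~= {:.5f}, residual f(root) = {:+.6f}'.format(root, func(root)))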
\n\n\ndef show_combined_method():\n print('The program refines a root of the equation {} = 0'.format(func_repr))\n print('Range bounds: [{}, {}]'.format(1.0, 2.8))\n print('Refined isolation interval: [{:.3f}, {:.3f}]'.format(*isolate(0.01, 1.0, 2.8, func)))\n condition = check_isolated(1.0, 2.8, 0.01, derivation1, derivation2)\n print('Condition M <= 2m check: {}'.format('holds' if condition else 'does not hold'))\n\n for t in combined_method(1.0, 2.8, 0.00001, func, derivation1, derivation2):\n print('{}\\t{:.5f}\\t{:.5f}\\t{:+.6f}\\t{:+.6f}'.format(*t))\n\n\n#show_iterative_method()\nshow_combined_method()\ninput()\n","sub_path":"Вычислительная математика/Лабораторные работы/Lab1/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"485293925","text":"import serial, time, datetime, sys\nimport yaml\n\nfrom sendSMS import sendSMS\nfrom sendPushBullet import sendPushBulletNotification, sendPushBulletEmail, displayPushBulletDevices\nfrom dataProcessing import DataProcessing\n\nmsgSent = False\nmsgSentTime = 0\ndata = DataProcessing()\ndata.startAsyncXBee()\ndata.startAsyncBPM()\ndata.startAsyncTemp()\n\nconf_list = None\nwith open(r'Configuration.yaml') as file:\n\tconf_list = yaml.full_load(file)\n\ndef sendNotifications(sms_msg, pb_msg):\n\ttry:\n\t\tfor item in conf_list['sms_recipient_number']:\n\t\t\tsendSMS(conf_list['sms_account_sid'], conf_list['sms_auth_token'], conf_list['sms_sender_number'], item, sms_msg)\n\t\tif (conf_list['send_pushbullet_option']):\n\t\t\tsendPushBulletNotification(conf_list['pushbullet_api_key'], pb_msg)\n\t\treturn True\n\texcept:\n\t\treturn False\n\ntry:\n\twhile True:\n\t\ttime.sleep(1)\n\t\tbpm = data.BPM\n\t\tvoltage = data.pulseVoltage\n\t\ttemp = data.temp\n\t\tmotion = data.motion\n\t\tbpmQueueSize = data.bpmQueue.qsize()\n\t\ttempQueueSize = data.tempQueue.qsize()\n\t\tmsgRxTime = data.msgRxTime\n\n\t\tif msgRxTime > 0:\n\t\t\tif bpm > 0:\n\t\t\t\tprint(\"BPM: %d\\n\" % bpm)\n\t\t\telse:\n\t\t\t\tprint(\"NO HEARTBEAT DETECTED\\n\")\n\t\t\t\t\n\t\t\tif temp > -100:\n\t\t\t\tprint(\"TEMPERATURE: %d\\n\" % temp)\n\t\t\telse:\n\t\t\t\tprint(\"NO TEMPERATURE DETECTED\\n\")\n\n\t\t\tif motion:\n\t\t\t\tprint(\"DEVICE MOTION DETECTED\\n\\n\")\n\t\t\telse:\n\t\t\t\tprint(\"NO DEVICE MOTION DETECTED\\n\\n\")\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"DEVICE IS OFF\\n\\n\")\n\n\t\tif not msgSent and (voltage > 0 or motion == True):\n\t\t\tprint(\"++++++++++++++++++++++++++++++++++++++++++++++\\nSENDING EMERGENCY NOTIFICATIONS\\n++++++++++++++++++++++++++++++++++++++++++++++\\n\")\n\t\t\tif (sendNotifications(conf_list['sms_message'], conf_list['pushbullet_message'])):\n\t\t\t\tprint(\"++++++++++++++++++++++++++++++++++++++++++++++\\nEMERGENCY NOTIFICATIONS SENT\\n++++++++++++++++++++++++++++++++++++++++++++++\\n\\n\")\n\t\t\telse:\n\t\t\t\tprint(\"++++++++++++++++++++++++++++++++++++++++++++++\\nEMERGENCY NOTIFICATIONS FAILED TO SEND\\n++++++++++++++++++++++++++++++++++++++++++++++\\n\\n\")\n\t\t\tmsgSent = True\n\t\t\tmsgSentTime = time.time()\n\n\t\ttimeVal = int(time.time()) - int(msgSentTime)\n\t\tif msgSentTime > 0 and (timeVal % conf_list['vitals_interval'] == 0) and timeVal > 0: \n\t\t\tnotif_msg = \"VITALS WERE CAPTURED\\n\\n\"\n\t\t\tif bpm > 0:\n\t\t\t\tnotif_msg = notif_msg + \"BPM: %d\\n\" % bpm\n\t\t\telse:\n\t\t\t\tnotif_msg = notif_msg + \"BPM WAS NOT DETECTED\\n\"\n\t\t\tif 
temp > -100:\n\t\t\t\tnotif_msg = notif_msg + \"TEMPERATURE: %d\\n\" % temp\n\t\t\telse:\n\t\t\t\tnotif_msg = notif_msg + \"TEMPERATURE WAS NOT DETECTED\\n\"\n\t\t\tif motion:\n\t\t\t\tnotif_msg = notif_msg + \"DEVICE MOTION WAS DETECTED\\n\"\n\t\t\telse:\n\t\t\t\tnotif_msg = notif_msg + \"DEVICE MOTION WAS NOT DETECTED\\n\"\n\t\t\t\n\t\t\tprint(\"++++++++++++++++++++++++++++++++++++++++++++++\\nSENDING VITALS NOTIFICATIONS\\n++++++++++++++++++++++++++++++++++++++++++++++\\n\")\n\t\t\tif (sendNotifications(notif_msg, notif_msg)):\n\t\t\t\tprint(\"++++++++++++++++++++++++++++++++++++++++++++++\\nVITALS NOTIFICATIONS SENT\\n++++++++++++++++++++++++++++++++++++++++++++++\\n\\n\")\n\t\t\telse:\n\t\t\t\tprint(\"++++++++++++++++++++++++++++++++++++++++++++++\\nVITALS NOTIFICATIONS FAILED TO SEND\\n++++++++++++++++++++++++++++++++++++++++++++++\\n\\n\")\n\t\t\n\t\tmsgRxTime = data.msgRxTime\n\t\tif (time.time() - msgRxTime > 20) and msgRxTime > 0: # Approximation when XBee is sent to sleep mode\n\t\t\tmsgSent = False\n\t\t\tmsgSentTime = 0\n\t\t\tdata.clearValues()\n\t\t\ttime.sleep(1)\n\t\t\nexcept:\n\tdata.stopAsyncXBee()\n\tdata.stopAsyncBPM()\n\tdata.stopAsyncTemp()\n\tdata.closeSerial()\n","sub_path":"CrisisCommunicator.py","file_name":"CrisisCommunicator.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"405193387","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef plot_images(imgs, loc, title=None):\n '''Plot an array of images.\n\n We assume that we are given a matrix of data whose shape is (n*n, s*s) --\n that is, there are n^2 images along the first axis of the array, and each\n image is a square measuring s pixels on a side. Each row of the input will\n be plotted as a sub-region within a single image array containing an n x n\n grid of images.\n '''\n n = int(np.sqrt(len(imgs)))\n assert n * n == len(imgs), 'images array must contain a square number of rows!'\n s = int(np.sqrt(len(imgs[0])))\n assert s * s == len(imgs[0]), 'images must be square!'\n\n img = np.zeros((s * n, s * n), dtype=imgs[0].dtype)\n for i, pix in enumerate(imgs):\n r, c = divmod(i, n)\n img[r * s:(r+1) * s, c * s:(c+1) * s] = pix.reshape((s, s))\n\n ax = plt.gcf().add_subplot(loc)\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set_frame_on(False)\n ax.imshow(img, cmap=plt.cm.gray)\n if title:\n ax.set_title(title)\n\ndef plot_layers(weights, tied_weights=False):\n '''Create a plot of weights, visualized as \"bottom-level\" pixel arrays.'''\n if hasattr(weights[0], 'eval'):\n weights = [w.eval() for w in weights]\n k = min(len(weights), 9)\n imgs = np.eye(weights[0].shape[0])\n for i, weight in enumerate(weights[:-1]):\n imgs = np.dot(weight.T, imgs)\n plot_images(imgs, 100 + 10 * k + i + 1, 'Layer {}'.format(i+1))\n weight = weights[-1]\n if int(np.sqrt(weight.shape[1])) ** 2 != weight.shape[1]:\n return\n if tied_weights:\n imgs = np.dot(weight.T, imgs)\n plot_images(imgs, 100 + 10 * k + k, 'Layer {}'.format(k))\n else:\n plot_images(weight, 100 + 10 * k + k, 'Decoding weights')\n\ndef scale_to_unit_interval(ndar, eps=1e-8):\n \"\"\" Scales all values in the ndarray ndar to be between 0 and 1 \"\"\"\n ndar = ndar.copy()\n ndar -= ndar.min()\n ndar *= 1.0 / (ndar.max() + eps)\n return ndar\n\ndef tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n \"\"\"\n Transform an array with one flattened image per 
row, into an array in\n which images are reshaped and laid out like tiles on a floor.\n\n This function is useful for visualizing datasets whose rows are images,\n and also columns of matrices for transforming those rows\n (such as the first layer of a neural net).\n\n :type X: a 2-D ndarray or a tuple of 4 channels, elements of which can\n be 2-D ndarrays or None;\n :param X: a 2-D array in which every row is a flattened image.\n\n :type img_shape: tuple; (height, width)\n :param img_shape: the original shape of each image\n\n :type tile_shape: tuple; (rows, cols)\n :param tile_shape: the number of images to tile (rows, cols)\n\n :param output_pixel_vals: if output should be pixel values (i.e. int8\n values) or floats\n\n :param scale_rows_to_unit_interval: if the values need to be scaled before\n being plotted to [0,1] or not\n\n\n :returns: array suitable for viewing as an image.\n (See:`PIL.Image.fromarray`.)\n :rtype: a 2-d array with same dtype as X.\n\n \"\"\"\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp\n in zip(img_shape, tile_shape, tile_spacing)]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output np ndarray to store the image\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in range(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(out_shape,\n dtype=dt) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.zeros(out_shape, dtype=dt)\n\n for tile_row in range(tile_shape[0]):\n for tile_col in range(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
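The `utils.py` record above computes the mosaic size as `out_shape = [(ishp + tsp) * tshp - tsp ...]`: each tile contributes its side plus one gap, minus the trailing gap. A minimal standalone sketch of the single-channel path, useful for sanity-checking that arithmetic; the helper name `tile_gray` and the 2x2 demo are illustrative, not taken from the record:

```python
import numpy as np

def tile_gray(X, img_shape, tile_shape, spacing=(1, 1)):
    """Tile flattened grayscale rows into one mosaic (single-channel case)."""
    H, W = img_shape
    Hs, Ws = spacing
    # Same layout arithmetic as above: (side + gap) * tiles - trailing gap.
    out = np.zeros(((H + Hs) * tile_shape[0] - Hs,
                    (W + Ws) * tile_shape[1] - Ws), dtype=X.dtype)
    for idx, row in enumerate(X[:tile_shape[0] * tile_shape[1]]):
        r, c = divmod(idx, tile_shape[1])
        out[r * (H + Hs):r * (H + Hs) + H,
            c * (W + Ws):c * (W + Ws) + W] = row.reshape(img_shape)
    return out

# Four 8x8 images in a 2x2 grid with 1-pixel gaps: (8+1)*2-1 = 17 per side.
demo = tile_gray(np.random.rand(4, 64), (8, 8), (2, 2))
assert demo.shape == (17, 17)
```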
+{"seq_id":"535388887","text":"import sys\nimport time\n\nfrom pymongo import MongoClient\nfrom pymongo.errors import PyMongoError\n\n# Argument\nif len(sys.argv) != 2:\n print(\"You forgot the argument!\\nRetryable Writes: [true|false]\")\n exit(1)\n\nretryWrites = sys.argv[1].lower() == 'true'\nprint(\"Retryable Writes activated: \" + (u'\\U0001f604' if retryWrites else u'\\U0001f622') + \"\\n\")\n\n# Database connection\nclient = MongoClient(host=['mongo1:27017', 'mongo2:27017'], replicaset='replicaTest', retryWrites=retryWrites, w=2)\ndb = client.test\ncollection = db.coll\n\n# Drop collection\ncollection.drop()\n\n# Write\nfor i in range(10000):\n try:\n doc = {'a': i}\n print(\"Write : \" + str(doc))\n collection.insert_one(doc)\n time.sleep(0.05)\n except PyMongoError as e:\n print(e)\n","sub_path":"3-retryable-writes/retryableWrites.py","file_name":"retryableWrites.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"56452963","text":"import media\r\nimport fresh_tomatoes\r\nimport grequests\r\nimport requests\r\nfrom flask import Flask\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\ndef get_video_id(rs):\r\n # Same youtube video id in a dictionary\r\n video_id = {}\r\n for r in rs:\r\n r = r.json()\r\n video_id[r[\"id\"]] = r[\"results\"][0][\"key\"]\r\n return video_id\r\n\r\n\r\ndef create_movie_list():\r\n movies = []\r\n\r\n # Define tmdb api key and neccesary request URI\r\n api_key = input('tmdb API key: ')\r\n img_uri = \"https://image.tmdb.org/t/p/w640\" # noqa\r\n video_uri = \"https://www.youtube.com/watch?v=\" # noqa\r\n video_id_request_uri_pre = \"https://api.themoviedb.org/3/movie/%s/videos?api_key=%s&language=en-US\" # noqa\r\n movie_request_uri_pre = \"https://api.themoviedb.org/3/movie/popular?api_key=%s&language=en-US&page=1\" # noqa\r\n movie_request_uri = movie_request_uri_pre % api_key\r\n\r\n # Retrieve popular movies from tmdb api\r\n payload = \"{}\"\r\n response = requests.request(\"GET\", movie_request_uri, data=payload)\r\n results = response.json()[\"results\"]\r\n\r\n # Retrieve youtube video id asynchrounously\r\n rs = []\r\n for result in results:\r\n rs.append(grequests.get(\r\n video_id_request_uri_pre % (result[\"id\"], api_key)))\r\n rs = grequests.map(rs)\r\n video_id = get_video_id(rs)\r\n\r\n # Create a list of Movie objects\r\n for result in results:\r\n release_year = result[\"release_date\"][0:4]\r\n movies.append(media.Movie(\r\n title=result[\"original_title\"]+\" (\"+release_year+\")\",\r\n poster_image_url=img_uri+result[\"poster_path\"], # noqa\r\n trailer_youtube_url=video_uri+video_id[result[\"id\"]]\r\n ))\r\n\r\n return movies\r\n\r\n\r\ndef main():\r\n # Generate movie website and open in browser\r\n return fresh_tomatoes.open_movies_page(create_movie_list())\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef my_app():\r\n return main()\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=8000)\r\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359503444","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\n\nfrom actlet.storage import *\n\ns_cb = ExcelStorage(r\"2016-09/20160930.xls\", sheetIndex = 0)\nls_cb = s_cb.read()\ndf_cb = pd.DataFrame(ls_cb[1:], columns = [\"date\", \"src_type\", \"bank\", 
\"cardtype\", \"users_count\", \"cards_count\"])\n\ns_bankMappings = ExcelStorage(\"映射数据.xlsx\", sheetName = \"银行名称映射\")\nls_bankMappings = s_bankMappings.read()\ndf_bankMappings = pd.DataFrame(ls_bankMappings[1:], columns = [\"alias\", \"uni_name\"])\nprint(\"Bank Mapping: \", df_bankMappings.shape)\ndf_bms = df_bankMappings.drop_duplicates(subset=[\"alias\"], keep=\"first\")\nprint(\"Bank Mapping Droped Duplicates: \", df_bms.shape)\n\n\ndf_tmp = pd.merge(df_cb, df_bms, left_on = \"bank\", right_on = \"alias\", how=\"left\")\ndf_uniname_nan = df_tmp.loc[pd.isnull(df_tmp[\"uni_name\"]), [\"bank\"]]\ndf_uniname_nan.to_csv(\"uniname_nan.csv\")\n\ndf_tmp.to_csv(\"final.csv\")\n","sub_path":"tests/other_tests/proc_cardbinding.py","file_name":"proc_cardbinding.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389826658","text":"import socket\nimport sys\nimport os\nimport signal\nimport time\nfrom io import BytesIO\nfrom PIL import Image\nfrom subprocess import Popen, PIPE\nimport math\n# Create a UDP socket\nwhile 1:\n UDP_IP_ADDRESS = \"0.0.0.0\"\n UDP_PORT_NO = int(sys.argv[1])\n socket_syn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n mtu=1500-6 # -6 car 6 est la taille de notre numéro de séquence il faut donc l'inclure dans la taille totale de ce qu'on envoie\n #tab_segments= []\n i=1\n j=0\n\n def get_numseq(i): #pour avoir le format: 00000i\n if len(str(i))==1:\n numseq= \"00000\" + str(i)\n if len(str(i))==2:\n numseq= \"0000\" + str(i)\n if len(str(i))==3:\n numseq= \"000\" + str(i)\n if len(str(i))==4:\n numseq= \"00\" + str(i)\n if len(str(i))==5:\n numseq= \"0\" + str(i)\n if len(str(i))==6:\n numseq= str(i)\n return numseq\n\n\n socket_data = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n socket_syn.bind((UDP_IP_ADDRESS, UDP_PORT_NO))\n\n\n\n #sent = socket_syn.sendto(\"SYN\".encode(),(UDP_IP_ADDRESS, UDP_PORT_NO))\n while 1:\n (message, addrclt) = socket_syn.recvfrom(3)\n message= message.decode()\n print (\"message:\", message)\n if message == \"SYN\":\n socket_syn.sendto(b\"SYN-ACK7000\", addrclt)\n rtt1=time.time()\n\n (message, addrclt) = socket_syn.recvfrom(3)\n rtt2=time.time()\n message= message.decode()\n if message==\"ACK\":\n socket_syn.close()\n break\n socket_data.bind((UDP_IP_ADDRESS, 7000))\n print (\"Entering UDP data transfer mode...\")\n fichierrecu= socket_data.recv(mtu).decode() # pour que ça marche il faut mettre exactement la taille du buffer\n fichier= fichierrecu[0:-1]\n f=open(fichier,\"rb\")\n rtt=rtt2-rtt1\n print(rtt)\n socket_data.settimeout(rtt) #les fct bloquantes comme le receive ne le seront plus après ce paramètre\n #TRAITER LE CAS OU CEST UNE IMAGE\n f_size = os.fstat(f.fileno()).st_size\n #-----------------------------------REMPLISSAGE DU TABLEAU DE SEGMENTS A ENVOYER--------------------------------------\n\n tab_segments= []\n while f.tell() < f_size:\n seq_number= get_numseq(j+1).encode() # en bytes\n tab_segments.append(seq_number+f.read(mtu)) # la taille devient 1506\n j+=1\n f.close()\n\n #----------------------------------- FIN REMPLISSAGE DU TABLEAU DE SEGMENTS A ENVOYER----------------\n\n\n #----------------------------------------ENVOI DU TABLEAU AU CLIENT ---------------------------------\n print(\"Sending data from file\", fichierrecu,\"to the client ...\")\n print (len(tab_segments))\n current_segment=1\n sliding_window= 50\n total_segments= len(tab_segments)\n time1= time.time()\n list_ack_received=[]\n while 
current_segment < total_segments: #tant qu'on a pas atteint une valeur superieure au denrier segment\n for segment in tab_segments[current_segment-1:current_segment-1+sliding_window]:#on envoie tous les paquets de la sliding_window\n socket_data.sendto(segment, addrclt) #envoi de tous les segments du premier indice a l'indice+sliding sliding_window\n#------------------------ TRAITEMENT DES ACK-----------------------------------\n\n for segment in tab_segments[current_segment-1:current_segment-1+sliding_window]:#on envoie tous les paquets de la sliding_window\n try:\n list_ack_received.append(int(socket_data.recv(9).decode()[3:9]))\n\n except socket.timeout:\n continue\n\n current_segment=max(list_ack_received)+1\n #else:\n #for segment in tab_segments[current_segment-1:current_segment-1+sliding_window]:\n #segment_formatted=int(segment[0:6].decode())\n #print(\"seg format\",segment_formatted)\n #if segment_formatted not in list_ack_received:\n #print(\"dans la boucle car pas recu ack\")\n #print(\"list ack received\",list_ack_received)\n #print (\"list ack non received:\", list_ack_non_received)\n #print(\"segment pas recu:\",segment_formatted)\n #list_ack_non_received.append(segment_formatted) #si y a dautres segments non acquittés\n #if len(list_ack_non_received) >0:\n #current_segment= min(list_ack_non_received)\n #else:\n\n # list_ack_non_received.pop(list_ack_non_received.index(current_segment))\n # print (\"current segment qui renvoie seg perdu\", current_segment)\n #if list_ack_received.count(segment_formatted) > 1: #si le segment quon vient de recevoir est 2 fois dans la liste des ack passer au segment suivant\n # print(\"dans la boucle car ack répété\")\n # current_segment+=1\n # break\n\n\n\n\n#------------------------ FIN TRAITEMENT DES ACK--------------------------------\n\n\n #----------------------FIN DE TRANSFERT FICHIER---------------------------\n time2= time.time()\n\n print (\"taille fichier: \",f_size)\n socket_data.sendto(b\"FIN\", addrclt)\n print(\"Data sent. 
End of communication.\")\n bitrate= (f_size * 10**-6) / (time2-time1)\n print (\"Bitrate:\", round(bitrate,3),\"Mo/s\")\n","sub_path":"udp_server_client1.py","file_name":"udp_server_client1.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"485019526","text":"import FWCore.ParameterSet.Config as cms\nimport os\n#import FWCore.Utilities.FileUtils as FileUtils\n#mylist = FileUtils.loadListFromFile ('MiniAOD_2.txt')\n#infiles = cms.untracked.vstring(*mylist)\n\nFILE1=os.environ.get('FILE1')\nFILE2=os.environ.get('FILE2')\n\n\nprocess = cms.Process(\"Rootuple\")\n\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.Reconstruction_cff')\n\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')\n# from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag\n# process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data')\nprocess.GlobalTag.globaltag = cms.string('106X_upgrade2018_realistic_v4')\n\n\n#process.MessageLogger.cerr.FwkReport.reportEvery = 1000\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\nprocess.options.allowUnscheduled = cms.untracked.bool(True)\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\n#process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(100))\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n #'/store/data/Run2016C/Charmonium/MINIAOD/17Jul2018-v1/20000/9C03CBE2-4B8B-E811-9299-0CC47AC17678.root',\n #'file:6EF2991A-8E4C-C94A-BE5E-3E5D0A17DE08.root',\n #'/store/data/Run2018B/DoubleMuonLowMass/MINIAOD/17Sep2018-v1/60000/6EF2991A-8E4C-C94A-BE5E-3E5D0A17DE08.root',\n# '/store/data/Run2018B/DoubleMuonLowMass/MINIAOD/17Sep2018-v1/100000/32AB25E3-8BF6-ED4F-A72B-FF5E7036C416.root',\n#\t\t\t\t'file:root://se01.indiacms.res.in:1094//cms/store/user/digupta/BctoDsMuMu_MC_MINIAOD/BctoDsMuMu_MC_MINIAOD_205.root',\n#\t\t\t\tmylist,\n\t\t\t\tFILE1,\n #'/store/data/Run2016H/DoubleMuonLowMass/MINIAOD/17Jul2018-v1/50000/9A79D8BA-D38B-E811-BCD3-0090FAA57AE0.root', \n )\n)\n\nprocess.load(\"slimmedMuonsTriggerMatcher_cfi\")\n\nprocess.load(\"bctodsmumu_analysis.BcToDsMuMuPAT.BcToDsMuMuRootupler_cfi\")\nprocess.rootuple.isMC = cms.bool(True)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n\n fileName = cms.string(FILE2),\n# fileName = cms.string('BctoDsMuMu_ntuple_v1.root'),\n# fileName = cms.string('BctoDsMuMu_ntuple_test4_For_1_MiniAOD.root'),\n# fileName = cms.string('BctoDsMuMu_ntuple_v2.root'),\n# fileName = cms.string('BctoDsMuMu_ntuple_v3.root')\n# fileName = cms.string('BctoDsMuMu_ntuple_mc_v1.root'),\n)\n\nprocess.p = cms.Path(process.slimmedMuonsWithTriggerSequence *process.rootuple)\n\n","sub_path":"BcToDsMuMuPAT/Test/BcToDsMuMuRootupler.py","file_name":"BcToDsMuMuRootupler.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27856936","text":"# -*- coding: utf-8 -*-\n\"\"\"\n pyvisa.constants\n ~~~~~~~~~~~~~~~~\n\n VISA VPP-4.3 constants (VPP-4.3.2 spec, section 3).\n\n Makes all \"completion and error codes\", \"attribute values\", \"event type\n values\", and \"values and ranges\" defined in the VISA specification VPP-4.3.2,\n section 3, available 
as variable values.\n\n The module exports the values under the original, all-uppercase names.\n\n This file is part of PyVISA.\n\n :copyright: 2014 by PyVISA Authors, see AUTHORS for more details.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import division, unicode_literals, print_function, absolute_import\n\nimport enum\n\n# _to_int() is necessary because the VISA specification is flawed: It defines\n# the VISA codes, which have a value less than zero, in their internal 32-bit\n# signed integer representation. However, this is positive. ctypes doesn't\n# care about that and (correctly) returns the negative value, which is left as\n# such by Python.\n#\n\n\ndef _to_int(x):\n \"\"\"Converts a completion and error code as it is listed in 32-bit notation\n in the VPP-4.3.2 specification to the actual integer value.\n \"\"\"\n if x > 0x7FFFFFFF:\n return int(x - 0x100000000)\n else:\n return int(x)\n\nVI_SUCCESS = _to_int(0x00000000)\nVI_SUCCESS_EVENT_EN = _to_int(0x3FFF0002)\nVI_SUCCESS_EVENT_DIS = _to_int(0x3FFF0003)\nVI_SUCCESS_QUEUE_EMPTY = _to_int(0x3FFF0004)\nVI_SUCCESS_TERM_CHAR = _to_int(0x3FFF0005)\nVI_SUCCESS_MAX_CNT = _to_int(0x3FFF0006)\nVI_SUCCESS_DEV_NPRESENT = _to_int(0x3FFF007D)\nVI_SUCCESS_TRIG_MAPPED = _to_int(0x3FFF007E)\nVI_SUCCESS_QUEUE_NEMPTY = _to_int(0x3FFF0080)\nVI_SUCCESS_NCHAIN = _to_int(0x3FFF0098)\nVI_SUCCESS_NESTED_SHARED = _to_int(0x3FFF0099)\nVI_SUCCESS_NESTED_EXCLUSIVE = _to_int(0x3FFF009A)\nVI_SUCCESS_SYNC = _to_int(0x3FFF009B)\n\nVI_WARN_QUEUE_OVERFLOW = _to_int(0x3FFF000C)\nVI_WARN_CONFIG_NLOADED = _to_int(0x3FFF0077)\nVI_WARN_NULL_OBJECT = _to_int(0x3FFF0082)\nVI_WARN_NSUP_ATTR_STATE = _to_int(0x3FFF0084)\nVI_WARN_UNKNOWN_STATUS = _to_int(0x3FFF0085)\nVI_WARN_NSUP_BUF = _to_int(0x3FFF0088)\n\n# The following one is a non-standard NI extension\nVI_WARN_EXT_FUNC_NIMPL = _to_int(0x3FFF00A9)\n\nVI_ERROR_SYSTEM_ERROR = _to_int(0xBFFF0000)\nVI_ERROR_INV_OBJECT = _to_int(0xBFFF000E)\nVI_ERROR_RSRC_LOCKED = _to_int(0xBFFF000F)\nVI_ERROR_INV_EXPR = _to_int(0xBFFF0010)\nVI_ERROR_RSRC_NFOUND = _to_int(0xBFFF0011)\nVI_ERROR_INV_RSRC_NAME = _to_int(0xBFFF0012)\nVI_ERROR_INV_ACC_MODE = _to_int(0xBFFF0013)\nVI_ERROR_TMO = _to_int(0xBFFF0015)\nVI_ERROR_CLOSING_FAILED = _to_int(0xBFFF0016)\nVI_ERROR_INV_DEGREE = _to_int(0xBFFF001B)\nVI_ERROR_INV_JOB_ID = _to_int(0xBFFF001C)\nVI_ERROR_NSUP_ATTR = _to_int(0xBFFF001D)\nVI_ERROR_NSUP_ATTR_STATE = _to_int(0xBFFF001E)\nVI_ERROR_ATTR_READONLY = _to_int(0xBFFF001F)\nVI_ERROR_INV_LOCK_TYPE = _to_int(0xBFFF0020)\nVI_ERROR_INV_ACCESS_KEY = _to_int(0xBFFF0021)\nVI_ERROR_INV_EVENT = _to_int(0xBFFF0026)\nVI_ERROR_INV_MECH = _to_int(0xBFFF0027)\nVI_ERROR_HNDLR_NINSTALLED = _to_int(0xBFFF0028)\nVI_ERROR_INV_HNDLR_REF = _to_int(0xBFFF0029)\nVI_ERROR_INV_CONTEXT = _to_int(0xBFFF002A)\nVI_ERROR_QUEUE_OVERFLOW = _to_int(0xBFFF002D)\nVI_ERROR_NENABLED = _to_int(0xBFFF002F)\nVI_ERROR_ABORT = _to_int(0xBFFF0030)\nVI_ERROR_RAW_WR_PROT_VIOL = _to_int(0xBFFF0034)\nVI_ERROR_RAW_RD_PROT_VIOL = _to_int(0xBFFF0035)\nVI_ERROR_OUTP_PROT_VIOL = _to_int(0xBFFF0036)\nVI_ERROR_INP_PROT_VIOL = _to_int(0xBFFF0037)\nVI_ERROR_BERR = _to_int(0xBFFF0038)\nVI_ERROR_IN_PROGRESS = _to_int(0xBFFF0039)\nVI_ERROR_INV_SETUP = _to_int(0xBFFF003A)\nVI_ERROR_QUEUE_ERROR = _to_int(0xBFFF003B)\nVI_ERROR_ALLOC = _to_int(0xBFFF003C)\nVI_ERROR_INV_MASK = _to_int(0xBFFF003D)\nVI_ERROR_IO = _to_int(0xBFFF003E)\nVI_ERROR_INV_FMT = _to_int(0xBFFF003F)\nVI_ERROR_NSUP_FMT = _to_int(0xBFFF0041)\nVI_ERROR_LINE_IN_USE = 
_to_int(0xBFFF0042)\nVI_ERROR_NSUP_MODE = _to_int(0xBFFF0046)\nVI_ERROR_SRQ_NOCCURRED = _to_int(0xBFFF004A)\nVI_ERROR_INV_SPACE = _to_int(0xBFFF004E)\nVI_ERROR_INV_OFFSET = _to_int(0xBFFF0051)\nVI_ERROR_INV_WIDTH = _to_int(0xBFFF0052)\nVI_ERROR_NSUP_OFFSET = _to_int(0xBFFF0054)\nVI_ERROR_NSUP_VAR_WIDTH = _to_int(0xBFFF0055)\nVI_ERROR_WINDOW_NMAPPED = _to_int(0xBFFF0057)\nVI_ERROR_RESP_PENDING = _to_int(0xBFFF0059)\nVI_ERROR_NLISTENERS = _to_int(0xBFFF005F)\nVI_ERROR_NCIC = _to_int(0xBFFF0060)\nVI_ERROR_NSYS_CNTLR = _to_int(0xBFFF0061)\nVI_ERROR_NSUP_OPER = _to_int(0xBFFF0067)\nVI_ERROR_INTR_PENDING = _to_int(0xBFFF0068)\nVI_ERROR_ASRL_PARITY = _to_int(0xBFFF006A)\nVI_ERROR_ASRL_FRAMING = _to_int(0xBFFF006B)\nVI_ERROR_ASRL_OVERRUN = _to_int(0xBFFF006C)\nVI_ERROR_TRIG_NMAPPED = _to_int(0xBFFF006E)\nVI_ERROR_NSUP_ALIGN_OFFSET = _to_int(0xBFFF0070)\nVI_ERROR_USER_BUF = _to_int(0xBFFF0071)\nVI_ERROR_RSRC_BUSY = _to_int(0xBFFF0072)\nVI_ERROR_NSUP_WIDTH = _to_int(0xBFFF0076)\nVI_ERROR_INV_PARAMETER = _to_int(0xBFFF0078)\nVI_ERROR_INV_PROT = _to_int(0xBFFF0079)\nVI_ERROR_INV_SIZE = _to_int(0xBFFF007B)\nVI_ERROR_WINDOW_MAPPED = _to_int(0xBFFF0080)\nVI_ERROR_NIMPL_OPER = _to_int(0xBFFF0081)\nVI_ERROR_INV_LENGTH = _to_int(0xBFFF0083)\nVI_ERROR_INV_MODE = _to_int(0xBFFF0091)\nVI_ERROR_SESN_NLOCKED = _to_int(0xBFFF009C)\nVI_ERROR_MEM_NSHARED = _to_int(0xBFFF009D)\nVI_ERROR_LIBRARY_NFOUND = _to_int(0xBFFF009E)\nVI_ERROR_NSUP_INTR = _to_int(0xBFFF009F)\nVI_ERROR_INV_LINE = _to_int(0xBFFF00A0)\nVI_ERROR_FILE_ACCESS = _to_int(0xBFFF00A1)\nVI_ERROR_FILE_IO = _to_int(0xBFFF00A2)\nVI_ERROR_NSUP_LINE = _to_int(0xBFFF00A3)\nVI_ERROR_NSUP_MECH = _to_int(0xBFFF00A4)\nVI_ERROR_INTF_NUM_NCONFIG = _to_int(0xBFFF00A5)\nVI_ERROR_CONN_LOST = _to_int(0xBFFF00A6)\n\n# The following two are a non-standard NI extensions\nVI_ERROR_MACHINE_NAVAIL = _to_int(0xBFFF00A7)\nVI_ERROR_NPERMISSION = _to_int(0xBFFF00A8)\n\n\n#\n# Attribute constants\n#\n# All attribute codes are unsigned long, so no _to_int() is necessary.\n#\n\nVI_ATTR_RSRC_CLASS = 0xBFFF0001\nVI_ATTR_RSRC_NAME = 0xBFFF0002\nVI_ATTR_RSRC_IMPL_VERSION = 0x3FFF0003\nVI_ATTR_RSRC_LOCK_STATE = 0x3FFF0004\nVI_ATTR_MAX_QUEUE_LENGTH = 0x3FFF0005\nVI_ATTR_USER_DATA = 0x3FFF0007\nVI_ATTR_FDC_CHNL = 0x3FFF000D\nVI_ATTR_FDC_MODE = 0x3FFF000F\nVI_ATTR_FDC_GEN_SIGNAL_EN = 0x3FFF0011\nVI_ATTR_FDC_USE_PAIR = 0x3FFF0013\nVI_ATTR_SEND_END_EN = 0x3FFF0016\nVI_ATTR_TERMCHAR = 0x3FFF0018\nVI_ATTR_TMO_VALUE = 0x3FFF001A\nVI_ATTR_GPIB_READDR_EN = 0x3FFF001B\nVI_ATTR_IO_PROT = 0x3FFF001C\nVI_ATTR_DMA_ALLOW_EN = 0x3FFF001E\nVI_ATTR_ASRL_BAUD = 0x3FFF0021\nVI_ATTR_ASRL_DATA_BITS = 0x3FFF0022\nVI_ATTR_ASRL_PARITY = 0x3FFF0023\nVI_ATTR_ASRL_STOP_BITS = 0x3FFF0024\nVI_ATTR_ASRL_FLOW_CNTRL = 0x3FFF0025\n\nVI_ATTR_ASRL_DISCARD_NULL = 0x3FFF00B0\nVI_ATTR_ASRL_CONNECTED = 0x3FFF01BB\nVI_ATTR_ASRL_BREAK_STATE = 0x3FFF01BC\nVI_ATTR_ASRL_BREAK_LEN = 0x3FFF01BD\nVI_ATTR_ASRL_ALLOW_TRANSMIT = 0x3FFF01BE\nVI_ATTR_ASRL_WIRE_MODE = 0x3FFF01BF\n\nVI_ATTR_RD_BUF_OPER_MODE = 0x3FFF002A\nVI_ATTR_RD_BUF_SIZE = 0x3FFF002B\nVI_ATTR_WR_BUF_OPER_MODE = 0x3FFF002D\nVI_ATTR_WR_BUF_SIZE = 0x3FFF002E\nVI_ATTR_SUPPRESS_END_EN = 0x3FFF0036\nVI_ATTR_TERMCHAR_EN = 0x3FFF0038\nVI_ATTR_DEST_ACCESS_PRIV = 0x3FFF0039\nVI_ATTR_DEST_BYTE_ORDER = 0x3FFF003A\nVI_ATTR_SRC_ACCESS_PRIV = 0x3FFF003C\nVI_ATTR_SRC_BYTE_ORDER = 0x3FFF003D\nVI_ATTR_SRC_INCREMENT = 0x3FFF0040\nVI_ATTR_DEST_INCREMENT = 0x3FFF0041\nVI_ATTR_WIN_ACCESS_PRIV = 0x3FFF0045\nVI_ATTR_WIN_BYTE_ORDER = 0x3FFF0047\nVI_ATTR_GPIB_ATN_STATE = 
0x3FFF0057\nVI_ATTR_GPIB_ADDR_STATE = 0x3FFF005C\nVI_ATTR_GPIB_CIC_STATE = 0x3FFF005E\nVI_ATTR_GPIB_NDAC_STATE = 0x3FFF0062\nVI_ATTR_GPIB_SRQ_STATE = 0x3FFF0067\nVI_ATTR_GPIB_SYS_CNTRL_STATE = 0x3FFF0068\nVI_ATTR_GPIB_HS488_CBL_LEN = 0x3FFF0069\nVI_ATTR_CMDR_LA = 0x3FFF006B\nVI_ATTR_VXI_DEV_CLASS = 0x3FFF006C\nVI_ATTR_MAINFRAME_LA = 0x3FFF0070\nVI_ATTR_MANF_NAME = 0xBFFF0072\nVI_ATTR_MODEL_NAME = 0xBFFF0077\nVI_ATTR_VXI_VME_INTR_STATUS = 0x3FFF008B\nVI_ATTR_VXI_TRIG_STATUS = 0x3FFF008D\nVI_ATTR_VXI_VME_SYSFAIL_STATE = 0x3FFF0094\nVI_ATTR_WIN_BASE_ADDR = 0x3FFF0098\nVI_ATTR_WIN_SIZE = 0x3FFF009A\nVI_ATTR_ASRL_AVAIL_NUM = 0x3FFF00AC\nVI_ATTR_MEM_BASE = 0x3FFF00AD\nVI_ATTR_ASRL_CTS_STATE = 0x3FFF00AE\nVI_ATTR_ASRL_DCD_STATE = 0x3FFF00AF\nVI_ATTR_ASRL_DSR_STATE = 0x3FFF00B1\nVI_ATTR_ASRL_DTR_STATE = 0x3FFF00B2\nVI_ATTR_ASRL_END_IN = 0x3FFF00B3\nVI_ATTR_ASRL_END_OUT = 0x3FFF00B4\nVI_ATTR_ASRL_REPLACE_CHAR = 0x3FFF00BE\nVI_ATTR_ASRL_RI_STATE = 0x3FFF00BF\nVI_ATTR_ASRL_RTS_STATE = 0x3FFF00C0\nVI_ATTR_ASRL_XON_CHAR = 0x3FFF00C1\nVI_ATTR_ASRL_XOFF_CHAR = 0x3FFF00C2\nVI_ATTR_WIN_ACCESS = 0x3FFF00C3\nVI_ATTR_RM_SESSION = 0x3FFF00C4\nVI_ATTR_VXI_LA = 0x3FFF00D5\nVI_ATTR_MANF_ID = 0x3FFF00D9\nVI_ATTR_MEM_SIZE = 0x3FFF00DD\nVI_ATTR_MEM_SPACE = 0x3FFF00DE\nVI_ATTR_MODEL_CODE = 0x3FFF00DF\nVI_ATTR_SLOT = 0x3FFF00E8\nVI_ATTR_INTF_INST_NAME = 0xBFFF00E9\nVI_ATTR_IMMEDIATE_SERV = 0x3FFF0100\nVI_ATTR_INTF_PARENT_NUM = 0x3FFF0101\nVI_ATTR_RSRC_SPEC_VERSION = 0x3FFF0170\nVI_ATTR_INTF_TYPE = 0x3FFF0171\nVI_ATTR_GPIB_PRIMARY_ADDR = 0x3FFF0172\nVI_ATTR_GPIB_SECONDARY_ADDR = 0x3FFF0173\nVI_ATTR_RSRC_MANF_NAME = 0xBFFF0174\nVI_ATTR_RSRC_MANF_ID = 0x3FFF0175\nVI_ATTR_INTF_NUM = 0x3FFF0176\nVI_ATTR_TRIG_ID = 0x3FFF0177\nVI_ATTR_GPIB_REN_STATE = 0x3FFF0181\nVI_ATTR_GPIB_UNADDR_EN = 0x3FFF0184\nVI_ATTR_DEV_STATUS_BYTE = 0x3FFF0189\nVI_ATTR_FILE_APPEND_EN = 0x3FFF0192\nVI_ATTR_VXI_TRIG_SUPPORT = 0x3FFF0194\nVI_ATTR_TCPIP_ADDR = 0xBFFF0195\nVI_ATTR_TCPIP_HOSTNAME = 0xBFFF0196\nVI_ATTR_TCPIP_PORT = 0x3FFF0197\nVI_ATTR_TCPIP_DEVICE_NAME = 0xBFFF0199\nVI_ATTR_TCPIP_NODELAY = 0x3FFF019A\nVI_ATTR_TCPIP_KEEPALIVE = 0x3FFF019B\nVI_ATTR_4882_COMPLIANT = 0x3FFF019F\nVI_ATTR_USB_SERIAL_NUM = 0xBFFF01A0\nVI_ATTR_USB_INTFC_NUM = 0x3FFF01A1\nVI_ATTR_USB_PROTOCOL = 0x3FFF01A7\nVI_ATTR_USB_MAX_INTR_SIZE = 0x3FFF01AF\n\nVI_ATTR_JOB_ID = 0x3FFF4006\nVI_ATTR_EVENT_TYPE = 0x3FFF4010\nVI_ATTR_SIGP_STATUS_ID = 0x3FFF4011\nVI_ATTR_RECV_TRIG_ID = 0x3FFF4012\nVI_ATTR_INTR_STATUS_ID = 0x3FFF4023\nVI_ATTR_STATUS = 0x3FFF4025\nVI_ATTR_RET_COUNT = 0x3FFF4026\nVI_ATTR_BUFFER = 0x3FFF4027\nVI_ATTR_RECV_INTR_LEVEL = 0x3FFF4041\nVI_ATTR_OPER_NAME = 0xBFFF4042\nVI_ATTR_GPIB_RECV_CIC_STATE = 0x3FFF4193\nVI_ATTR_RECV_TCPIP_ADDR = 0xBFFF4198\nVI_ATTR_USB_RECV_INTR_SIZE = 0x3FFF41B0\nVI_ATTR_USB_RECV_INTR_DATA = 0xBFFF41B1\n\n\n#\n# Event Types\n#\n# All event codes are unsigned long, so no _to_int() is necessary.\n#\n\nVI_EVENT_IO_COMPLETION = 0x3FFF2009\nVI_EVENT_TRIG = 0xBFFF200A\nVI_EVENT_SERVICE_REQ = 0x3FFF200B\nVI_EVENT_CLEAR = 0x3FFF200D\nVI_EVENT_EXCEPTION = 0xBFFF200E\nVI_EVENT_GPIB_CIC = 0x3FFF2012\nVI_EVENT_GPIB_TALK = 0x3FFF2013\nVI_EVENT_GPIB_LISTEN = 0x3FFF2014\nVI_EVENT_VXI_VME_SYSFAIL = 0x3FFF201D\nVI_EVENT_VXI_VME_SYSRESET = 0x3FFF201E\nVI_EVENT_VXI_SIGP = 0x3FFF2020\nVI_EVENT_VXI_VME_INTR = 0xBFFF2021\nVI_EVENT_TCPIP_CONNECT = 0x3FFF2036\nVI_EVENT_USB_INTR = 0x3FFF2037\n\nVI_ALL_ENABLED_EVENTS = 0x3FFF7FFF\n\n\n#\n# Values and Ranges\n#\n\nVI_FIND_BUFLEN = 256\nVI_NULL = 0\n\nVI_TRUE = 1\nVI_FALSE = 0\n\nVI_INTF_GPIB = 1\nVI_INTF_VXI = 
2\nVI_INTF_GPIB_VXI = 3\nVI_INTF_ASRL = 4\nVI_INTF_PXI = 5\nVI_INTF_TCPIP = 6\nVI_INTF_USB = 7\nVI_INTF_RIO = 8\nVI_INTF_FIREWIRE = 9\n\nVI_PROT_NORMAL = 1\nVI_PROT_FDC = 2\nVI_PROT_HS488 = 3\nVI_PROT_4882_STRS = 4\nVI_PROT_USBTMC_VENDOR = 5\n\nVI_FDC_NORMAL = 1\nVI_FDC_STREAM = 2\n\nVI_LOCAL_SPACE = 0\nVI_A16_SPACE = 1\nVI_A24_SPACE = 2\nVI_A32_SPACE = 3\nVI_OPAQUE_SPACE = 0xFFFF\n\nVI_UNKNOWN_LA = -1\nVI_UNKNOWN_SLOT = -1\nVI_UNKNOWN_LEVEL = -1\n\nVI_QUEUE = 1\nVI_HNDLR = 2\nVI_SUSPEND_HNDLR = 4\nVI_ALL_MECH = 0xFFFF\n\nVI_ANY_HNDLR = 0\n\nVI_TRIG_ALL = -2\nVI_TRIG_SW = -1\nVI_TRIG_TTL0 = 0\nVI_TRIG_TTL1 = 1\nVI_TRIG_TTL2 = 2\nVI_TRIG_TTL3 = 3\nVI_TRIG_TTL4 = 4\nVI_TRIG_TTL5 = 5\nVI_TRIG_TTL6 = 6\nVI_TRIG_TTL7 = 7\nVI_TRIG_ECL0 = 8\nVI_TRIG_ECL1 = 9\nVI_TRIG_PANEL_IN = 27\nVI_TRIG_PANEL_OUT = 28\n\nVI_TRIG_PROT_DEFAULT = 0\nVI_TRIG_PROT_ON = 1\nVI_TRIG_PROT_OFF = 2\nVI_TRIG_PROT_SYNC = 5\n\nVI_READ_BUF = 1\nVI_WRITE_BUF = 2\nVI_READ_BUF_DISCARD = 4\nVI_WRITE_BUF_DISCARD = 8\nVI_IO_IN_BUF = 16\nVI_IO_OUT_BUF = 32\nVI_IO_IN_BUF_DISCARD = 64\nVI_IO_OUT_BUF_DISCARD = 128\n\nVI_FLUSH_ON_ACCESS = 1\nVI_FLUSH_WHEN_FULL = 2\nVI_FLUSH_DISABLE = 3\n\nVI_NMAPPED = 1\nVI_USE_OPERS = 2\nVI_DEREF_ADDR = 3\n\nVI_TMO_IMMEDIATE = 0\n# Attention! The following is *really* positive! (unsigned long)\nVI_TMO_INFINITE = 0xFFFFFFFF\n\nVI_NO_LOCK = 0\nVI_EXCLUSIVE_LOCK = 1\nVI_SHARED_LOCK = 2\nVI_LOAD_CONFIG = 4\n\nVI_NO_SEC_ADDR = 0xFFFF\n\nVI_ASRL_PAR_NONE = 0\nVI_ASRL_PAR_ODD = 1\nVI_ASRL_PAR_EVEN = 2\nVI_ASRL_PAR_MARK = 3\nVI_ASRL_PAR_SPACE = 4\n\nVI_ASRL_STOP_ONE = 10\nVI_ASRL_STOP_ONE5 = 15\nVI_ASRL_STOP_TWO = 20\n\nVI_ASRL_FLOW_NONE = 0\nVI_ASRL_FLOW_XON_XOFF = 1\nVI_ASRL_FLOW_RTS_CTS = 2\nVI_ASRL_FLOW_DTR_DSR = 4\n\nVI_ASRL_END_NONE = 0\nVI_ASRL_END_LAST_BIT = 1\nVI_ASRL_END_TERMCHAR = 2\nVI_ASRL_END_BREAK = 3\n\nVI_STATE_ASSERTED = 1\nVI_STATE_UNASSERTED = 0\nVI_STATE_UNKNOWN = -1\n\nVI_BIG_ENDIAN = 0\nVI_LITTLE_ENDIAN = 1\n\nVI_DATA_PRIV = 0\nVI_DATA_NPRIV = 1\nVI_PROG_PRIV = 2\nVI_PROG_NPRIV = 3\nVI_BLCK_PRIV = 4\nVI_BLCK_NPRIV = 5\nVI_D64_PRIV = 6\nVI_D64_NPRIV = 7\n\nVI_WIDTH_8 = 1\nVI_WIDTH_16 = 2\nVI_WIDTH_32 = 4\n\nVI_GPIB_REN_DEASSERT = 0\nVI_GPIB_REN_ASSERT = 1\nVI_GPIB_REN_DEASSERT_GTL = 2\nVI_GPIB_REN_ASSERT_ADDRESS = 3\nVI_GPIB_REN_ASSERT_LLO = 4\nVI_GPIB_REN_ASSERT_ADDRESS_LLO = 5\nVI_GPIB_REN_ADDRESS_GTL = 6\n\nVI_GPIB_ATN_DEASSERT = 0\nVI_GPIB_ATN_ASSERT = 1\nVI_GPIB_ATN_DEASSERT_HANDSHAKE = 2\nVI_GPIB_ATN_ASSERT_IMMEDIATE = 3\n\nVI_GPIB_HS488_DISABLED = 0\nVI_GPIB_HS488_NIMPL = -1\n\nVI_GPIB_UNADDRESSED = 0\nVI_GPIB_TALKER = 1\nVI_GPIB_LISTENER = 2\n\nVI_VXI_CMD16 = 0x0200\nVI_VXI_CMD16_RESP16 = 0x0202\nVI_VXI_RESP16 = 0x0002\nVI_VXI_CMD32 = 0x0400\nVI_VXI_CMD32_RESP16 = 0x0402\nVI_VXI_CMD32_RESP32 = 0x0404\nVI_VXI_RESP32 = 0x0004\n\nVI_ASSERT_SIGNAL = -1\nVI_ASSERT_USE_ASSIGNED = 0\nVI_ASSERT_IRQ1 = 1\nVI_ASSERT_IRQ2 = 2\nVI_ASSERT_IRQ3 = 3\nVI_ASSERT_IRQ4 = 4\nVI_ASSERT_IRQ5 = 5\nVI_ASSERT_IRQ6 = 6\nVI_ASSERT_IRQ7 = 7\n\nVI_UTIL_ASSERT_SYSRESET = 1\nVI_UTIL_ASSERT_SYSFAIL = 2\nVI_UTIL_DEASSERT_SYSFAIL = 3\n\nVI_VXI_CLASS_MEMORY = 0\nVI_VXI_CLASS_EXTENDED = 1\nVI_VXI_CLASS_MESSAGE = 2\nVI_VXI_CLASS_REGISTER = 3\nVI_VXI_CLASS_OTHER = 4\n\n# \"Backwards compatibility\" according to NI\n\nVI_NORMAL = VI_PROT_NORMAL\nVI_FDC = VI_PROT_FDC\nVI_HS488 = VI_PROT_HS488\nVI_ASRL488 = VI_PROT_4882_STRS\nVI_ASRL_IN_BUF = VI_IO_IN_BUF\nVI_ASRL_OUT_BUF = VI_IO_OUT_BUF\nVI_ASRL_IN_BUF_DISCARD = VI_IO_IN_BUF_DISCARD\nVI_ASRL_OUT_BUF_DISCARD = VI_IO_OUT_BUF_DISCARD\n\n\n# Enums\n\nclass 
AccessModes(enum.IntEnum):\n\n #: Does not obtain any lock on the VISA resource.\n no_lock = 0\n\n #: Obtains an exclusive lock on the VISA resource.\n exclusive_lock = 1\n\n #: Obtains a lock on the VISA resource which may be shared\n #: between multiple VISA sessions.\n shared_lock = 2\n\n\nclass StopBits(enum.IntEnum):\n \"\"\"The number of stop bits that indicate the end of a frame.\n \"\"\"\n one = VI_ASRL_STOP_ONE\n one_and_a_half = VI_ASRL_STOP_ONE5\n two = VI_ASRL_STOP_TWO\n\n\nclass Parity(enum.IntEnum):\n \"\"\"The parity types to use with every frame transmitted and received on a serial session.\n \"\"\"\n none = VI_ASRL_PAR_NONE\n odd = VI_ASRL_PAR_ODD\n even = VI_ASRL_PAR_EVEN\n mark = VI_ASRL_PAR_MARK\n space = VI_ASRL_PAR_SPACE\n\n\nclass SerialTermination(enum.IntEnum):\n \"\"\"The available methods for terminating a serial transfer.\n \"\"\"\n\n #: The transfer terminates when all requested data is transferred\n #: or when an error occurs.\n none = VI_ASRL_END_NONE\n\n #: The transfer occurs with the last bit not set until the last\n #: character is sent.\n last_bit = VI_ASRL_END_LAST_BIT\n\n #: The transfer terminates by searching for \"/\"\n #: appending the termination character.\n termination_char = VI_ASRL_END_TERMCHAR\n\n #: The write transmits a break after all the characters for the\n #: write are sent.\n termination_break = VI_ASRL_END_BREAK\n\n\nclass InterfaceType(enum.IntEnum):\n \"\"\"The hardware interface\n \"\"\"\n gpib = VI_INTF_GPIB\n vxi = VI_INTF_VXI\n gpib_vxi = VI_INTF_GPIB_VXI\n asrl = VI_INTF_ASRL\n pxi = VI_INTF_PXI\n tcpip = VI_INTF_TCPIP\n usb = VI_INTF_USB\n rio = VI_INTF_RIO\n firewire = VI_INTF_FIREWIRE\n\n\nclass AddressState(enum.IntEnum):\n\n unaddressed = VI_GPIB_UNADDRESSED\n talker = VI_GPIB_TALKER\n listener = VI_GPIB_LISTENER\n\n\nclass IOProtocol(enum.IntEnum):\n\n normal = VI_PROT_NORMAL\n\n #: Fast data channel (FDC) protocol for VXI\n fdc = VI_PROT_FDC\n\n #: High speed 488 transfer for GPIB\n hs488 = VI_PROT_HS488\n\n #: 488 style transfer for serial\n protocol4882_strs = VI_PROT_4882_STRS\n\n #: Test measurement class vendor specific for USB\n usbtmc_vendor = VI_PROT_USBTMC_VENDOR\n\n\nclass LineState(enum.IntEnum):\n\n asserted = VI_STATE_ASSERTED\n unasserted = VI_STATE_UNASSERTED\n unknown = VI_STATE_UNKNOWN\n","sub_path":"pyvisa/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":20096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96312535","text":"import time\n\nn = 10000000\nstart = time.time()\n\na, b = range(n), range(n)\nc = []\n\nfor i in a:\n\tc.append(a[i] * b[i])\n\nt = time.time() - start\nprint(\"Time: %s\" % t)\n\n","sub_path":"codes/ufjf/sample_4/fill_array_classic.py","file_name":"fill_array_classic.py","file_ext":"py","file_size_in_byte":165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"129256202","text":"#! 
/usr/bin/env python\n__author__ = 'Sean Yu'\n'''created @2015/10/11''' \nif __name__ == '__main__':\n import sys\n foldername ='..'# 'c:/workspace/gotest1' #'C:\\\\work\\\\auto\\\\dash2.1'##u'C:\\\\work\\\\auto\\\\workspace\\\\dash-ia' #'C:\\\\work\\\\auto\\\\winAI\\\\IA'#sys.argv[1]\n import os\n\n import re\n pComment=re.compile('\\s*(#).*',re.MULTILINE|re.DOTALL)\n pBlank = re.compile('^\\s*$', re.MULTILINE|re.DOTALL)\n def counterOfFile(filename):\n counter = 0\n with open(filename ) as f: #\n for line in f.readlines():\n if re.match(pComment, line):\n continue\n elif re.match(pBlank, line):\n continue\n else:\n counter+=1\n return counter, filename\n\n def counterOfFolder(foldername):\n allItems = os.listdir(foldername)\n counter =0\n message = ''\n for item in allItems:\n filename = foldername+'/'+item\n if os.path.isfile(filename) and filename.endswith('.py'):\n tmpcounter, filename = counterOfFile(filename)\n counter+=tmpcounter\n message += '\\n%10d, %s'%(tmpcounter,filename)\n elif os.path.isdir(filename):\n subfoldername = foldername+'/'+item\n tmpcounter , folder =counterOfFolder(subfoldername)\n counter+=tmpcounter\n if tmpcounter:\n message += '\\n\\n%10d, %s%s'%(tmpcounter,filename,folder.replace('\\n','\\n\\t'))\n return counter, message\n\n counter ,message= counterOfFolder(foldername)\n print(message)\n print('Total Number Of Python Code in %s: %d'%(foldername,counter))\n\n\n\n","sub_path":"bin/NLC_counter.py","file_name":"NLC_counter.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"110813906","text":"from deepecho.benchmark import run_benchmark\nfrom deepecho.benchmark.dataset import Dataset\n\n\ndef test_run_benchmark():\n models = [('PARModel', {'epochs': 5, 'cuda': False, 'verbose': False})]\n datasets = [Dataset('Libras', max_entities=10)]\n metrics = ['sdmetrics', 'rf_detection']\n results = run_benchmark(\n models=models,\n datasets=datasets,\n metrics=metrics\n )\n\n expected = ['model', 'dataset', 'fit_time', 'sample_time', 'rf_detection',\n 'rf_detection_time', 'sdmetrics', 'sdmetrics_time']\n assert list(results.columns) == expected\n assert results.shape == (1, 8)\n","sub_path":"benchmark/tests/test_benchmark.py","file_name":"test_benchmark.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"380221861","text":"# coding: utf-8\n# leetcode.com/problems/lowest-common-ancestor-of-a-binary-search-tree\n\nfrom libleet import TreeNode\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n if p.val > q.val:\n p, q = q, p\n p, q = p.val, q.val\n def _find(r):\n if p <= r.val <= q:\n return r\n elif r.left and q < r.val:\n return _find(r.left)\n elif r.right and p > r.val:\n return _find(r.right)\n return _find(root)\n\n# another trivial problem about BST\n# related: #236\n# 15/02/16 PM\n\ns = Solution()\nroot = TreeNode(6)\n\nr2 = root.left = TreeNode(2)\nroot.left.left = TreeNode(0)\nr4 = root.left.right = TreeNode(4)\nroot.left.right.left = TreeNode(3)\nroot.left.right.left = TreeNode(5)\n\nr8 = root.right = TreeNode(8)\nroot.right.left = TreeNode(7)\nroot.right.right = TreeNode(9)\n\nprint(r2.val, r8.val, s.lowestCommonAncestor(root, r2, r8).val)\nprint(r2.val, r4.val, s.lowestCommonAncestor(root, r2, 
r4).val)\n\n","sub_path":"lowest_common_ancestor_of_a_binary_search_tree.py","file_name":"lowest_common_ancestor_of_a_binary_search_tree.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"244360794","text":"#!/usr/bin/env python3\n# SPDX-License-Identifier: MIT\n# Electric Rocker LED animation\n# Author Stephen Hemminger \n\nimport signal\nimport sys\nimport time\nfrom math import floor\n\nfrom ant.core import driver\nfrom ant.core.node import Node, Network, ChannelID\nfrom ant.core.constants import NETWORK_KEY_ANT_PLUS, NETWORK_NUMBER_PUBLIC, TIMEOUT_NEVER\nfrom ant.core.exceptions import DriverError\nfrom ant.plus.power import *\n\nfrom rpi_ws281x import PixelStrip, Color\n\n# Rider configuration\nFTP = 250\n\n# Zwift power zone color map\nCOLORMAP = [\n {\n \"base\": 0,\n \"rgb\": [0, 0, 0]\n }, # Black\n {\n \"base\": 1,\n \"rgb\": [64, 64, 64]\n }, # White\n {\n \"base\": 60,\n \"rgb\": [0, 0, 127]\n }, # Blue\n {\n \"base\": 76,\n \"rgb\": [0, 127, 0]\n }, # Green\n {\n \"base\": 90,\n \"rgb\": [127, 127, 0]\n }, # Yellow\n {\n \"base\": 105,\n \"rgb\": [127, 63, 0]\n }, # Orange\n {\n \"base\": 119,\n \"rgb\": [255, 0, 0]\n }, # Red\n]\n\n# LED strip configuration:\nLED_COUNT = 120 # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\n\n\n# Define functions which animate LEDs in various ways.\ndef colorWipe(strip, color, wait_ms=50):\n \"\"\"Wipe color across display a pixel at a time.\"\"\"\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color)\n strip.show()\n time.sleep(wait_ms / 1000.0)\n\n\ndef theaterChase(strip, color, wait_ms=50, iterations=10):\n \"\"\"Movie theater light style chaser animation.\"\"\"\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i + q, color)\n strip.show()\n time.sleep(wait_ms / 1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i + q, 0)\n\n\n# convert power value to zone color\ndef zone_color(power):\n percent = (100. 
* power) / FTP\n for z in COLORMAP:\n b = z['base']\n if percent < b:\n break\n rgb = z['rgb']\n return Color(*rgb)\n\n\nclass PowerMeter(BicyclePower):\n def __init__(self, node, network):\n super(PowerMeter, self).__init__(\n node,\n network,\n callbacks={\n 'onPowerData': self.power_data,\n })\n self.previous_count = None\n self.previous_power = None\n self.power = None\n self.cadence = None\n\n def __str__(self):\n return '(power={}, cadence={})'.format(self.power, self.cadence)\n\n def power_data(self, count, _differ, _ratio, cadence, apower, ipower):\n if cadence is not None:\n self.cadence = cadence\n if self.previous_count is None:\n self.power = ipower\n else:\n # use accumulated power to bridge gaps\n events = self.wrapDifference(count, self.previous_count, 256)\n if events == 0:\n return\n total = self.wrapDifference(apower, self.previous_power, 65536)\n self.power = total / events\n self.previous_power = apower\n self.previous_count = count\n\n def get(self):\n return (self.power, self.cadence)\n\n\ndef sigterm_handler(_signo, _stack_frame):\n # Raises SystemExit(0):\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n # Create NeoPixel object with appropriate configuration.\n strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT,\n LED_BRIGHTNESS, LED_CHANNEL)\n # Initialize the library (must be called once before other functions).\n strip.begin()\n\n # Initial animation\n colorWipe(strip, Color(127, 0, 0), wait_ms=5) # Red wipe\n\n # Configure ANT\n print('Configure..')\n device = driver.USB2Driver(idVendor=0x0fcf, idProduct=0x1009)\n\n print('Starting...')\n antnode = Node(device)\n antnode.start()\n\n # Ready animation\n colorWipe(strip, Color(127, 127, 0), wait_ms=5) # Yellow wipe\n\n network = Network(key=NETWORK_KEY_ANT_PLUS, name='N:ANT+')\n antnode.setNetworkKey(NETWORK_NUMBER_PUBLIC, network)\n print('Ant...')\n\n powermeter = PowerMeter(antnode, network)\n powermeter.open(searchTimeout=TIMEOUT_NEVER)\n print('Powermeter...')\n\n signal.signal(signal.SIGTERM, sigterm_handler)\n colorWipe(strip, Color(0, 0, 127), wait_ms=5) # Blue wipe\n\n try:\n avg_power = 0\n while True:\n # read powermeter every second\n (power, cadence) = powermeter.get()\n if power is None:\n time.sleep(5)\n continue\n\n avg_power = (power + 3 * avg_power) / 4\n color = zone_color(avg_power)\n\n if cadence is None:\n # solid color if no cadence\n colorWipe(strip, color, wait_ms=1000 / LED_COUNT)\n elif cadence == 0:\n # sleep if not pedaling\n colorWipe(strip, Color(0, 0, 0), wait_ms=10)\n time.sleep(5)\n else:\n # default animation of 50ms == cadence 200\n delay_ms = 10000 / cadence\n repeats = floor(1000 / delay_ms)\n theaterChase(\n strip, color, wait_ms=delay_ms, iterations=repeats)\n finally:\n colorWipe(strip, Color(0, 0, 0), wait_ms=10)\n powermeter.close()\n antnode.stop()\n","sub_path":"electric-rocker.py","file_name":"electric-rocker.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
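In `PowerMeter.power_data` above, gaps between ANT+ messages are bridged by diffing wrapping counters: the 8-bit event count and the 16-bit accumulated power (`wrapDifference` itself is inherited from the python-ant `BicyclePower` base class, not shown in the record). A standalone sketch of the same modular-difference idea; the function name and the sample numbers are illustrative:

```python
def wrap_difference(current, previous, modulo):
    """Difference between two readings of a counter that wraps at `modulo`."""
    return (current - previous) % modulo

# ANT+ power pages carry an 8-bit event count and a 16-bit accumulated power.
# Averaging across several missed messages survives counter rollover:
events = wrap_difference(3, 253, 256)        # 253 -> 3 means 6 events, not -250
watts = wrap_difference(500, 64336, 65536)   # accumulated watts, wrapped once
assert events == 6 and watts == 1700
print(watts / events, "W average since the last update")  # ~283.3 W
```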
The intellectual and\n# technical concepts contained herein are proprietary to COMPANY and may be covered by U.S. and Foreign\n# Patents, patents in process, and are protected by trade secret or copyright law. Dissemination of this\n# information or reproduction of this material is strictly forbidden unless prior written permission is\n# obtained from COMPANY. Access to the source code contained herein is hereby forbidden to anyone except\n# current COMPANY employees, managers or contractors who have executed Confidentiality and Non-disclosure\n# agreements explicitly covering such access.\n#\n# ANY REPRODUCTION, COPYING, MODIFICATION, DISTRIBUTION, PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR\n# THROUGH USE OF THIS SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS STRICTLY PROHIBITED,\n# AND IN VIOLATION OF APPLICABLE LAWS AND INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE\n# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS TO REPRODUCE, DISCLOSE OR DISTRIBUTE\n# ITS CONTENTS, OR TO MANUFACTURE, USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.\n#\n# FOR U.S. GOVERNMENT CUSTOMERS REGARDING THIS DOCUMENTATION/SOFTWARE\n# These notices shall be marked on any reproduction of this data, in whole or in part.\n# NOTICE: Notwithstanding any other lease or license that may pertain to, or accompany the delivery of,\n# this computer software, the rights of the Government regarding its use, reproduction and disclosure are\n# as set forth in Section 52.227-19 of the FARS Computer Software-Restricted Rights clause.\n# RESTRICTED RIGHTS NOTICE: Use, duplication, or disclosure by the Government is subject to the\n# restrictions as set forth in subparagraph (c)(1)(ii) of the Rights in Technical Data and Computer\n# Software clause at DFARS 52.227-7013.\n#\n# ***********************************************************************************************************\n# -*- coding: utf-8 -*-\n\"\"\"\n Part of the distribution\n# Event Handler Class\n#\n# symbolic link events are ignored\n# handle create, hardlink,rename, remove\n#\n# events handled\n#\n# POSTCREATE\n# POSTREMOVE\n# POSTRENAME\n# POSTLINK\n#\n# ATTRIBUTE (disabled by default)\n# POSTPERMCHANGE\n#\n\"\"\"\n\nfrom itertools import filterfalse\nimport logging\nimport os\nimport pprint # for detailed logging\nimport stat\nimport subprocess as sp\nimport sys\nfrom time import asctime,ctime,time,sleep\n\nfrom sf_em_common.eventCollector import EventCollector\nfrom sf_em_common.utils import _index_to_substring,partition\n\nfrom sf_gpfs.eventProcessor import EventProcessorService\nfrom sf_gpfs.make_entries import sf_entry_format,sf_heartbeat_entry\n\nfrom sfutils.encoding import is_utf8\nfrom sfutils.scan_entry import ScanEntry\nEvent = ScanEntry.Event\n\ndef unwrap_EMS_pe(arg, **kwarg):\n return EventMonitorService.process_events(*arg, **kwarg)\n\nclass EventMonitorService(EventProcessorService):\n _event_logger=logging.getLogger(__name__)\n def __init__(self, args):\n \"\"\"\n args.dbpack is basename of DB msgpack file for persistent data relevant to managing\n inode-to-path/parent_inode DBs\n args.dbpack must be located in /data\n args.dbpack must include the volume name in the string to allow multiple monitors to run\n (tracking different file systems)\n \"\"\"\n super(EventMonitorService,self).__init__(args)\n self.eventOpts = \"\".join( ['-'] + args.eventOpts)\n self.args = args\n\n \"\"\"\n self.delay_min min number of seconds to wait for a full chunk\n self.delay_max max number 
of seconds to wait for a full chunk\n self.pause_between_reads total pause in seconds - default\n self.delay number of seconds tested to wait for a full chunk\n \"\"\"\n self.eventgroup_msgsize = args.event_chunk_size # max of 10000 events packed up and sent at a time - about 85 KB packed\n# not used for now - GPFS event buffer can only provide 32KB per call, about 160 events, so the chunksize is the important parameter\n self.eventBufferSz = args.event_buffer \n if self.args.debug:\n self.delay_max = 1\n self.delay_min = 0.01\n self.pause_between_reads = 0\n self.delay = 1\n else:\n self.delay_min = 1\n self.delay_max = 16\n self.pause_between_reads = 5\n self.delay = 15\n if self.args.event_debug:\n self.events_file = os.path.join(self.logdir, \"RunSerialEvents.log\")\n with open(self.events_file,'w') as fd:\n pass\n# from EventProcessorService - REFACTOR: eventually needs to be called from processes to allow multiprocessing\n self.mp_init()\n\n#---------------------------------------------------------------------------------------------------\n#---------------------------------------------------------------------------------------------------\n def group_events(self):\n \"\"\"\n Buffer to collect chunks of self.eventgroup_msgsize events at a time for processing\n Because the print_event_cmd may return a non-utf8 filename as part of the event string,\n process the string as an immutable array of bytes, not a unicode string\n\n events that need further parsing/processing (events,event_chunk) are returned as str (unicode)\n events that have non-utf8 filenames are logged as warnings (non-utf8_event_chunk)\n\n This is the only place where the event Monitor needs to test for utf8-compatible filenames\n\n \"\"\"\n\n events = []\n chunksize = 0\n print_event_cmd = [self.printeventbin,self.eventOpts,self.filesystem]\n otimestamp = time()\n ntimestamp = time()\n deltatimestamp = ntimestamp - otimestamp\n # exit loop if the chunk is large enough or the delay is long enough\n while chunksize < self.eventgroup_msgsize and deltatimestamp < self.delay:\n out = sp.check_output(print_event_cmd)\n procL = out.split(b'===End_Message===\\n')\n del procL[-1] #last End_Message splits off an empty string\n\n \"\"\"\n REFACTOR: \n non_utf8_event_chunk,event_chunk = partition(is_utf8,procL)\n \"\"\"\n event_chunk = list(map(bytes.decode,list(filter(is_utf8,procL))))\n non_utf8_event_chunk = list(filterfalse(is_utf8,procL))\n\n for idx,event in enumerate(non_utf8_event_chunk):\n if idx == 0:\n EventMonitorService._event_logger.warning('The following events have non-utf8 compatible filenames:')\n EventMonitorService._event_logger.warning('{}: {}'.format(idx,event))\n if len(event_chunk) == 0:\n self.delay = min( 2*self.delay, self.delay_max)\n EventMonitorService._event_logger.info('empty chunk delay {} secs in group events {}'.format(self.delay,ctime().split()[3]))\n sleep(self.delay)\n else:\n self.delay /= 2\n self.delay = max( 2*self.delay, self.delay_min)\n EventMonitorService._event_logger.info('chunk size {} delay {} secs in group events {}'.format(len(event_chunk),self.delay,ctime().split()[3]))\n EventMonitorService._event_logger.info('event_chunk {} '.format(event_chunk))\n sleep(self.delay)\n\n events += event_chunk\n chunksize = len(events)\n ntimestamp = time()\n deltatimestamp = ntimestamp - otimestamp\n event_count = len(events)\n msg = 'Grouped %d events' % event_count\n EventMonitorService._event_logger.info(msg)\n return 
(events)\n\n#---------------------------------------------------------------------------------------------------\n# lots of OO design opportunities here\n def run(self):\n \"\"\"\n lots of OO design opportunities here\n if an object handle is already in attSet for an event type for this group\n (e.g. multiple 'atime's), then attribute event could be skipped (only need latest one)\n\n could sort eventlist before doing inode scans - e.g. first by inode#, then by sequence number\n the trick here is that it's possible that a directory could be processed before a file in it\n and that would throw the order of ops off. But it would be nice to know all activity for a file before\n uploading. Perhaps after the sort and the operations on the list (e.g. keeping last attr event) the original\n order could be restored\n\n future enhancement - can use timestamp on when eSet,attSet are first created to\n keep track of how frequently events occur and when associated file info should be passed on to catalog\n there is no signal for when events are added, so just check occasionally\n\n Process, then wait a few seconds for next attempt\n don't bother to process events until there is enough work - at least one chunk\n self.delayread() should do machine learning so that at least one chunk, and hopefully many,\n (self.eventgroup_msgsize) can be accumulated before calling self.process_events()\n \"\"\"\n event_collector = EventCollector(self.args)\n EventMonitorService._event_logger=logging.getLogger(__name__)\n EventMonitorService._event_logger.info('ENGINE Bus STARTED') # Magic info that the service really started\n# num_consumers = min(len(self.filelist),mp.cpu_count() - 2)\n# event_collector.active_num_consumers = num_consumers\n# logger_prefix = 'sf-gpfs.EventMonitor.' 
+ self.args.vol\n# pool = mp.Pool(processes=1,initializer=EventMonitorService._event_logger.process_logger,initargs=('sf-lustre',self.args,'sf-lustre'))\n\n otimestamp = time()\n etupleHeaders = ['eventD','fullpath','statD','xattr']\n msg = 'Starting event monitor'\n print(msg)\n EventMonitorService._event_logger.info(msg)\n while True:\n ntimestamp = time()\n self.delayread(otimestamp,ntimestamp)\n otimestamp = ntimestamp\n\n \"\"\"\n group_events returns a list of raw event strings\n parse_events returns a list of event dictionaries\n process_events returns a list of event tuples\n number of events is roughly self.eventgroup_msgsize\n \"\"\"\n events = self.group_events()\n if len(events) == 0:\n sf_entryL = sf_heartbeat_entry()\n event_collector.upload_entries(sf_entryL)\n msg = (asctime() + '\\tUpload Heartbeat')\n EventMonitorService._event_logger.info(msg)\n else:\n # REFACTOR: These are all monads - need to make sure that each list is either empty or has valid data struct \n # can't have list of empty tuples (eventTupleL)\n eventDictL = self.parse_events(events)\n eventTupleL = self.process_events(eventDictL)\n sf_entryL = sf_entry_format(eventTupleL)\n if self.args.event_debug:\n with open(self.events_file,'a+') as fd:\n print('-'*40,'Raw Event Group','-'*40,file=fd)\n for line in events: print(line,file=fd)\n print('-'*40,'Event Entry Group','-'*40,file=fd)\n for entry in sf_entryL: print(entry,file=fd)\n print('-'*100,file=fd)\n eventgroup_count = len(sf_entryL)\n self.queue_event_count += eventgroup_count\n msg = ('Commit DBs')\n EventMonitorService._event_logger.info(msg)\n self.inodeDBset.commitDBs()\n event_collector.upload_entries(sf_entryL)\n msg = ('uploading ' + str(eventgroup_count) + ' entries: ' + ctime().split()[3])\n EventMonitorService._event_logger.info(msg)\n for entry in sf_entryL:\n EventMonitorService._event_logger.info(pprint.pformat(entry))\n lastentry = sf_entryL[-1] if len(sf_entryL) > 0 else []\n msg = (asctime() + \"\\t\" + str(eventgroup_count) + ' Events processed through ' + str(lastentry))\n EventMonitorService._event_logger.info(msg)\n\n#---------------------------------------------------------------------------------------------------\n def openScanLog(self):\n scanfileS = \".\".join(['GPFS',self.volume,'event_scanner.lst'])\n self.scanLogS = os.path.join(self.datadir, scanfileS)\n print (\"Opening event scanner log file {}\".format(self.scanLogS))\n try:\n self.scanLog = open(self.scanLogS,'a+')\n except IOError:\n print(\"Error when opening ScanLog\", file=sys.stderr)\n sys.exit(2)\n\n# REFACTOR:\n# This may never be usable - scan is only closed at exit\n def closeScanLog(self):\n print (\"Closing event scanner log file {}\".format(self.scanLogS))\n try:\n self.scanLog.flush()\n self.scanLog.close()\n except IOError:\n print(\"Error when syncing and closing ScanLog\", file=sys.stderr)\n sys.exit(2)\n\n#\n def run_raw(self):\n \"\"\"\n dump raw events\n there is no upload to SF DB\n \"\"\"\n EventMonitorService._event_logger=logging.getLogger(__name__)\n EventMonitorService._event_logger.info('ENGINE Bus STARTED') # Magic info that the service really started\n self.openScanLog()\n\n otimestamp = time()\n etupleHeaders = ['eventD','fullpath','statD','xattr']\n msg = 'Starting event monitor dump'\n print(msg)\n EventMonitorService._event_logger.info(msg)\n while True:\n ntimestamp = time()\n self.delayread(otimestamp,ntimestamp)\n otimestamp = ntimestamp\n\n events = self.group_events()\n eventgroup_count = len(events)\n self.queue_event_count += 
eventgroup_count\n for event in events:\n print(event.strip(),file=self.scanLog)\n self.scanLog.flush()\n\n\n#----------------------Class end -------------------------------------------------------------------\n","sub_path":"sf-gpfs/gpfsmonitor/src/sf_gpfs/eventMonitor.py","file_name":"eventMonitor.py","file_ext":"py","file_size_in_byte":14888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"584126054","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport time\n\nimport epa.logging\n\n\nlogger = epa.logging.get_logger(__name__)\n\n\ndef main_func(coordinator, process_name=\"\", description=\"\"):\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\"config_files\", help=\"config files\", nargs='+')\n args = parser.parse_args()\n\n logger.info(f\"Start {process_name}\")\n\n threads = []\n for config in args.config_files:\n t = coordinator(config_file=config)\n t.start()\n threads.append(t)\n time.sleep(5)\n\n logger.info(f\"{len(threads)} {coordinator.__name__} created.\")\n\n try:\n for t in threads:\n t.join()\n except KeyboardInterrupt:\n for t in threads:\n t.terminated = True\n t.stop_event.set()\n\n logger.warning(\"Oop !!!\")\n","sub_path":"iot_data/iot_toolkit/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"492075198","text":"import mmh3\n\n\nclass MinHashSignatureBuilder:\n def __init__(self, columns, rows):\n self.columns = columns\n self.rows = rows\n\n def __call__(self, features):\n return [\n min(mmh3.hash(feature, column) % self.rows for feature in features)\n for column in range(self.columns)\n ]\n","sub_path":"src/sentry/similarity/signatures.py","file_name":"signatures.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"282381034","text":"\n# coding: utf-8\n\n# In[71]:\n\n\nimport keras as k\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\nimport random\n\n\n# In[82]:\n\n\nimport numpy as np\n\nn_samples = 1000000\nn_partitions = 10\n\n# Generate tasks and partitions\npartition_data = np.random.random((n_samples, n_partitions)) # partition data generation\n\ndata = np.zeros((n_samples, n_partitions)) # initialize input layer\nlabels = np.zeros((n_samples, n_partitions)) # initialize outputs layer for training \n\ntask_data = np.zeros((n_samples, 1)) # initialize task list\n\nfor i in range (0, n_samples):\n \n partitions = partition_data[i]\n task = random.uniform(0, partitions.max())\n task_data[i] = task\n \n best_partition = -1\n best_fit = 999999999\n \n for j in range (0, n_partitions):\n current_fit = partitions[j] - task\n data[i,j] = current_fit\n if current_fit > 0 and current_fit < best_fit:\n best_fit = current_fit\n best_partition = j\n \n labels[i][best_partition] = 1\n\n\n# In[73]:\n\n\nprint(data)\nprint(labels)\n\n\n# In[86]:\n\n\nimport tensorflow as tf\n\n# Construct neural network\nn_nodes = 32\n\nmodel = Sequential()\nmodel.add(Dense(n_nodes, input_dim=n_partitions, activation='relu'))\nmodel.add(Dense(n_nodes, activation='relu'))\nmodel.add(Dense(n_partitions, activation='softmax'))\n\n# Configure a model for categorical classification. 
from https://www.tensorflow.org/guide/keras#train_and_evaluate\nmodel.compile(optimizer=tf.train.RMSPropOptimizer(0.008),\n loss=tf.keras.losses.categorical_crossentropy,\n metrics=[tf.keras.metrics.categorical_accuracy])\n\n\n# In[87]:\n\n\nmodel.fit(data, labels, epochs=100, batch_size=256)\n\n","sub_path":"Simple Classifer/SimpleKerasClassifer.py","file_name":"SimpleKerasClassifer.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243083019","text":"\ndef f(string_word):\n output_string = \"\"\n for i in range(1, len(string_word)+1):\n output_string += string_word[-i]\n return output_string\n\nword = \"flipped class room is important\"\nw = word.split()\n#print(w)\ninvert_string = []\nfor i in w:\n invert_string.append(f(i))\n\n#print(invert_string)\ninvert_output = \" \".join(invert_string)\nprint(invert_output)\n","sub_path":"t1b.py","file_name":"t1b.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"174277868","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\nimport copy\nimport mock\n\nfrom octaviaclient.osc.v2 import member\nfrom octaviaclient.tests.unit.osc.v2 import fakes as mem_fakes\nfrom osc_lib.tests.utils import ParserException\n\n\nclass TestMember(mem_fakes.TestLoadBalancerv2):\n\n mem = mem_fakes.FakeMember.create_member()\n\n columns = (\n 'id',\n 'name',\n 'project_id',\n 'provisioning_status',\n 'address',\n 'protocol_port',\n 'operating_status',\n 'weight'\n )\n\n datalist = (\n (\n mem.id,\n mem.name,\n mem.project_id,\n mem.provisioning_status,\n mem.address,\n mem.protocol_port,\n mem.operating_status,\n mem.weight\n ),\n )\n\n info = {'members': [{\n 'id': mem.id,\n 'name': mem.name,\n 'project_id': mem.project_id,\n 'provisioning_status': mem.provisioning_status,\n 'address': mem.address,\n 'protocol_port': mem.protocol_port,\n 'operating_status': mem.operating_status,\n 'weight': mem.weight,\n 'pool_id': mem.pool_id}]\n }\n\n mem_info = copy.deepcopy(info)\n\n def setUp(self):\n super(TestMember, self).setUp()\n self.mem_mock = self.app.client_manager.load_balancer.load_balancers\n self.mem_mock.reset_mock()\n\n self.api_mock = mock.Mock()\n self.api_mock.member_list.return_value = self.mem_info\n lb_client = self.app.client_manager\n lb_client.load_balancer = self.api_mock\n lb_client.neutronclient = mock.MagicMock()\n\n\nclass TestListMember(TestMember):\n\n def setUp(self):\n super(TestListMember, self).setUp()\n self.cmd = member.ListMember(self.app, None)\n\n def test_member_list_no_options(self):\n arglist = []\n verifylist = []\n\n self.assertRaises(ParserException,\n self.check_parser, self.cmd, arglist, verifylist)\n\n @mock.patch('octaviaclient.osc.v2.utils.get_member_attrs')\n def test_member_list(self, mock_attrs):\n mock_attrs.return_value = {'pool_id': 'pool_id',\n 'project_id': self.mem.project_id}\n arglist = ['pool_id']\n verifylist = []\n\n 
parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n columns, data = self.cmd.take_action(parsed_args)\n\n self.api_mock.member_list.assert_called_once_with(pool_id='pool_id')\n self.assertEqual(self.columns, columns)\n self.assertEqual(self.datalist, tuple(data))\n\n\nclass TestCreateMember(TestMember):\n\n def setUp(self):\n super(TestCreateMember, self).setUp()\n self.cmd = member.CreateMember(self.app, None)\n self.api_mock.member_create.return_value = {\n 'member': self.mem_info}\n\n @mock.patch('octaviaclient.osc.v2.utils.get_member_attrs')\n def test_member_create(self, mock_attrs):\n mock_attrs.return_value = {\n 'ip_address': '192.0.2.122',\n 'protocol_port': self.mem.protocol_port,\n 'weight': self.mem.weight,\n 'admin_state_up': True,\n 'pool_id': self.mem.pool_id}\n\n arglist = ['pool_id', '--address', '192.0.2.122',\n '--protocol-port', '80',\n '--weight', '1', '--enable']\n verifylist = [\n ('address', '192.0.2.122'),\n ('protocol_port', 80),\n ('weight', 1)\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self.cmd.take_action(parsed_args)\n self.api_mock.member_create.assert_called_with(\n pool_id=self.mem.pool_id, json={\n 'member': {'ip_address': '192.0.2.122',\n 'protocol_port': self.mem.protocol_port,\n 'weight': self.mem.weight,\n 'admin_state_up': True}})\n\n\nclass TestMemberDelete(TestMember):\n\n def setUp(self):\n super(TestMemberDelete, self).setUp()\n self.cmd = member.DeleteMember(self.app, None)\n\n @mock.patch('octaviaclient.osc.v2.utils.get_member_attrs')\n def test_member_delete(self, mock_attrs):\n mock_attrs.return_value = {'pool_id': 'test_pool_id',\n 'member_id': 'test_mem_id'}\n arglist = ['test_pool_id', 'test_mem_id']\n verifylist = []\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self.cmd.take_action(parsed_args)\n self.api_mock.member_delete.assert_called_with(\n pool_id='test_pool_id', member_id='test_mem_id')\n\n\nclass TestMemberSet(TestMember):\n\n def setUp(self):\n super(TestMemberSet, self).setUp()\n self.cmd = member.SetMember(self.app, None)\n\n @mock.patch('octaviaclient.osc.v2.utils.get_member_attrs')\n def test_member_set(self, mock_attrs):\n mock_attrs.return_value = {'pool_id': 'test_pool_id',\n 'member_id': 'test_mem_id',\n 'name': 'new_name'}\n arglist = ['test_pool_id', 'test_mem_id', '--name',\n 'new_name']\n verifylist = [\n ('pool', 'test_pool_id'),\n ('member', 'test_mem_id'),\n ('name', 'new_name')\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self.cmd.take_action(parsed_args)\n self.api_mock.member_set.assert_called_with(\n pool_id='test_pool_id', member_id='test_mem_id',\n json={'member': {'name': 'new_name'}})\n\n\nclass TestMemberShow(TestMember):\n\n def setUp(self):\n super(TestMemberShow, self).setUp()\n self.api_mock = mock.Mock()\n self.api_mock.member_list.return_value = self.mem_info\n self.api_mock.member_show.return_value = {\n 'member': self.mem_info['members'][0]}\n lb_client = self.app.client_manager\n lb_client.load_balancer = self.api_mock\n\n self.cmd = member.ShowMember(self.app, None)\n\n @mock.patch('octaviaclient.osc.v2.utils.get_member_attrs')\n def test_member_show(self, mock_attrs):\n mock_attrs.return_value = {'member_id': self.mem.id,\n 'pool_id': self.mem.pool_id}\n arglist = [self.mem.pool_id, self.mem.id]\n verifylist = [\n ('pool', self.mem.pool_id),\n ('member', self.mem.id)\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self.cmd.take_action(parsed_args)\n 
self.api_mock.member_show.assert_called_with(\n member_id=self.mem.id,\n pool_id=self.mem.pool_id\n )\n","sub_path":"octaviaclient/tests/unit/osc/v2/test_member.py","file_name":"test_member.py","file_ext":"py","file_size_in_byte":7163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212634950","text":"import os\nimport math\nimport errno\nimport msvcrt\nimport os.path\nimport itertools\nfrom os import listdir\nfrom os.path import isfile, join\nimport os.path, time\n\nRUN = False\nDATE_INDEX = 8;\n\n# define list of all the month's\nmonths_list = [(\"Jan\", \"01\"), (\"Feb\", \"02\"), (\"Mar\", \"03\"), \\\n\t\t\t\t(\"Apr\", \"04\"), (\"May\", \"05\"), (\"Jun\", \"06\"),\\\n\t\t\t\t(\"Jul\", \"07\"), (\"Aug\", \"08\"), (\"Sep\", \"09\"),\\\n\t\t\t\t(\"Oct\", \"10\"), (\"Nov\", \"11\"), (\"Dec\", \"12\")];\n\nfile_extension_include = [\".jpg\", \".jpeg\", \".PNG\", \".gif\", \".tif\", \".tiff\", \".CR2\", \".ARW\"];\n\ndef include_suffix(file_extension):\n\t# compare case-insensitively so uppercase entries like \".PNG\" and \".CR2\" still match\n\tfor extension_include in map(lambda x: x.lower(), file_extension_include):\n\t\tif (extension_include == file_extension):\n\t\t\treturn True;\n\treturn False;\n\ndef check_dir_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n# given a path, rename all the files inside\ndef rename_method(PATH):\n\tos.chdir(PATH);\n\n\t# find all files in the directory\n\tonly_files = [file for file in listdir(PATH) if isfile(join(PATH,file))];\n\n\tfor file in only_files:\n\n\t\topend_file = os.open(file, os.O_RDONLY);\n\t\ttime_string = time.ctime(os.stat(opend_file)[DATE_INDEX]);\n\t\tos.close(opend_file);\n\t\t\n\t\t# change the month from a name to a number \n\t\tfor month in months_list:\n\t\t\ttime_string = time_string.replace(str(month[0]), str(month[1]));\n\n\t\t# parsing the date in time_string\n\t\tmonth = time_string[4:6];\n\n\t\t# get the name and suffix of the file\n\t\t_ , file_extension = os.path.splitext(file);\n\t\tprint(\"BEFORE\\n\");\n\n\t\t# exclude all files except with suffix from file_extension_include \n\t\tif (not include_suffix(file_extension.lower())): continue;\n\t\tprint(\"AFTER\\n\");\n\n\t\tcheck_dir_exists(PATH + \"\\\\\" + month);\n\t\tos.rename(PATH + \"\\\\\" + file, PATH + \"\\\\\" + month + \"\\\\\" + file);\n\n\t\tprint(\"\\n\");\n\ndef wait_input_to_exit():\n msvcrt.getch()\n\n# ---------------------------------- #\n# -------- edit directories -------- #\n# ---------------------------------- #\n\n# edit_list = [r\"C:\\Users\\Itamar Katz\\Desktop\\New folder\"]\n\nedit_list = [r\"C:\\Users\\Itamar Katz\\Desktop\\TEST\"]\n# edit_list = [r\"H:\\DCIM\\102MSDCF\"]\n\n# --------------------------------------------- #\n# -------- safety net - take off to run ------- #\n# --------------------------------------------- #\n\n# RUN = True\n\n# works with all files except .xmp\ndef run(edit_list):\n\tfor item in edit_list:\n\t\ttry:\n\t\t\trename_method(item);\n\t\t\tprint (\"Done Running - \" + item);\n\t\texcept Exception as e: \n\t\t\tprint(\"An exception was thrown:\");\n\t\t\tprint(e);\n\t\t\tcontinue;\n\treturn;\n\nprint(\"Starting Program\\n\");\n\nif RUN == True:\n\trun(edit_list);\n\tprint(\"Program has Finished Correctly\");\nelse:\n\tprint(\"Program did not run. 
RUN = False\");\n\nprint(\"\\nPlease press any key to exit\");\nwait_input_to_exit();\n\n\n","sub_path":"create dir by month date.py","file_name":"create dir by month date.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39319901","text":"import argparse\nimport sys\nfrom encoder import *\nfrom decoder import *\n\n\n# Straight-forward check that there are no two identical windows of length k in the sequence, for testing purposes\ndef validate_no_identical_windows(w, k):\n seen_windows = set()\n for i in range(len(w) - k + 1):\n hash_w = str(w[i:i + k])\n if hash_w in seen_windows:\n return False\n else:\n seen_windows.add(hash_w)\n return True\n\n\ndef run_action(w: List, q, action, redundancy, complexity_mode, verbose_mode, test_mode, comma_mode):\n n = len(w) + redundancy if action == \"encode\" else len(w)\n orig_w = w.copy()\n log_n = ceil(log(n, q))\n k = 2 * log_n + 2\n if verbose_mode:\n print('n =', n)\n print('q =', q)\n print('log_n =', log_n)\n print('k =', k)\n print('w =', w)\n\n res_word = Encoder(complexity_mode, redundancy, verbose_mode, q).input(w).encode().output() if \\\n action == \"encode\" else Decoder(redundancy, verbose_mode, q).input(w).decode().output()\n\n if verbose_mode:\n print(\"output = \", end=\"\")\n if comma_mode:\n print(str(res_word)[1:-1].replace(\" \", \"\"))\n else:\n print(\"\".join(map(str, res_word)))\n\n if test_mode:\n if action == \"encode\":\n if validate_no_identical_windows(res_word, k):\n if orig_w == Decoder(redundancy, verbose_mode, q).input(res_word).decode().output():\n print('TEST SUCCESS')\n return True\n else:\n print('TEST FAILED!')\n print('Decode(Encode(w)) != w')\n return False\n else:\n print('TEST FAILED!')\n print('Result is not repeat-free')\n return False\n elif action == \"decode\":\n if orig_w == Encoder(complexity_mode, redundancy, verbose_mode, q).input(res_word).encode().output():\n print('TEST SUCCESS')\n return True\n else:\n print('TEST FAILED!')\n print('Encode(Decode(w)) != w')\n return False\n return True\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"./main\")\n parser.add_argument(\"action\", help=\"{encode, decode}\")\n parser.add_argument(\"sequence\", nargs=\"?\", help=\"a q-ary word\")\n parser.add_argument(\"-i\", \"--input\", help=\"get word from standard input\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--redundancy\", type=int, choices=[1, 2],\n help=\"how many redundancy bits to use\", default=1)\n parser.add_argument(\"-q\", type=int, help=\"alphabet's size\", default=2)\n parser.add_argument(\"-c\", \"--complexity\", choices=[\"time\", \"space\"],\n help=\"save time or space complexity\", default=\"time\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", action=\"store_true\")\n parser.add_argument(\"-t\", \"--test\", help=\"test for output correctness\", action=\"store_true\")\n args = parser.parse_args()\n\n if args.sequence is None:\n if args.input: # get word from standard input\n args.sequence = input()\n else:\n print(\"You must enter a word either from the command line or via standard input\", file=sys.stderr)\n exit(1)\n\n if args.q != 2:\n if \",\" not in args.sequence:\n print(args.q)\n print(\"You must use ',' as a delimiter when using q != 2 flag\", file=sys.stderr)\n exit(1)\n\n is_comma = False\n if \",\" in args.sequence:\n is_comma = True\n args.sequence = args.sequence.replace(\" \", \"\").split(\",\")\n args.sequence 
= [int(x) for x in list(args.sequence)]\n\n run_action(args.sequence, args.q, args.action, args.redundancy, args.complexity, args.verbose, args.test, is_comma)\n\n# region Anecdotes\n\n# Before inlining 'identical', profiling shows:\n# When n=256, number_of_tests=512, the method 'identical' is called 142M times (~ 2^27), and the program takes 109sec.\n# So for one test, on average, it is called 2^(27-9)=2^18 times. Since n=2^8, we expected a lot more times...\n# After inlining 'identical', profiling shows:\n# Now it takes 78sec (diff=31sec). This means that one test on average takes around 150msec.\n# According to Competitive Programming, 100M operations happen in 1 sec, so here, we have 15M operations.\n# Since n=2^8, we expected 2^(8*3+2*2)=2^28 which is roughly 256M operations...\n\n# endregion\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288455855","text":"# 248. Count of Smaller Number\n# Give you an integer array (index from 0 to n-1, where n is the size of this array, value from 0 to 10000) and an query list. For each query, give you an integer, return the number of element in the array that are smaller than the given integer.\n#\n# Example\n# For array [1,2,7,8,5], and queries [1,8,5], return [0,4,2]\n#\n# Challenge\n# Could you use three ways to do it.\n#\n# Just loop\n# Sort and binary search\n# Build Segment Tree and Search.\n\n\nclass Solution:\n \"\"\"\n @param A: An integer array\n @param queries: The query list\n @return: The number of element in the array that are smaller that the given integer\n \"\"\"\n\n def countOfSmallerNumber(self, A, queries):\n # write your code here\n \"build a segment tree[0,10000]. node contain value of count that are between start and end. set it to 0 when build it\"\n\n \"then use sgement tree modify to update count of each node, time is O(nlog(10000))\"\n\n root = MySTN(0, 10000, 0).build(-1, 10000)\n\n for i in range(0, len(A)):\n \"there might be duplicates in A, so we need to increase the count\"\n root.modify(A[i], root.query(A[i], A[i]) + 1)\n\n result = []\n\n for i in range(0, len(queries)):\n \"we are looking for smaller than target count\"\n result.append(root.query(0, queries[i] - 1))\n\n return result\n\n\nclass MySTN:\n def __init__(self, start, end, count):\n self.start = start\n self.end = end\n self.count = count\n self.left = None\n self.right = None\n\n def build(self, start, end):\n \"build a segment tree with node value 0\"\n\n if start == end:\n return MySTN(start, end, 0)\n\n mid = (start + end) // 2\n left = self.build(start, mid)\n right = self.build(mid + 1, end)\n root = MySTN(start, end, 0)\n root.left = left\n root.right = right\n return root\n\n def query(self, start, end):\n \"return the count of specific segment\"\n if self.start == start and self.end == end:\n return self.count\n\n mid = (self.start + self.end) // 2\n if end <= mid:\n \"in left side\"\n return self.left.query(start, end)\n elif start > mid:\n \"in right side\"\n return self.right.query(start, end)\n else:\n \"partial left and partial right\"\n return self.left.query(start, mid) + self.right.query(mid + 1, end)\n\n def modify(self, index, value):\n \"modify specific index to specific value. 
need to update all the nodes along the way that may be affected\"\n\n if self.start == index and self.end == index:\n self.count = value\n return\n\n mid = (self.start + self.end) // 2\n if index <= mid:\n \"in left side\"\n self.left.modify(index, value)\n elif index > mid:\n \"in right side\"\n self.right.modify(index, value)\n\n \"need to update count of current node after left/right child is updated\"\n self.count = self.left.count + self.right.count\n\n\nassert Solution().countOfSmallerNumber([1, 2, 3, 4, 5, 6], [1, 2, 3, 4]) == [0, 1, 2, 3]\n","sub_path":"Algorithm/Python/Tree/SegmentTree/CountOfSmallerNumber.py","file_name":"CountOfSmallerNumber.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301946974","text":"class Solution(object):\r\n def isInterleave(self, s1, s2, s3):\r\n \"\"\"\r\n :type s1: str\r\n :type s2: str\r\n :type s3: str\r\n :rtype: bool\r\n \"\"\"\r\n # start coding at 7:27\r\n if not s1 and not s2 and not s3: return True;\r\n if (len(s1) + len(s2) != len(s3)): return False;\r\n \r\n dp = [[False for j in range(len(s2)+1)] for i in range(len(s1)+1)]\r\n dp[0][0] = True;\r\n for i in range(1, len(s1)+1):\r\n if dp[i-1][0] and s1[i-1] == s3[i-1]: dp[i][0] = True;\r\n else: break;\r\n for j in range(1, len(s2)+1):\r\n if dp[0][j-1] and s2[j-1] == s3[j-1]: dp[0][j] = True;\r\n else: break;\r\n \r\n for i in range(1, len(s1)+1):\r\n for j in range(1, len(s2)+1):\r\n dp[i][j] = (dp[i][j-1] and s2[j-1] == s3[i+j-1]) or (dp[i-1][j] and s1[i-1] == s3[i+j-1]);\r\n return dp[len(s1)][len(s2)];\r\n # passed 7:34 but failed because of a tiny mistake and fixed it immediately and then passed at 7:34","sub_path":"1-100/91-100/python/97_Interleaving_string.py","file_name":"97_Interleaving_string.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206773131","text":"\nclass BinaryNode(object):\n\n def __init__(self):\n self.data = None\n self.left = None\n self.right = None\n\n def is_leaf(self):\n if self.left is None and self.right is None:\n return True\n else:\n return False\n\n def is_internal(self):\n if self.left is not None or self.right is not None:\n return True\n else:\n return False\n","sub_path":"source/binary_node.py","file_name":"binary_node.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429611033","text":"import datetime\nfrom typing import Callable, Optional\n\nimport pandas as pd\nfrom pandas import DataFrame\n\nfrom quant_candles.controllers import ExchangeREST, ExchangeS3, use_s3\nfrom quant_candles.models import Symbol\n\nfrom .api import get_bitmex_api_response\nfrom .base import BitmexRESTMixin, BitmexS3Mixin\nfrom .constants import API_URL, XBTUSD\n\n\ndef bitmex_trades(\n symbol: Symbol,\n timestamp_from: datetime.datetime,\n timestamp_to: datetime.datetime,\n on_data_frame: Callable,\n retry: bool = False,\n verbose: bool = False,\n) -> None:\n \"\"\"Get BitMEX trades.\"\"\"\n if timestamp_to > use_s3():\n BitmexTradesREST(\n symbol,\n timestamp_from=timestamp_from if timestamp_from > use_s3() else use_s3(),\n timestamp_to=timestamp_to,\n on_data_frame=on_data_frame,\n retry=retry,\n verbose=verbose,\n ).main()\n if timestamp_from < use_s3():\n BitmexTradesS3(\n symbol,\n timestamp_from=timestamp_from,\n timestamp_to=timestamp_to if timestamp_to < use_s3() else use_s3(),\n 
on_data_frame=on_data_frame,\n retry=retry,\n verbose=verbose,\n ).main()\n\n\nclass BitmexTradesREST(BitmexRESTMixin, ExchangeREST):\n \"\"\"BitMEX trades REST.\"\"\"\n\n\nclass BitmexTradesS3(BitmexS3Mixin, ExchangeS3):\n \"\"\"BitMEX trades S3.\"\"\"\n\n def get_data_frame(self, date: datetime.date) -> Optional[DataFrame]:\n \"\"\"Get data_frame.\n\n Downloaded file has multiple symbols. Do nothing before listing date.\n \"\"\"\n base_url = f\"{API_URL}/instrument?symbol={self.symbol.api_symbol}&count=1\"\n\n def get_api_url(*args, **kwargs):\n return base_url\n\n # In the case of XBTUSD, the listing date was previously correct, but has been\n # revised by BitMEX to 2016-05-13.\n if self.symbol.api_symbol == XBTUSD:\n listing_date = datetime.date(2015, 9, 25)\n else:\n data = get_bitmex_api_response(get_api_url, base_url)\n listing_date = pd.to_datetime(data[0][\"listing\"]).date()\n\n # Without this check, empty data frames may be acquired from BitMEX data before\n # the symbol listing date.\n if date >= listing_date:\n return super().get_data_frame(date)\n","sub_path":"quant_candles/exchanges/bitmex/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41566378","text":"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom pathlib import Path\nfrom typing import Generator\n\nimport pytest\nimport torch\nfrom _pytest.fixtures import SubRequest\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom torch.utils.data import ConcatDataset\n\nfrom torchgeo.datasets import SEN12MS\nfrom torchgeo.transforms import Identity\n\n\nclass TestSEN12MS:\n @pytest.fixture(params=[\"train\", \"test\"])\n def dataset(\n self, monkeypatch: Generator[MonkeyPatch, None, None], request: SubRequest\n ) -> SEN12MS:\n md5s = [\n \"7f14be13d3f62c09b4dd5b4d55c97fd6\",\n \"48182d44b375360381f36d432956b225\",\n \"96cf1b8405d4149c6fe61ad7100bd65d\",\n \"ba8e7e10fba9eea6900ddc530c86025a\",\n \"7ba7c51f2fb3a2074b7bbd3e24f9d70d\",\n \"280c9be2d1e13e663824dccd85e1e42f\",\n \"a5284baf48534d4bc77acb1b103ff16c\",\n \"c6b176fed0cdd5033cb1835506e40ee4\",\n \"adc672746b79be4c4edc8b1a564e3ff4\",\n \"194fab4a4e067a0452824c4e39f61b77\",\n \"7899c0c36c884ae8c991ab8518b0d177\",\n \"ccfee543d4351bcc5aa68729e8cc795c\",\n ]\n\n monkeypatch.setattr(SEN12MS, \"md5s\", md5s) # type: ignore[attr-defined]\n root = os.path.join(\"tests\", \"data\", \"sen12ms\")\n split = request.param\n transforms = Identity()\n return SEN12MS(root, split, transforms=transforms, checksum=True)\n\n def test_getitem(self, dataset: SEN12MS) -> None:\n x = dataset[0]\n assert isinstance(x, dict)\n assert isinstance(x[\"image\"], torch.Tensor)\n assert isinstance(x[\"mask\"], torch.Tensor)\n assert x[\"image\"].shape[0] == 15\n\n def test_len(self, dataset: SEN12MS) -> None:\n assert len(dataset) == 8\n\n def test_add(self, dataset: SEN12MS) -> None:\n ds = dataset + dataset\n assert isinstance(ds, ConcatDataset)\n assert len(ds) == 16\n\n def test_out_of_bounds(self, dataset: SEN12MS) -> None:\n with pytest.raises(IndexError):\n dataset[8]\n\n def test_invalid_split(self) -> None:\n with pytest.raises(AssertionError):\n SEN12MS(split=\"foo\")\n\n def test_not_downloaded(self, tmp_path: Path) -> None:\n with pytest.raises(RuntimeError, match=\"Dataset not found or corrupted.\"):\n SEN12MS(str(tmp_path), checksum=True)\n\n with pytest.raises(RuntimeError, match=\"Dataset 
not found or corrupted.\"):\n SEN12MS(str(tmp_path), checksum=False)\n\n def test_check_integrity_light(self) -> None:\n root = os.path.join(\"tests\", \"data\", \"sen12ms\")\n ds = SEN12MS(root, checksum=False)\n assert isinstance(ds, SEN12MS)\n\n def test_band_subsets(self) -> None:\n root = os.path.join(\"tests\", \"data\", \"sen12ms\")\n for bands in SEN12MS.BAND_SETS.values():\n ds = SEN12MS(root, bands=bands, checksum=False)\n x = ds[0][\"image\"]\n assert x.shape[0] == len(bands)\n","sub_path":"tests/datasets/test_sen12ms.py","file_name":"test_sen12ms.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"93178874","text":"from dynamoengine.base.metaclasses import ItemMetaclass\nfrom dynamoengine.static import VERBOSE_TYPES\n\nfrom dynamoengine.exceptions import (\n DynamoPreconditionError,\n ItemAttributeTypeError,\n AttributeIsRequired\n)\n\nfrom dynamoengine.connection import (\n create_item,\n update_item,\n delete_item,\n with_table\n)\n\n\nclass Item(object, metaclass=ItemMetaclass):\n '''\n\n Represents an item (and a table)\n on DynamoDB. Similar to how\n a Document represents a single\n instance and a collection in\n MongoEngine.\n\n Properties of classes that inherit\n this one become attributes\n of the items.\n\n objects = Query with initial Table scope.\n\n '''\n __slots__ = (\n \"_created\", \"_data\", \"_modified\"\n )\n\n def __init__(self, **kwargs):\n '''\n\n Construct new Item with\n attributes kwargs.\n\n Raise ItemAttributeError if\n a required attribute is not found.\n\n '''\n with_table(self.__class__)\n self._created = False\n self._data = {}\n self._modified = {}\n self._digest(kwargs)\n\n def __getitem__(self, name):\n if name in self._fields:\n return getattr(self, name)\n\n def __setitem__(self, name, value):\n return setattr(self, name, value)\n\n @classmethod\n def _get_hash_field(cls):\n return cls._fields[cls._meta.hash_field_name]\n\n @classmethod\n def from_dynamo(cls, attrs):\n ''' (ItemMetaclass, dict) -> Item\n\n Return new Item with attributes\n parsed from boto3 DynamoDB\n response.\n\n Raise ItemAttributeTypeError\n if field type defined in Item\n class does not match the\n one returned by DynamoDB.\n\n Precondition:\n Each attribute has ONLY\n one type. 
That is, attrs is a dict\n where keys are attribute names,\n and values are dicts of ONLY\n one key, which is the DynamoDB\n type of the attribute, and whose\n value is the attribute value in the\n correct type.\n\n '''\n data = {}\n for name, val in attrs.items():\n # Ignore values not defined\n if name not in cls._fields:\n continue\n\n if len(val) != 1:\n raise DynamoPreconditionError(\n \"Expected attribute to have only one type on response.\"\n )\n\n if cls._fields[name].dtype not in val:\n raise ItemAttributeTypeError(\n \"Expected field ({0}) to be of type {1} but found {2}.\"\n .format(\n name,\n VERBOSE_TYPES[cls._fields[name].dtype],\n VERBOSE_TYPES[val.keys()])\n )\n\n data[name] = val[cls._fields[name].dtype]\n\n loaded = cls(**data)\n loaded._created = True\n\n return loaded\n\n def _digest(self, atts):\n ''' (Item, dict)\n\n Set attributes of item\n from dict [attribute name] -> value\n atts.\n\n '''\n for name, val in atts.items():\n # self[name] = val\n setattr(self, name, val)\n\n def _flush_update(self):\n '''\n\n Reset all field modified status to\n False.\n\n '''\n for field in self._modified.keys():\n self._modified[field] = False\n\n def _build_update(self):\n ''' -> tuple (expression, names, values)\n\n Return tuple with valid boto3\n UpdateExpression, attribute names,\n and attribute values which\n were modified since the\n item was loaded. If no attributes\n have been modified, return\n (None, None, None).\n\n Precondition:\n Item must have been\n previously inserted into\n the database.\n\n Raises:\n dynamoengine.exceptions.DynamoPreconditionError\n if above precondition is failed.\n\n '''\n if not self._created:\n raise DynamoPreconditionError(\n \"Cannot update a new Item!\"\n )\n\n names = {}\n values = {}\n\n i = 1\n for fname, field in self._fields.items():\n # Loop through modified fields\n # if not field.changed:\n # continue\n if not self._modified.get(fname, False):\n continue\n\n names[\"#a\" + str(i)] = field.name\n values[\":v\" + str(i)] = self.to_dynamo(field.name)\n\n i += 1\n\n if i == 1:\n return (None, None, None)\n\n expressions = []\n for z in range(1, i):\n zs = str(z)\n expressions.append(\"#a\" + zs + \" = \" + \":v\" + zs)\n\n expression = ', '.join(expressions)\n expression = 'SET ' + expression\n\n return expression, names, values\n\n def _mark_as_changed(self, name):\n '''\n\n Mark field with name as modified.\n\n '''\n self._modified[name] = True\n\n def get_key(self):\n ''' -> dict\n\n Return boto3-compatible\n dictionary with the key(s)\n referencing this item.\n\n Example:\n {\n '_id': {\n 'S': 'item-id-here'\n }\n }\n\n '''\n _field = self.__class__._get_hash_field()\n\n key = {}\n kname = _field.name\n\n key[kname] = {}\n key[kname][_field.dtype] = self[kname]\n\n return key\n\n def to_dynamo(self, field_name=None):\n ''' -> dict\n\n Return dict of attributes\n converted to boto3-compatible\n field_name -> type -> value\n\n Raises\n dynamoengine.exceptions.AttributeIsRequired\n if field was set as required and has no value\n\n '''\n d_obj = {}\n\n if field_name:\n # Get values for every field\n field = self._fields[field_name]\n fval = self._data.get(field_name, field._default())\n\n if field.required and fval is None:\n raise AttributeIsRequired(\n \"Field ({0}) is required but has no value.\"\n .format(field_name)\n )\n\n if fval is not None:\n return field.to_dynamo(fval)\n else:\n return None\n\n for name in self._fields.keys():\n d_obj_z = self.to_dynamo(name)\n\n if d_obj_z is not None:\n d_obj[name] = d_obj_z\n\n return 
d_obj\n\n def update(self, **atts):\n ''' (**)\n\n Modify attributes of item,\n and save item on Database.\n\n '''\n self._digest(atts)\n self.save()\n\n return self\n\n def save(self):\n '''\n\n Create or update item\n and its attributes\n on DynamoDB.\n\n '''\n if self._created:\n update_item(self)\n else:\n create_item(self)\n self._created = True\n\n return self\n\n def delete(self):\n '''\n\n Delete this item from\n the DynamoDB table.\n\n '''\n delete_item(self)\n","sub_path":"dynamoengine/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"519182232","text":"import os, subliminal, lxml\n\nfrom app import *\n\nprojectDir = HandleBarConfigPath\n\nclass subs:\n \n def __init__(self, file, type):\n \n \tself.filePath = file\n\n \tif type == \"movie\":\n \t\tself.services = ['addic7ed', 'opensubtitles', 'subswiki', 'thesubdb']\n \telse:\n \t\tself.services = ['bierdopje']\n \t\n \t \t\n def downloadSubtitles(self):\n\n\t cwd = os.path.abspath(projectDir + '/' + SubtitlePath)\n\t os.chdir(cwd)\t \t \n\n\t sub = subliminal.download_subtitles(os.path.basename(self.filePath), [SubtitleLanguageISO], cache_dir=\"/tmp\", services=self.services)\n\t #print sub \t\n","sub_path":"app/subs.py","file_name":"subs.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"479593493","text":"import json\nfrom os import listdir\nfrom os.path import join, isfile, isdir, realpath\nfrom subprocess import Popen, PIPE\nfrom ConfigParser import SafeConfigParser\nfrom ConfigParser import Error as ConfigParserError\nfrom ConfigParser import NoSectionError\n\nfrom jig.exc import PluginError\nfrom jig.conf import PLUGIN_CONFIG_FILENAME, PLUGIN_PRE_COMMIT_SCRIPT\n\ntry:\n from collections import OrderedDict\nexcept ImportError: # pragma: no cover\n from ordereddict import OrderedDict\n\n\nclass PluginManager(object):\n\n \"\"\"\n Provides access to running and managing plugins.\n\n \"\"\"\n def __init__(self, config=None):\n \"\"\"\n Create a plugin manager with the given ``config``.\n\n The ``config`` argument should be an instance of\n :py:class:`SafeConfigParser` and will be the main configuration for an\n jig-initialized Git repository.\n\n If ``config`` is missing, an empty :py:class:`SafeConfigParser` will be\n created.\n \"\"\"\n # The instance of SafeConfigParser we get from :py:method:`config`.\n self.config = config or SafeConfigParser()\n\n # Look through the config and initialize any installed plugins\n self._plugins = self._init_plugins(self.config)\n\n def _init_plugins(self, config):\n \"\"\"\n Creates :py:class:`Plugin` instances from ``config``.\n \"\"\"\n plugins = []\n for section_name in config.sections():\n if not section_name.startswith('plugin:'):\n # We are only interested in the plugin configs\n continue\n\n _, bundle, name = section_name.split(':')\n\n path = config.get(section_name, 'path')\n\n plugin_cfg = join(path, PLUGIN_CONFIG_FILENAME)\n\n with open(plugin_cfg) as fh:\n plugin_config = SafeConfigParser()\n try:\n plugin_config.readfp(fh) # pragma: no branch\n except ConfigParserError as cpe:\n # Something happened when parsing the config\n line = cpe.errors.pop()[0]\n raise PluginError(\n 'Could not parse config file for '\n '{0} in {1}, line {2}.'.format(name, path, line))\n\n # Get rid of the path, we don't need to send this as part of the\n # config for the plugin\n 
pc = OrderedDict(config.items(section_name))\n del pc['path']\n\n section = Plugin(bundle, name, path, pc)\n plugins.append(section)\n\n return plugins\n\n def __iter__(self):\n return iter(self._plugins)\n\n def __len__(self):\n return len(self._plugins)\n\n @property\n def plugins(self):\n return list(self)\n\n def add(self, plugindir):\n \"\"\"\n Add the given plugin or directory of plugins to this manager instance.\n\n ``plugindir`` should be the full path to a directory containing all the\n files required for a Jig plugin. It can alternatively be a directory of\n plugins, where each sub-directory is a Jig plugin.\n\n If ``recursive`` is True, then add will treat this as a directory of\n plugins instead of a single plugin and attempt to add them all.\n\n Returns a list of plugins that were added to this manager.\n \"\"\"\n root_exc_collection = []\n sub_exc_collection = []\n added = []\n\n try:\n # Add as if plugindir is the actual plugin\n added.append(self._add_plugin(plugindir))\n\n return added\n except PluginError as pe:\n root_exc_collection.append(pe)\n\n # Walk the directory, try to add each sub-directory as a plugin\n for dirname in listdir(plugindir):\n subdir = join(plugindir, dirname)\n if not isdir(subdir) or dirname == '.git':\n continue\n try:\n added.append(self._add_plugin(subdir))\n except PluginError as pe:\n sub_exc_collection.append(pe)\n\n if added:\n return added\n\n exc_collection = sub_exc_collection or root_exc_collection\n\n # If we haven't added any plugins and we have an exception raise it\n raise exc_collection[0]\n\n def _add_plugin(self, plugindir):\n \"\"\"\n If this is a Jig plugin, add it.\n\n ``plugindir`` should be the full path to a directory containing all the\n files required for a jig plugin.\n \"\"\"\n # Is this a plugins?\n config_filename = join(plugindir, PLUGIN_CONFIG_FILENAME)\n\n if not isfile(config_filename):\n raise PluginError('The plugin file {0} is missing.'.format(\n config_filename))\n\n config = SafeConfigParser()\n\n with open(config_filename, 'r') as fh:\n try:\n config.readfp(fh)\n except ConfigParserError as e:\n raise PluginError(e)\n\n try:\n settings = OrderedDict(config.items('settings'))\n except NoSectionError:\n settings = []\n\n try:\n plugin_info = OrderedDict(config.items('plugin'))\n except NoSectionError:\n raise PluginError(\n 'The plugin config does not contain a '\n '[plugin] section.')\n\n try:\n bundle = plugin_info['bundle']\n name = plugin_info['name']\n except KeyError:\n raise PluginError(\n 'Could not find the bundle or name of '\n 'the plugin.')\n\n new_section = 'plugin:{bundle}:{name}'.format(\n bundle=bundle, name=name)\n\n if self.config.has_section(new_section):\n raise PluginError('The plugin is already installed.')\n\n self.config.add_section(new_section)\n\n self.config.set(new_section, 'path', plugindir)\n\n for setting in settings:\n option, value = setting, settings[setting]\n self.config.set(new_section, option, value)\n\n # Re-initialize the self.plugins list\n self._plugins = self._init_plugins(self.config)\n\n # And return the plugin once we find it\n for plugin in self._plugins: # pragma: no branch\n if plugin.name == name and plugin.bundle == bundle:\n return plugin\n\n def remove(self, bundle, name):\n \"\"\"\n Remove a plugin from the list and config.\n\n Both ``bundle`` and ``name`` are required. 
A\n :py:exception:`PluginError` will be raised if the plugin does not\n exist.\n \"\"\"\n section_name = 'plugin:{bundle}:{name}'.format(\n bundle=bundle, name=name)\n\n if not self.config.has_section(section_name):\n raise PluginError('This plugin does not exist.')\n\n self.config.remove_section(section_name)\n\n # Again, re-initialize the self.plugins list\n self._plugins = self._init_plugins(self.config)\n\n\nclass Plugin(object):\n\n \"\"\"\n A single unit that performs some helpful operation for the user.\n\n \"\"\"\n def __init__(self, bundle, name, path, config={}, help={}):\n # What bundle is this plugin a part of\n self.bundle = bundle\n # What is the name of this plugin?\n self.name = name\n # Where does this plugin live\n self.path = realpath(path)\n # Plugin-specific configuration\n self.config = config\n # Helpful descriptions of the configurations\n self.help = help\n\n def pre_commit(self, git_diff_index):\n \"\"\"\n Runs the plugin's pre-commit script, passing in the diff.\n\n ``git_diff_index`` is a :py:class:`jig.diffconvert.GitDiffIndex`\n object.\n\n The pre-commit script will receive JSON data as standard input (stdin).\n The JSON data is comprised of two main attributes: config and diff.\n\n The ``config`` attribute represents the configuration for this plugin.\n This is up to the plugin author but the values can be changed by the\n user.\n\n The ``diff`` attribute is a list of files and changes that have\n occurred to them. See :py:module:`jig.diffconvert` for\n information on what this object provides.\n \"\"\"\n # Grab this plugin's settings\n data_in = {\n 'config': self.config,\n 'files': git_diff_index}\n\n script = join(self.path, PLUGIN_PRE_COMMIT_SCRIPT)\n ph = Popen([script], stdin=PIPE, stdout=PIPE, stderr=PIPE)\n\n # Send the data to the script\n stdin = json.dumps(data_in, indent=2, cls=PluginDataJSONEncoder)\n\n retcode = None\n stdout = ''\n stderr = ''\n\n try:\n stdout, stderr = ph.communicate(stdin)\n\n # Convert to unicode\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n\n retcode = ph.returncode\n except OSError as ose:\n # Generic non-zero retcode that indicates an error\n retcode = 1\n if ose.errno == 32:\n stderr = u'Error: received SIGPIPE from the command'\n else:\n stderr = unicode(ose)\n\n # And return the relevant stuff\n return retcode, stdout, stderr\n\n\nclass PluginDataJSONEncoder(json.JSONEncoder):\n\n \"\"\"\n Converts the special data objects used when a plugin runs pre-commit.\n\n \"\"\"\n def default(self, obj):\n \"\"\"\n Implements JSONEncoder default method.\n \"\"\"\n files = [i for i in obj.files()]\n\n obj = []\n for f in files:\n obj.append({\n 'type': unicode(f['type']),\n 'name': unicode(f['name']),\n 'filename': unicode(f['filename']),\n 'diff': [j for j in f['diff']]})\n\n return obj\n","sub_path":"src/jig/plugins/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":9768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"294624095","text":"#!/usr/local/bin/python3\n\nimport os, subprocess, shutil, fnmatch\nfrom math import ceil\n#from daemon import *\n\ndef list_processed_files(start_path):\n matches = []\n for root, dirnames, filenames in os.walk(start_path):\n for filename in fnmatch.filter(filenames, '*.processed'):\n matches.append(os.path.join(root, filename))\n return matches\n\ndef process_racf_files(file,base):\n ps = subprocess.Popen(\"wc -l \" + file, shell=True, stdout=subprocess.PIPE)\n line_count = 
ps.stdout.read()\n line_count = int(line_count.split()[0])\n ps.stdout.close()\n ps.wait()\n os.system(\"split -l \" +str(int(ceil(line_count/10.0))) + \" \" + file + \" \" + base)\n move_files(base)\n\ndef move_files(base):\n for i in range(1,11):\n root_src_file = base + \"a\" + chr(i+96)\n root_dst_dir = \"/RACF/C\" + str(i) + \"/RACF\"\n unprocessed_file = os.path.join(root_dst_dir, root_src_file)\n unprocessed_dst_dir = \"/RACF/UNPROCESSED\"\n still_unproc_file = os.path.join(unprocessed_dst_dir, root_src_file)\n still_unproc_dir = \"/RACF/UNPROCESSED/STILL\"\n if os.path.exists(unprocessed_file):\n shutil.copy(root_src_file,unprocessed_dst_dir)\n os.remove(unprocessed_file)\n shutil.copy(root_src_file, root_dst_dir)\n elif os.path.exists(still_unproc_file):\n shutil.copy(still_unproc_file, still_unproc_dir)\n os.remove(still_unproc_file)\n else:\n shutil.copy(root_src_file, root_dst_dir)\n\n# Processing RAW RACF event data\n#file = \"/RACF/DATA/ibmtsok.racf.smfdump.20111104.083015\"\n#base= \"ibmtsok.racf.\"\n#unprocessed_files = \"/RACF/UNPROCESSED\"\n\n#process_racf_files(file,base)\n","sub_path":"ArcSight/racf_processing.py","file_name":"racf_processing.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188005553","text":"import uuid\r\nimport copy\r\nfrom io import BytesIO\r\nfrom lxml import etree as etree_\r\n\r\nfrom sdc11073.namespaces import wsaTag, wseTag, dpwsTag, s12Tag, xmlTag, nsmap, WSA_ANONYMOUS, docNameFromQName\r\nfrom sdc11073.namespaces import Prefix_Namespace as Prefix\r\nfrom .. import isoduration\r\n\r\nCHECK_NAMESPACES = False # can be used to enable additional checks for too many namespaces or undefined namespaces\r\n\r\n\r\nDIALECT_ACTION = '{}/Action'.format(Prefix.DPWS.namespace)\r\nDIALECT_THIS_MODEL = '{}/ThisModel'.format(Prefix.DPWS.namespace)\r\nDIALECT_THIS_DEVICE = '{}/ThisDevice'.format(Prefix.DPWS.namespace)\r\nDIALECT_RELATIONSHIP = '{}/Relationship'.format(Prefix.DPWS.namespace)\r\nHOST_TYPE = '{}/host'.format(Prefix.DPWS.namespace)\r\n\r\n\r\nclass SoapResponseException(Exception):\r\n \r\n def __init__(self, soapResponseEnvelope):\r\n super(SoapResponseException, self).__init__()\r\n self.soapResponseEnvelope = soapResponseEnvelope\r\n\r\n\r\nclass ExtendedDocumentInvalid(etree_.DocumentInvalid):\r\n\r\n pass\r\n\r\n\r\ndef mergeDicts(*args):\r\n result = {}\r\n for d in args:\r\n for k, v in d.items():\r\n if not k in result:\r\n result[k] = v \r\n else:\r\n if result[k] != v:\r\n raise RuntimeError('Merge Conflict key={}, value1={}, value2={}'.format(k, result[k], v))\r\n return result\r\n\r\n \r\ndef getText(node, idstring, ns):\r\n if node is None:\r\n return\r\n tmp = node.find(idstring, ns)\r\n if tmp is not None:\r\n return tmp.text\r\n\r\n\r\nclass GenericNode(object):\r\n def __init__(self, node):\r\n self._node = node\r\n\r\n \r\n def asEtreeSubNode(self, rootNode):\r\n rootNode.append(self._node)\r\n\r\n\r\n \r\nclass WsaEndpointReferenceType(object):\r\n ''' Acc. 
to \"http://www.w3.org/2005/08/addressing\"\r\n\r\n '''\r\n __slots__ = ('address', 'referenceParametersNode', 'metaDataNode')\r\n def __init__(self, address, referenceParametersNode=None, metaDataNode=None):\r\n self.address = address # type=\"wsa:AttributedURI\", which is an xs:anyURI element\r\n self.referenceParametersNode = None\r\n self.metaDataNode = None\r\n if referenceParametersNode is not None:\r\n if hasattr(referenceParametersNode, 'tag') and referenceParametersNode.tag == wsaTag('ReferenceParameters'):\r\n self.referenceParametersNode = referenceParametersNode # any content allowed. optional\r\n else:\r\n self.referenceParametersNode = etree_.Element(wsaTag('ReferenceParameters'))\r\n self.referenceParametersNode.extend(referenceParametersNode)\r\n if metaDataNode is not None:\r\n if hasattr(metaDataNode, 'tag') and metaDataNode.tag == wsaTag('MetaData'):\r\n self.metaDataNode = metaDataNode # any content allowed. optional\r\n else:\r\n self.metaDataNode = etree_.Element(wsaTag('MetaData'))\r\n self.metaDataNode.extend(metaDataNode)\r\n\r\n def __str__(self):\r\n return 'WsaEndpointReferenceType: address={}'.format(self.address)\r\n \r\n @classmethod\r\n def fromEtreeNode(cls, rootNode):\r\n addressNode = rootNode.find('wsa:Address', nsmap)\r\n address = addressNode.text\r\n referenceParametersNode = rootNode.find('wsa:ReferenceParameters', nsmap)\r\n metaDataNode = rootNode.find('wsa:MetaData', nsmap)\r\n ret = cls(address, referenceParametersNode, metaDataNode)\r\n return ret\r\n \r\n \r\n def asEtreeSubNode(self, rootNode):\r\n node = etree_.SubElement(rootNode, wsaTag('Address'))\r\n node.text = self.address\r\n if self.referenceParametersNode is not None:\r\n rootNode.append(copy.copy(self.referenceParametersNode))\r\n if self.metaDataNode is not None:\r\n rootNode.append(self.metaDataNode)\r\n\r\n\r\n\r\nclass WsAddress(object):\r\n __slots__ = ('messageId', 'to', 'from_', 'replyTo', 'faultTo', 'action',\r\n 'messageId', 'relatesTo', 'referenceParametersNode', 'relationshipType')\r\n def __init__(self, action, messageId=None, to=None, relatesTo=None, from_=None, replyTo=None,\r\n faultTo=None, referenceParametersNode=None, relationshipType=None): #pylint: disable=too-many-arguments\r\n '''\r\n\r\n :param action: xs:anyURI string, required\r\n :param messageId: xs:anyURI string or None or False; default is None\r\n if None, a messageId is generated automatically\r\n if False, no message ID is generated ( makes only sense for testing )\r\n :param to: xs:anyURI string, optional\r\n :param relatesTo: xs:anyURI string, 0...n\r\n :param from_: WsaEndpointReferenceType instance, optional\r\n :param replyTo: WsaEndpointReferenceType instance, optional\r\n :param faultTo: WsaEndpointReferenceType instance, optional\r\n :param referenceParametersNode: any node, optional\r\n :param relationshipType: a QName, optional\r\n '''\r\n self.action = action\r\n if messageId == False:\r\n self.messageId = None\r\n else:\r\n self.messageId = messageId or uuid.uuid4().urn\r\n self.to = to\r\n self.relatesTo = relatesTo\r\n self.from_ = from_\r\n self.replyTo = replyTo\r\n self.faultTo = faultTo\r\n self.referenceParametersNode = referenceParametersNode\r\n self.relationshipType = relationshipType\r\n\r\n def mkReplyAddress(self, action):\r\n return WsAddress(action=action, relatesTo=self.messageId)\r\n\r\n\r\n def asEtreeSubNode(self, rootNode):\r\n # To (OPTIONAL), defaults to anonymous\r\n node = etree_.SubElement(rootNode, wsaTag('To'), attrib={s12Tag('mustUnderstand'): 'true'})\r\n 
node.text = self.to or WSA_ANONYMOUS\r\n #From\r\n if self.from_:\r\n self.from_.asEtreeSubNode(rootNode)\r\n # ReplyTo (OPTIONAL), defaults to anonymous\r\n if self.replyTo:\r\n self.replyTo.asEtreeSubNode(rootNode)\r\n # FaultTo (OPTIONAL)\r\n if self.faultTo:\r\n self.faultTo.asEtreeSubNode(rootNode)\r\n # Action (REQUIRED)\r\n node = etree_.SubElement(rootNode, wsaTag('Action'), attrib={s12Tag('mustUnderstand'): 'true'})\r\n node.text = self.action\r\n # MessageID (OPTIONAL)\r\n if self.messageId:\r\n node = etree_.SubElement(rootNode, wsaTag('MessageID'))\r\n node.text = self.messageId\r\n # RelatesTo (OPTIONAL)\r\n if self.relatesTo:\r\n node = etree_.SubElement(rootNode, wsaTag('RelatesTo'))\r\n node.text = self.relatesTo\r\n if self.relationshipType is not None:\r\n node.set('RelationshipType', self.relationshipType)\r\n\r\n if self.referenceParametersNode:\r\n rootNode.append(copy.copy(self.referenceParametersNode))\r\n\r\n \r\n @classmethod\r\n def fromEtreeNode(cls, rootNode):\r\n messageId = getText(rootNode, 'wsa:MessageID', nsmap)\r\n to = getText(rootNode, 'wsa:To', nsmap)\r\n action = getText(rootNode, 'wsa:Action', nsmap)\r\n relatesTo = getText(rootNode, 'wsa:RelatesTo', nsmap)\r\n relationshipType = None\r\n relatesToNode = rootNode.find('wsa:RelatesTo', nsmap)\r\n if relatesToNode is not None:\r\n relatesTo = relatesToNode.text\r\n relationshipTypeText = relatesToNode.attrib.get('RelationshipType')\r\n if relationshipTypeText:\r\n # split into namespace, localname\r\n ns, loc = relationshipTypeText.rsplit('/', 1)\r\n relationshipType= etree_.QName(ns, loc)\r\n\r\n def mkEndpointReference(idstring):\r\n tmp = rootNode.find(idstring, nsmap)\r\n if tmp is not None:\r\n return WsaEndpointReferenceType.fromEtreeNode(tmp)\r\n \r\n from_ = mkEndpointReference('wsa:From')\r\n replyTo = mkEndpointReference('wsa:ReplyTo')\r\n faultTo = mkEndpointReference('wsa:FaultTo')\r\n referenceParametersNode = rootNode.find('wsa:ReferenceParameters', nsmap)\r\n \r\n return cls(messageId=messageId, \r\n to=to, \r\n action=action, \r\n relatesTo=relatesTo, \r\n from_=from_,\r\n replyTo=replyTo,\r\n faultTo=faultTo,\r\n referenceParametersNode=referenceParametersNode,\r\n relationshipType=relationshipType)\r\n\r\n\r\n_LANGUAGE_ATTR = '{http://www.w3.org/XML/1998/namespace}lang'\r\n\r\n\r\nclass WsSubscribe(object):\r\n MODE_PUSH = '{}/DeliveryModes/Push'.format(Prefix.WSE.namespace)\r\n __slots__ = ('delivery_mode', 'notifyTo', 'endTo', 'expires', 'filter')\r\n def __init__(self, notifyTo,\r\n expires,\r\n endTo=None,\r\n filter_=None,\r\n delivery_mode=None):\r\n '''\r\n @param notifyTo: a WsaEndpointReferenceType\r\n @param expires: duration in seconds ( absolute date not supported)\r\n @param endTo: a WsaEndpointReferenceType or None\r\n @param delivery_mode: defaults to self.MODE_PUSH\r\n '''\r\n self.delivery_mode = delivery_mode or self.MODE_PUSH\r\n self.notifyTo = notifyTo\r\n self.endTo = endTo\r\n self.expires = expires\r\n self.filter = filter_\r\n\r\n\r\n def asEtreeSubNode(self, rootNode):\r\n # To (OPTIONAL), defaults to anonymous\r\n subscribe = etree_.SubElement(rootNode, wseTag('Subscribe'), nsmap=Prefix.partialMap(Prefix.WSE, Prefix.WSA))\r\n if self.endTo is not None:\r\n endToNode = etree_.SubElement(subscribe, wseTag('EndTo'))\r\n self.endTo.asEtreeSubNode(endToNode)\r\n delivery = etree_.SubElement(subscribe, wseTag('Delivery'))\r\n delivery.set('Mode', self.delivery_mode)\r\n\r\n notifyToNode = etree_.SubElement(delivery, wseTag('NotifyTo'))\r\n 
self.notifyTo.asEtreeSubNode(notifyToNode)\r\n\r\n exp = etree_.SubElement(subscribe, wseTag('Expires'))\r\n exp.text = isoduration.durationString(self.expires)\r\n fil = etree_.SubElement(subscribe, wseTag('Filter'))\r\n fil.set('Dialect', DIALECT_ACTION) # Is this always this string?\r\n fil.text= self.filter\r\n\r\n\r\n @classmethod\r\n def fromEtreeNode(cls, rootNode):\r\n raise NotImplementedError #pylint: disable=unused-argument\r\n\r\n\r\n\r\nclass DPWSThisDevice(object):\r\n __slots__ = ('friendlyName', 'firmwareVersion', 'serialNumber')\r\n\r\n def __init__(self, friendlyName, firmwareVersion, serialNumber):\r\n if isinstance(friendlyName, dict):\r\n self.friendlyName = friendlyName\r\n else: \r\n self.friendlyName = {'': friendlyName} # localized texts\r\n self.firmwareVersion = firmwareVersion\r\n self.serialNumber = serialNumber\r\n\r\n\r\n @classmethod\r\n def fromEtreeNode(cls, rootNode):\r\n friendlyName = {} # localized texts\r\n for m in rootNode.findall('dpws:FriendlyName', nsmap):\r\n friendlyName[m.get(_LANGUAGE_ATTR)] = m.text \r\n firmwareVersion = getText(rootNode, 'dpws:FirmwareVersion', nsmap)\r\n serialNumber = getText(rootNode, 'dpws:SerialNumber', nsmap)\r\n return cls(friendlyName, firmwareVersion, serialNumber)\r\n\r\n\r\n def asEtreeSubNode(self, rootNode):\r\n thisDevice = etree_.SubElement(rootNode, dpwsTag('ThisDevice'), nsmap=Prefix.partialMap(Prefix.DPWS))\r\n for lang, name in self.friendlyName.items():\r\n friendlyName = etree_.SubElement(thisDevice, dpwsTag('FriendlyName'))\r\n friendlyName.text = name\r\n friendlyName.set(_LANGUAGE_ATTR, lang)\r\n firmwareVersion = etree_.SubElement(thisDevice, dpwsTag('FirmwareVersion'))\r\n firmwareVersion.text = self.firmwareVersion\r\n serialNumber = etree_.SubElement(thisDevice, dpwsTag('SerialNumber'))\r\n serialNumber.text = self.serialNumber\r\n\r\n\r\n def __str__(self):\r\n return 'DPWSThisDevice: friendlyName={}, firmwareVersion=\"{}\", serialNumber=\"{}\"'.format(self.friendlyName, self.firmwareVersion, self.serialNumber)\r\n\r\n\r\n\r\nclass DPWSThisModel(object):\r\n __slots__ = ('manufacturer', 'manufacturerUrl', 'modelName', 'modelNumber', 'modelUrl', 'presentationUrl')\r\n def __init__(self, manufacturer, manufacturerUrl, modelName, modelNumber, modelUrl, presentationUrl):\r\n if isinstance(manufacturer, dict):\r\n self.manufacturer = manufacturer\r\n else: \r\n self.manufacturer = {None: manufacturer} # localized texts\r\n self.manufacturerUrl = manufacturerUrl\r\n if isinstance(modelName, dict):\r\n self.modelName = modelName\r\n else: \r\n self.modelName = {None: modelName} # localized texts\r\n self.modelNumber = modelNumber\r\n self.modelUrl = modelUrl\r\n self.presentationUrl = presentationUrl\r\n\r\n\r\n def __str__(self):\r\n return 'DPWSThisModel: manufacturer={}, modelName=\"{}\", modelNumber=\"{}\"'.format(self.manufacturer, self.modelName, self.modelNumber)\r\n\r\n\r\n @classmethod\r\n def fromEtreeNode(cls, rootNode):\r\n manufacturer = {} # localized texts\r\n for m in rootNode.findall('dpws:Manufacturer', nsmap):\r\n manufacturer[m.get(_LANGUAGE_ATTR)] = m.text \r\n manufacturerUrl = getText(rootNode, 'dpws:ManufacturerUrl', nsmap)\r\n modelName = {} # localized texts\r\n for m in rootNode.findall('dpws:ModelName', nsmap):\r\n modelName[m.get(_LANGUAGE_ATTR)] = m.text \r\n modelNumber = getText(rootNode, 'dpws:ModelNumber', nsmap)\r\n modelUrl = getText(rootNode, 'dpws:ModelUrl', nsmap)\r\n presentationUrl = getText(rootNode, 'dpws:PresentationUrl', nsmap)\r\n return cls(manufacturer, 
manufacturerUrl, modelName, modelNumber, modelUrl, presentationUrl)\r\n\r\n\r\n def asEtreeSubNode(self, rootNode):\r\n thisModel = etree_.SubElement(rootNode, dpwsTag('ThisModel'), nsmap=Prefix.partialMap(Prefix.DPWS))\r\n for lang, name in self.manufacturer.items():\r\n manufacturer = etree_.SubElement(thisModel, dpwsTag('Manufacturer'))\r\n manufacturer.text = name\r\n if lang is not None:\r\n manufacturer.set(_LANGUAGE_ATTR, lang)\r\n\r\n manufacturerUrl = etree_.SubElement(thisModel, dpwsTag('ManufacturerUrl'))\r\n manufacturerUrl.text = self.manufacturerUrl\r\n\r\n for lang, name in self.modelName.items():\r\n manufacturer = etree_.SubElement(thisModel, dpwsTag('ModelName'))\r\n manufacturer.text = name\r\n if lang is not None:\r\n manufacturer.set(_LANGUAGE_ATTR, lang)\r\n\r\n modelNumber = etree_.SubElement(thisModel, dpwsTag('ModelNumber'))\r\n modelNumber.text = self.modelNumber\r\n modelUrl = etree_.SubElement(thisModel, dpwsTag('ModelUrl'))\r\n modelUrl.text = self.modelUrl\r\n presentationUrl = etree_.SubElement(thisModel, dpwsTag('PresentationUrl'))\r\n presentationUrl.text = self.presentationUrl\r\n\r\n\r\n\r\nclass DPWSHost(object):\r\n __slots__ = ('endpointReferences', 'types')\r\n def __init__(self, endpointReferencesList, typesList):\r\n '''\r\n @param endpointReferencesList: list of WsEndpointReference instances\r\n @param typesList: a list of etree.QName instances\r\n '''\r\n self.endpointReferences = endpointReferencesList\r\n self.types = typesList\r\n\r\n\r\n def asEtreeSubNode(self, rootNode):\r\n _ns = Prefix.partialMap(Prefix.DPWS, Prefix.WSA)\r\n # reverse lookup( key is namespace, value is prefix)\r\n res = {}\r\n for k,v in _ns.items():\r\n res[v] = k\r\n for k,v in rootNode.nsmap.items():\r\n res[v] = k\r\n \r\n # must explicitely add namespaces of types to Host node, because list of qnames is not handled by lxml\r\n typesTexts = []\r\n if self.types:\r\n for qname in self.types:\r\n prefix = res.get(qname.namespace)\r\n if not prefix:\r\n # create a random prefix\r\n prefix='_dpwsh{}'.format(len(_ns))\r\n _ns[prefix] = qname.namespace\r\n typesTexts.append('{}:{}'.format(prefix, qname.localname))\r\n \r\n hostNode = etree_.SubElement(rootNode, dpwsTag('Host'))#, nsmap=_ns)\r\n epRefNode = etree_.SubElement(hostNode, wsaTag('EndpointReference'))#, nsmap=_ns) \r\n for epRef in self.endpointReferences:\r\n epRef.asEtreeSubNode(epRefNode)\r\n \r\n if typesTexts:\r\n typesNode = etree_.SubElement(hostNode, dpwsTag('Types'), nsmap=_ns)# add also namespace prefixes that were locally generated\r\n typesText = ' '.join(typesTexts)\r\n typesNode.text = typesText\r\n\r\n\r\n @classmethod\r\n def fromEtreeNode(cls, rootNode):\r\n endpointReferences = []\r\n for tmp in rootNode.findall('wsa:EndpointReference', nsmap):\r\n endpointReferences.append(WsaEndpointReferenceType.fromEtreeNode(tmp))\r\n types = getText(rootNode, 'dpws:Types', nsmap)\r\n if types:\r\n types = types.split()\r\n return cls(endpointReferences, types)\r\n\r\n\r\n def __str__(self):\r\n return 'DPWSHost: endpointReference={}, types=\"{}\"'.format(self.endpointReferences, self.types)\r\n\r\n\r\n\r\nclass DPWSHosted(object):\r\n __slots__ = ('endpointReferences', 'types', 'serviceId', 'soapClient')\r\n def __init__(self, endpointReferencesList, typesList, serviceId):\r\n self.endpointReferences = endpointReferencesList\r\n self.types = typesList # a list of QNames\r\n self.serviceId = serviceId\r\n self.soapClient = None\r\n\r\n\r\n def asEtreeSubNode(self, rootNode):\r\n hostedNode = 
etree_.SubElement(rootNode, dpwsTag('Hosted'))\r\n epRefNode = etree_.SubElement(hostedNode, wsaTag('EndpointReference'))\r\n for epRef in self.endpointReferences:\r\n epRef.asEtreeSubNode(epRefNode)\r\n if self.types:\r\n typesText = ' '.join([docNameFromQName(t, rootNode.nsmap) for t in self.types])\r\n typesNode = etree_.SubElement(hostedNode, dpwsTag('Types'))#, nsmap=ns)\r\n typesNode.text = typesText\r\n serviceNode = etree_.SubElement(hostedNode, dpwsTag('ServiceId'))#, nsmap=ns)\r\n serviceNode.text = self.serviceId\r\n\r\n\r\n @classmethod\r\n def fromEtreeNode(cls, rootNode):\r\n endpointReferences = []\r\n for tmp in rootNode.findall('wsa:EndpointReference', nsmap):\r\n endpointReferences.append(WsaEndpointReferenceType.fromEtreeNode(tmp))\r\n types = getText(rootNode, 'dpws:Types', nsmap)\r\n if types:\r\n types = types.split()\r\n serviceId = getText(rootNode, 'dpws:ServiceId', nsmap)\r\n return cls(endpointReferences, types, serviceId)\r\n\r\n def __str__(self):\r\n return 'DPWSHosted: endpointReference={}, types=\"{}\" serviceId=\"{}\"'.format(self.endpointReferences, self.types, self.serviceId)\r\n\r\n\r\n\r\nclass DPWSRelationShip(object):\r\n def __init__(self, rootNode=None):\r\n hostNode = rootNode.find('dpws:Host', nsmap)\r\n self.hosted = {}\r\n self.host = DPWSHost.fromEtreeNode(hostNode)\r\n for hostedNode in rootNode.findall('dpws:Hosted', nsmap):\r\n hosted = DPWSHosted.fromEtreeNode(hostedNode)\r\n self.hosted[hosted.serviceId] = hosted\r\n\r\n\r\n\r\nclass MetaDataSection(object):\r\n def __init__(self, metadataSections):\r\n self._metadataSections = metadataSections\r\n\r\n\r\n def __getattr__(self, attrname):\r\n try:\r\n return self._metadataSections[attrname]\r\n except KeyError:\r\n raise AttributeError\r\n\r\n\r\n @classmethod\r\n def fromEtreeNode(cls, rootNode):\r\n metadata = rootNode.find('wsx:Metadata', nsmap)\r\n metadataSections = {}\r\n if metadata is not None:\r\n for metadataSection in metadata.findall('wsx:MetadataSection', nsmap):\r\n dialect = metadataSection.attrib['Dialect']\r\n if dialect[-1] == '/': \r\n dialect = dialect[:-1]\r\n if dialect == \"http://schemas.xmlsoap.org/wsdl\":\r\n locationNode = metadataSection.find('wsx:Location', nsmap)\r\n metadataSections['wsdl_location'] = locationNode.text\r\n elif dialect == DIALECT_THIS_MODEL:\r\n thisModelNode = metadataSection.find('dpws:ThisModel', nsmap)\r\n metadataSections['thisModel'] = DPWSThisModel.fromEtreeNode(thisModelNode)\r\n elif dialect == DIALECT_THIS_DEVICE:\r\n thisDeviceNode = metadataSection.find('dpws:ThisDevice', nsmap)\r\n metadataSections['thisDevice'] = DPWSThisDevice.fromEtreeNode(thisDeviceNode)\r\n elif dialect == DIALECT_RELATIONSHIP:\r\n relationshipNode = metadataSection.find('dpws:Relationship', nsmap)\r\n if relationshipNode.get('Type') == HOST_TYPE:\r\n metadataSections['relationShip'] = DPWSRelationShip(relationshipNode)\r\n return cls(metadataSections)\r\n\r\n\r\nclass Soap12EnvelopeBase(object):\r\n __slots__ = ('_headerNode', '_bodyNode', '_headerObjects', '_bodyObjects', '_docRoot')\r\n def __init__(self):\r\n self._headerNode = None\r\n self._bodyNode = None\r\n self._headerObjects = []\r\n self._bodyObjects = []\r\n self._docRoot = None\r\n\r\n @property\r\n def headerNode(self):\r\n return self._headerNode\r\n\r\n @property\r\n def bodyNode(self):\r\n return self._bodyNode\r\n\r\n def _assert_valid_exception_wrapper(self, schema, content):\r\n try:\r\n schema.assertValid(content)\r\n except etree_.DocumentInvalid:\r\n # reformat and validate again to 
produce better error output\r\n tmp_str = etree_.tostring(content, pretty_print=True)\r\n tmp = etree_.parse(BytesIO(tmp_str))\r\n tmp_str = tmp_str.decode('utf-8')\r\n try:\r\n schema.assertValid(tmp)\r\n except etree_.DocumentInvalid as err:\r\n msg = \"{}\\n{}\".format(str(err), tmp_str)\r\n raise ExtendedDocumentInvalid(msg, error_log=err.error_log)\r\n\r\n\r\nclass Soap12Envelope(Soap12EnvelopeBase):\r\n __slots__ = ('_nsmap', 'address')\r\n def __init__(self, nsmap):\r\n super(Soap12Envelope, self).__init__()\r\n self._nsmap = nsmap\r\n self.address = None\r\n\r\n def addHeaderObject(self, obj):\r\n assert hasattr(obj, 'asEtreeSubNode')\r\n self._headerObjects.append(obj)\r\n self._docRoot = None\r\n\r\n def addHeaderString(self, headerString):\r\n element = etree_.fromstring(headerString)\r\n self.addHeaderObject(GenericNode(element))\r\n self._docRoot = None\r\n \r\n def addHeaderElement(self, element):\r\n self.addHeaderObject(GenericNode(element))\r\n self._docRoot = None\r\n \r\n def addBodyObject(self, obj):\r\n assert hasattr(obj, 'asEtreeSubNode')\r\n self._bodyObjects.append(obj)\r\n self._docRoot = None\r\n\r\n def addBodyString(self, bodyString):\r\n element = etree_.fromstring(bodyString)\r\n self.addBodyObject(GenericNode(element))\r\n self._docRoot = None\r\n\r\n def addBodyElement(self, element):\r\n self.addBodyObject(GenericNode(element))\r\n self._docRoot = None\r\n\r\n def setAddress(self, wsAddress):\r\n self.address = wsAddress\r\n\r\n def buildDoc(self):\r\n if self._docRoot is not None:\r\n return self._docRoot\r\n \r\n root = etree_.Element(s12Tag('Envelope'), nsmap=self._nsmap)\r\n\r\n header = etree_.SubElement(root, s12Tag('Header'))\r\n if self.address:\r\n self.address.asEtreeSubNode(header)\r\n for h in self._headerObjects:\r\n h.asEtreeSubNode(header)\r\n body = etree_.SubElement(root, s12Tag('Body'))\r\n for b in self._bodyObjects:\r\n b.asEtreeSubNode(body)\r\n self._headerNode = header\r\n self._bodyNode = body\r\n self._docRoot = root\r\n return root\r\n\r\n def as_xml(self, pretty=False, request_manipulator=None):\r\n tmp = BytesIO()\r\n root = self.buildDoc()\r\n doc = etree_.ElementTree(element=root)\r\n if hasattr(request_manipulator, 'manipulate_domtree'):\r\n _doc = request_manipulator.manipulate_domtree(doc)\r\n if _doc:\r\n doc = _doc\r\n doc.write(tmp, encoding='UTF-8', xml_declaration=True, pretty_print=pretty)\r\n return tmp.getvalue()\r\n\r\n def validateBody(self, schema):\r\n root = self.buildDoc()\r\n doc = etree_.ElementTree(element=root)\r\n if CHECK_NAMESPACES:\r\n self._find_unused_namespaces(root)\r\n self._find_undefined_namespaces()\r\n if schema is None:\r\n return\r\n bodyNode = doc.find('s12:Body', nsmap)\r\n if bodyNode is not None:\r\n try:\r\n payloadNode = bodyNode[0]\r\n except IndexError: # empty body\r\n return\r\n self._assert_valid_exception_wrapper(schema, payloadNode)\r\n\r\n def _find_unused_namespaces(self, root):\r\n xml_doc = self.as_xml()\r\n unused = []\r\n used = []\r\n for prefix, ns in root.nsmap.items():\r\n _pr = prefix+':'\r\n if _pr.encode() not in xml_doc:\r\n unused.append((prefix, ns))\r\n else:\r\n used.append(prefix)\r\n if unused:\r\n print (root.nsmap, used, xml_doc[:500]) # do not need to see the wohle message\r\n raise RuntimeError('unused namespaces:{}, used={}'.format(unused, used))\r\n\r\n def _find_undefined_namespaces(self):\r\n xml_doc = self.as_xml()\r\n if b':ns0' in xml_doc:\r\n raise RuntimeError('undefined namespaces:{}'.format(xml_doc))\r\n\r\n\r\nclass 
ReceivedSoap12Envelope(Soap12EnvelopeBase):\r\n __slots__ = ('msgNode', 'rawdata', 'address')\r\n def __init__(self, doc=None, rawdata=None):\r\n super(ReceivedSoap12Envelope, self).__init__()\r\n self._docRoot = doc\r\n self.rawdata = rawdata\r\n self._headerNode = None\r\n self._bodyNode = None\r\n self.address = None\r\n if doc is not None:\r\n self._headerNode = doc.find('s12:Header', nsmap)\r\n self._bodyNode = doc.find('s12:Body', nsmap)\r\n self.address = WsAddress.fromEtreeNode(self.headerNode)\r\n try:\r\n self.msgNode = self.bodyNode[0]\r\n except IndexError: # body has no content, this can happen\r\n self.msgNode = None\r\n \r\n\r\n def as_xml(self, pretty=False):\r\n tmp = BytesIO()\r\n doc = etree_.ElementTree(element=self._docRoot)\r\n doc.write(tmp, encoding='UTF-8', xml_declaration=True, pretty_print=pretty)\r\n return tmp.getvalue()\r\n\r\n def validateBody(self, schema):\r\n if schema is None:\r\n return\r\n self._assert_valid_exception_wrapper(schema, self.msgNode)\r\n\r\n @classmethod\r\n def fromXMLString(cls, xmlString, schema=None, **kwargs):\r\n parser = etree_.ETCompatXMLParser()\r\n \r\n try: \r\n doc = etree_.fromstring(xmlString, parser=parser, **kwargs)\r\n except Exception as ex:\r\n print ('load error \"{}\" in \"{}\"'.format(ex, xmlString))\r\n raise\r\n if schema is not None:\r\n msgNode = doc.find('s12:Body', nsmap)[0]\r\n schema.assertValid(msgNode)\r\n return cls(doc=doc, rawdata=xmlString)\r\n\r\n\r\n\r\n\r\n\r\nclass DPWSEnvelope(ReceivedSoap12Envelope):\r\n __slots__ = ('address', 'thisModel', 'thisDevice', 'hosted', 'host', 'metaData')\r\n\r\n def __init__(self, doc, rawdata):\r\n super(DPWSEnvelope, self).__init__(doc, rawdata)\r\n self.address = None\r\n self.thisModel = None\r\n self.thisDevice = None\r\n self.hosted = {}\r\n self.host = None\r\n self.metaData = None\r\n \r\n if doc is not None:\r\n self.address = WsAddress.fromEtreeNode(self.headerNode)\r\n self.metaData = MetaDataSection(self.bodyNode)\r\n metadata = self.bodyNode.find('wsx:Metadata', nsmap)\r\n if metadata is not None:\r\n for metadataSection in metadata.findall('wsx:MetadataSection', nsmap):\r\n if metadataSection.attrib['Dialect'] == DIALECT_THIS_MODEL:\r\n thisModelNode = metadataSection.find('dpws:ThisModel', nsmap)\r\n self.thisModel = DPWSThisModel.fromEtreeNode(thisModelNode)\r\n elif metadataSection.attrib['Dialect'] == DIALECT_THIS_DEVICE:\r\n thisDeviceNode = metadataSection.find('dpws:ThisDevice', nsmap)\r\n self.thisDevice = DPWSThisDevice.fromEtreeNode(thisDeviceNode)\r\n elif metadataSection.attrib['Dialect'] == DIALECT_RELATIONSHIP:\r\n relationship = metadataSection.find('dpws:Relationship', nsmap)\r\n if relationship.get('Type') == HOST_TYPE:\r\n hostNode = relationship.find('dpws:Host', nsmap)\r\n self.host = DPWSHost.fromEtreeNode(hostNode)\r\n for hostedNode in relationship.findall('dpws:Hosted', nsmap):\r\n hosted = DPWSHosted.fromEtreeNode(hostedNode)\r\n self.hosted[hosted.serviceId] = hosted\r\n\r\n\r\nclass _SoapFaultBase(Soap12Envelope):\r\n '''\r\n created xml:\r\n \r\n \r\n \r\n [code]\r\n \r\n [subcode]\r\n \r\n \r\n \r\n [reason]\r\n \r\n \r\n [detail]\r\n \r\n \r\n \r\n\r\n '''\r\n def __init__(self, requestEnvelope, fault_action, code, reason, subCode, details):\r\n super(_SoapFaultBase, self).__init__(Prefix.partialMap(Prefix.S12, Prefix.WSA,Prefix.WSE))\r\n replyAddress = requestEnvelope.address.mkReplyAddress(fault_action)\r\n self.addHeaderObject(replyAddress)\r\n faultNode = etree_.Element(s12Tag('Fault'))\r\n codeNode = 
etree_.SubElement(faultNode, s12Tag('Code'))\r\n valueNode = etree_.SubElement(codeNode, s12Tag('Value'))\r\n valueNode.text = 's12:{}'.format(code)\r\n if subCode is not None:\r\n subcodeNode = etree_.SubElement(codeNode, s12Tag('Subcode'))\r\n valueNode = etree_.SubElement(subcodeNode, s12Tag('Value'))\r\n valueNode.text = docNameFromQName(subCode, nsmap)\r\n reasonNode = etree_.SubElement(faultNode, s12Tag('Reason'))\r\n reasontextNode = etree_.SubElement(reasonNode, s12Tag('Text'))\r\n reasontextNode.set(xmlTag('lang'), 'en-US')\r\n reasontextNode.text = reason\r\n if details is not None:\r\n _detailNode = etree_.SubElement(faultNode, s12Tag('Detail'))\r\n _detailNode.set(xmlTag('lang'), 'en-US')\r\n if isinstance(details, str):\r\n detNode = etree_.SubElement(_detailNode, 'data')\r\n detNode.text = details\r\n else:\r\n _detailNode.append(details)\r\n self.addBodyElement(faultNode)\r\n\r\n\r\nclass SoapFault(_SoapFaultBase):\r\n SOAP_FAULT_ACTION = '{}/soap/fault'.format(Prefix.WSA.namespace)\r\n def __init__(self, requestEnvelope, code, reason, subCode=None, details=None):\r\n super(SoapFault, self).__init__(requestEnvelope, self.SOAP_FAULT_ACTION, code, reason, subCode, details)\r\n\r\n\r\nclass AdressingFault(_SoapFaultBase):\r\n ADDRESSING_FAULT_ACTION = '{}/fault'.format(Prefix.WSA.namespace)\r\n def __init__(self, requestEnvelope, code, reason, subCode=None, details=None):\r\n super(AdressingFault, self).__init__(requestEnvelope, self.ADDRESSING_FAULT_ACTION, code, reason, subCode, details)\r\n\r\n\r\nclass ReceivedSoapFault(ReceivedSoap12Envelope):\r\n def __init__(self, doc=None, rawdata=None):\r\n super(ReceivedSoapFault, self).__init__(doc, rawdata)\r\n self.code = ', '.join(self._bodyNode.xpath('s12:Fault/s12:Code/s12:Value/text()', namespaces=nsmap))\r\n self.subcode = ', '.join(self._bodyNode.xpath('s12:Fault/s12:Code/s12:Subcode/s12:Value/text()', namespaces=nsmap))\r\n self.reason = ', '.join(self._bodyNode.xpath('s12:Fault/s12:Reason/s12:Text/text()', namespaces=nsmap))\r\n self.detail = ', '.join(self._bodyNode.xpath('s12:Fault/s12:Detail/text()', namespaces=nsmap))\r\n\r\n def __repr__(self):\r\n return ('ReceivedSoapFault(code=\"{}\", subcode=\"{}\", reason=\"{}\", detail=\"{}\")'.format(self.code, self.subcode, self.reason, self.detail))\r\n\r\n\r\nclass SoapFaultCode:\r\n '''\r\n Soap Fault codes, see https://www.w3.org/TR/soap12-part1/#faultcodes\r\n '''\r\n VERSION_MM = 'VersionMismatch'\r\n MUSTUNSERSTAND = 'MustUnderstand'\r\n DATAENC = 'DataEncodingUnknown'\r\n SENDER = 'Sender'\r\n RECEIVER = 'Receiver'\r\n","sub_path":"sdc11073/pysoap/soapenvelope.py","file_name":"soapenvelope.py","file_ext":"py","file_size_in_byte":33174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486239375","text":"from django.core.management.base import BaseCommand, CommandError\nfrom codetables.models import Section\nfrom common import utils\nimport csv\nimport os\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('--file', nargs=1, type=str)\n # parser.add_argument('--truncate', dest='truncate', action=\"store_true\")\n # parser.set_defaults(truncate=False)\n def handle(self, *args, **options):\n '''\n import all csv file into section table\n '''\n for filename in options['file']:\n if not os.path.isfile(filename):\n raise CommandError('Argument %s is not a file' % filename)\n with open(filename, 'r') as infile:\n csv_reader = csv.reader(infile, delimiter=',')\n instances = []\n for row 
in csv_reader:\n code = '%s-%s-%s' % (row[0], row[2], row[4])\n desc = '%s - %s - %s' % (row[1], row[3], row[5])\n try:\n instance = Section.objects.get(code=code)\n instance.description = desc\n instance.save()\n except Section.DoesNotExist:\n instances.append(Section(code=code, description=desc))\n if len(instances) > 0:\n Section.objects.bulk_create(instances)\n\n","sub_path":"idam_uam_backend-master/codetables/management/commands/import_section_table.py","file_name":"import_section_table.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419652822","text":"import requests\nimport json\nfrom datetime import datetime\n\n#填写下面四个变量即可\n#url为打卡的网址(就是分享链接),cookie从浏览器复制,name为你的名字,content是地址\nurl = \"填打卡地址\"\ncookie = \"填cookie\"\nname = \"填姓名\"\nqq_name = \"填QQ名称\" #QQ名称\nglobal_padid = \"填global_padid\" #浏览器截取https://docs.qq.com/form/collect/recordcnt的请求data里面的global_padid\ncontent = \"填地址\"\n\n############################\nsession = requests.Session()\nheader = {\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 7.0; Nexus 5X Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.85 Mobile Safari/537.36\",\n \"content-type\": \"application/json\",\n \"origin\": \"https://docs.qq.com\",\n\n}\ncooki = {cookies.split(': ')[0]: cookies.split(': ')[1] for cookies in cookie.replace(\";\",\",\").replace(\"=\", \": \").split(\",\")}\nglobal_padid = {\"global_padid\": global_padid}\n\ndef role_id():\n role_id_re = session.post(url=\"https://docs.qq.com/form/collect/get_sign_role\", headers=header, cookies=cooki, data=json.dumps(global_padid))\n if role_id_re.json()[\"code\"] != 0:\n return \"cookie可能过期了\", quit\n return role_id_re.json()[\"selected_id\"]\n\ndef get_submit():\n data = json.dumps({\n \"global_padid\": global_padid[\"global_padid\"],\n \"data\": json.dumps([{\n \"id\": \"nick\",\n \"type\": \"SIMPLE\",\n \"content\": qq_name,\n \"dataType\": 1\n }, {\n \"id\": \"time\",\n \"type\": \"SIMPLE\",\n \"content\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n \"dataType\": 1\n }, {\n \"id\": \"lbs\",\n \"type\": \"SIMPLE\",\n \"content\": content,\n \"dataType\": 1\n }, {\n \"id\": \"select\",\n \"type\": \"SELECT\",\n \"content\": \"体温正常,身体健康\",\n \"dataType\": 1\n }, {\n \"id\": \"identity\",\n \"type\": \"SIMPLE\",\n \"content\": name,\n \"dataType\": 1\n }]),\n \"role\": {\n \"type\": 1,\n \"name\": name,\n \"role_id\": role_id()\n }\n })\n submit_re = session.post(url=\"https://docs.qq.com/form/collect/submit\", headers=header, data=data, cookies=cooki)\n if submit_re.json()[\"code\"] == 10001:\n print(submit_re.text, \"\\n打卡失败\")\n elif submit_re.json()[\"code\"] == 18:\n print(submit_re.text, \"\\n已打卡\")\n elif submit_re.json()[\"code\"] == 0:\n print(submit_re.text, \"\\n打卡成功\")\n else:\n print(\"global_padid可能不正确\")\n\nif __name__ == \"__main__\":\n get_submit()\n","sub_path":"daka.py","file_name":"daka.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307808142","text":"# coding: utf-8\n\nimport time\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy\nfrom individu import *\n\n\ndef couleur_rgb_to_matplotlib(couleur):\n return couleur[0] / 255, couleur[1] / 255, couleur[2] / 255\n\n\nclass Stats:\n id_axs = 0\n tk_jour = None\n tk_nb_individus = None\n tk_nb_jours_par_min = None\n\n def __init__(self, carte: Carte):\n self.jour = 0\n\n 
self.nb_individus = numpy.array([0])\n self.nb_individus_naissance = numpy.array([0])\n self.nb_individus_peut_se_reproduire = numpy.array([0])\n self.nb_individus_gestants = numpy.array([0])\n self.nb_morts = numpy.array([0])\n self.time = 0\n\n self.dic_individus_new = []\n self.dic_individus_gestant = []\n self.dic_individus_peut_se_reproduire = []\n self.dic_individus_autre = []\n\n self.dic_id_figs = {}\n\n self.tk_jour.set(0)\n self.tk_nb_individus.set(0)\n self.tk_nb_jours_par_min.set(0.)\n\n self.dic_figures_enregistrement = {}\n\n self.carte = carte\n\n def nouvelle_journee(self, liste_individus: list):\n self.dic_individus_new = []\n self.dic_individus_gestant = []\n self.dic_individus_peut_se_reproduire = []\n self.dic_individus_autre = []\n\n nb_individus = 0\n nb_individus_naissance = 0\n nb_individus_peut_se_reproduire = 0\n nb_individus_gestants = 0\n\n for individu in liste_individus:\n nb_individus += 1\n dic_caractere = copy.deepcopy(individu.dic_variables)\n # LISTE_CARACTERES_INDIVIDU_SECONDAIRES :\n dic_caractere[AGE] = individu.age\n dic_caractere[ENERGIE_DEPENSEE] = individu.energie_depense\n dic_caractere[ENERGIE_INIT] = individu.energie_init\n if individu.age == 0:\n nb_individus_naissance += 1\n self.dic_individus_new.append(dic_caractere)\n elif individu.peut_se_reproduire:\n nb_individus_peut_se_reproduire += 1\n self.dic_individus_peut_se_reproduire.append(dic_caractere)\n elif individu.compte_a_rebour_new_individu > 0:\n nb_individus_gestants += 1\n self.dic_individus_gestant.append(dic_caractere)\n else:\n self.dic_individus_autre.append(dic_caractere)\n\n self.nb_individus = numpy.append(self.nb_individus, [nb_individus])\n self.nb_individus_naissance = numpy.append(self.nb_individus_naissance, [nb_individus_naissance])\n self.nb_individus_peut_se_reproduire = numpy.append(self.nb_individus_peut_se_reproduire,\n [nb_individus_peut_se_reproduire])\n self.nb_individus_gestants = numpy.append(self.nb_individus_gestants, [nb_individus_gestants])\n if self.jour == 0:\n nb_morts = 0\n else:\n nb_morts = self.nb_individus[self.jour] - nb_individus + nb_individus_naissance\n self.nb_morts = numpy.append(self.nb_morts, [nb_morts])\n\n self.jour += 1\n self.tk_jour.set(self.jour)\n self.tk_nb_individus.set(nb_individus)\n if time.time() == self.time:\n self.tk_nb_jours_par_min.set(math.inf)\n else:\n self.tk_nb_jours_par_min.set(round(60 / (time.time() - self.time), 1))\n self.time = time.time()\n for id_fig in self.dic_id_figs:\n self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_NEW_AFFICHAGE] = True\n\n try:\n self.update_enregistrements()\n except:\n print(\"Oups !\")\n\n def new_enregistrement(self, id_fig, tk_var_nb_jours, nom_dossier, periode, type_resolution, afficher_miniature,\n taille_miniature, coef_temperature_miniature):\n plt.ioff()\n id_fig2 = self.init_graph_matplotlib(self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_CARACTERES_3D],\n self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_VITESSE_ROTATION],\n self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_HAUTEUR_Z_AXIS])\n if not afficher_miniature:\n taille_miniature = 0\n coef_temperature_miniature = 0\n self.dic_figures_enregistrement[id_fig2] = {PARAM_MATPLOTLIB_TK_VAR_NB_JOURS: tk_var_nb_jours,\n PARAM_MATPLOTLIB_NOM_DOSSIER: nom_dossier,\n PARAM_MATPLOTLIB_PERIODE_IMAGE: [0, periode],\n PARAM_TAILLE_MINIATURE: taille_miniature,\n PARAM_COEF_TEMPERATURE_MINIATURE: coef_temperature_miniature}\n size = DIC_SIZE_ENREGISTREMENT[type_resolution][PARAM_ENR_SIZE]\n if size is None:\n size = 
self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].get_size_inches()\n self.dic_id_figs[id_fig2][PARAM_MATPLOTLIB_FIG].set_size_inches(*size)\n return id_fig2\n\n def update_enregistrements(self):\n for id_fig, dic_fig in self.dic_figures_enregistrement.items():\n dic_fig[PARAM_MATPLOTLIB_PERIODE_IMAGE][0] -= 1\n if dic_fig[PARAM_MATPLOTLIB_PERIODE_IMAGE][0] <= 0:\n dic_fig[PARAM_MATPLOTLIB_PERIODE_IMAGE][0] = dic_fig[PARAM_MATPLOTLIB_PERIODE_IMAGE][1]\n num_jour = dic_fig[PARAM_MATPLOTLIB_TK_VAR_NB_JOURS].get() + 1\n self.update_graph_matplotlib(id_fig, True)\n self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].savefig(\n f\"{dic_fig[PARAM_MATPLOTLIB_NOM_DOSSIER]}/{num_jour}.png\")\n if not dic_fig[PARAM_TAILLE_MINIATURE] == 0:\n self.carte.affiche_miniature(f\"{dic_fig[PARAM_MATPLOTLIB_NOM_DOSSIER]}/{num_jour}.png\",\n dic_fig[PARAM_TAILLE_MINIATURE],\n dic_fig[PARAM_COEF_TEMPERATURE_MINIATURE])\n dic_fig[PARAM_MATPLOTLIB_TK_VAR_NB_JOURS].set(num_jour)\n\n def init_graph_matplotlib(self, caractere_3D, vitesse_rotate, hauteur_z):\n Stats.id_axs += 1\n if caractere_3D is not None:\n self.init_graph_matplotlib_caracteres_population(self.id_axs, caractere_3D, vitesse_rotate, hauteur_z)\n else:\n self.init_graph_matplotlib_taille_population(self.id_axs)\n return self.id_axs\n\n def init_graph_matplotlib_taille_population(self, id_fig):\n fig = plt.figure(id_fig)\n ax = fig.add_subplot()\n fig.suptitle(\"Evolution de la population\",\n fontsize=fig.get_size_inches()[1] * MATPLOTLIB_COEF_HEIGHT_FONT_SIZE_TITLE)\n self.dic_id_figs[id_fig] = {PARAM_MATPLOTLIB_FIG: fig,\n PARAM_MATPLOTLIB_AX: ax,\n PARAM_MATPLOTLIB_NEW_AFFICHAGE: True,\n PARAM_MATPLOTLIB_CARACTERES_3D: None,\n PARAM_MATPLOTLIB_VITESSE_ROTATION: None,\n PARAM_MATPLOTLIB_HAUTEUR_Z_AXIS: None}\n\n def init_graph_matplotlib_caracteres_population(self, id_fig, caractere_3D, vitesse_rotate, hauteur_z):\n fig = plt.figure(id_fig)\n ax = Axes3D(fig)\n if not vitesse_rotate == 0:\n ax.view_init(elev=hauteur_z, azim=vitesse_rotate * self.jour % int(360 / vitesse_rotate))\n else:\n ax.view_init(elev=hauteur_z)\n self.dic_id_figs[id_fig] = {PARAM_MATPLOTLIB_FIG: fig,\n PARAM_MATPLOTLIB_AX: ax,\n PARAM_MATPLOTLIB_NEW_AFFICHAGE: True,\n PARAM_MATPLOTLIB_CARACTERES_3D: caractere_3D,\n PARAM_MATPLOTLIB_VITESSE_ROTATION: vitesse_rotate,\n PARAM_MATPLOTLIB_HAUTEUR_Z_AXIS: hauteur_z}\n\n def update_graph_matplotlib(self, id_fig, update_if_new_affichage_is_false=False):\n if update_if_new_affichage_is_false or self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_NEW_AFFICHAGE]:\n self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_NEW_AFFICHAGE] = False\n if self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_CARACTERES_3D] is not None:\n self.update_graph_caracteres_population(id_fig)\n else:\n self.update_graph_taille_population(id_fig)\n return True\n return False\n\n def update_graph_taille_population(self, id_fig):\n fig = self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG]\n fig.suptitle(MATPLOTLIB_EVOLUTION_POPULATION_TITLE,\n fontsize=fig.get_size_inches()[1] * MATPLOTLIB_COEF_HEIGHT_FONT_SIZE_TITLE)\n ax = self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_AX]\n fontsize = (self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].get_size_inches()[1] *\n MATPLOTLIB_COEF_HEIGHT_FONT_SIZE_LEGENDE)\n labelpad = (self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].get_size_inches()[1] *\n MATPLOTLIB_COEF_HEIGHT_FONT_DIST_AXES_LABELS)\n ax.clear()\n x = numpy.append(numpy.append([0], numpy.arange(0, self.jour)), [self.jour - 1])\n y1 = self.nb_individus\n y2 = y1 - self.nb_individus_naissance\n y3 = y2 - 
self.nb_individus_gestants\n y4 = y3 - self.nb_individus_peut_se_reproduire\n y5 = - self.nb_morts\n for y, color in [(y1, COULEUR_INDIVIDU_NAISSANCE),\n (y2, COULEUR_INDIVIDU_GESTANT),\n (y3, COULEUR_INDIVIDU_PEUT_SE_REPRODUIRE),\n (y4, COULEUR_INDIVIDU_NORMAL),\n (y5, COULEUR_INDIVIDU_MORT)]:\n ax.fill(x, numpy.append(y, [0]), color=couleur_rgb_to_matplotlib(color))\n\n ax.set_xlabel(MATPLOTLIB_LEGENDE_AXES_EVOLUTION_POPULATION[0], fontsize=fontsize, labelpad=labelpad)\n ax.set_ylabel(MATPLOTLIB_LEGENDE_AXES_EVOLUTION_POPULATION[1], fontsize=fontsize, labelpad=labelpad)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize)\n ax.legend(MATPLOTLIB_LEGENDE_EVOLUTION_POPULATION_TITLE, loc=MATPLOTLIB_POSITION_LEGENDE_EVOLUTION_POPULATION,\n fontsize=fontsize)\n ax.grid()\n\n def update_graph_caracteres_population(self, id_fig):\n fig = self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG]\n fig.suptitle(MATPLOTLIB_GRAPH_3D_TITLE,\n fontsize=fig.get_size_inches()[1] * MATPLOTLIB_COEF_HEIGHT_FONT_SIZE_TITLE)\n caractere_3D = self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_CARACTERES_3D]\n ax = self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_AX]\n fontsize = (self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].get_size_inches()[1] *\n MATPLOTLIB_COEF_HEIGHT_FONT_SIZE_LEGENDE)\n pointsize = (self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].get_size_inches()[1] *\n MATPLOTLIB_COEF_HEIGHT_FONT_SIZE_POINTS_3D)\n labelpad = (self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].get_size_inches()[1] *\n MATPLOTLIB_COEF_HEIGHT_FONT_DIST_AXES_LABELS)\n ax.clear()\n vitesse_rotate = self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_VITESSE_ROTATION]\n if not vitesse_rotate == 0:\n ax.view_init(elev=self.dic_id_figs[id_fig][PARAM_MATPLOTLIB_HAUTEUR_Z_AXIS],\n azim=vitesse_rotate * self.jour % int(360 / vitesse_rotate))\n for liste_dics_individus, color in [(self.dic_individus_autre, COULEUR_INDIVIDU_NORMAL),\n (self.dic_individus_new, COULEUR_INDIVIDU_NAISSANCE),\n (self.dic_individus_peut_se_reproduire,\n COULEUR_INDIVIDU_PEUT_SE_REPRODUIRE),\n (self.dic_individus_gestant, COULEUR_INDIVIDU_GESTANT)]:\n x = []\n y = []\n z = []\n for individu in liste_dics_individus:\n x.append(individu[caractere_3D[0]])\n y.append(individu[caractere_3D[1]])\n z.append(individu[caractere_3D[2]])\n\n ax.scatter(x, y, z, color=couleur_rgb_to_matplotlib(color), marker=\"o\", alpha=0.8, s=pointsize)\n\n ax.set_xlabel(DIC_CARACTERES_INDIVIDU[caractere_3D[0]][PARAM_LABEL], fontsize=fontsize, labelpad=labelpad)\n ax.set_ylabel(DIC_CARACTERES_INDIVIDU[caractere_3D[1]][PARAM_LABEL], fontsize=fontsize, labelpad=labelpad)\n ax.set_zlabel(DIC_CARACTERES_INDIVIDU[caractere_3D[2]][PARAM_LABEL], fontsize=fontsize, labelpad=labelpad)\n\n ax.set_xlim3d(Individu.dic_caracteres_individus[caractere_3D[0]][PARAM_MIN_VALUE],\n Individu.dic_caracteres_individus[caractere_3D[0]][PARAM_MAX_VALUE])\n ax.set_ylim3d(Individu.dic_caracteres_individus[caractere_3D[1]][PARAM_MIN_VALUE],\n Individu.dic_caracteres_individus[caractere_3D[1]][PARAM_MAX_VALUE])\n ax.set_zlim3d(Individu.dic_caracteres_individus[caractere_3D[2]][PARAM_MIN_VALUE],\n Individu.dic_caracteres_individus[caractere_3D[2]][PARAM_MAX_VALUE])\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize)\n for tick in ax.zaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize)\n ax.legend(MATPLOTLIB_LEGENDE_GRAPH_3D, 
loc=MATPLOTLIB_POSITION_LEGENDE_GRAPH_3D, fontsize=fontsize)\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":13705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"294882171","text":"# coding: cp949\nfrom pico2d import *\nimport time\n\nimport Scene_NormalStage\nimport Scene_BossStage\n\nimport Manager_Collision\nimport Manager_Sound\n\nimport Object\nimport Object_Bubble\nimport Object_Item\n\nclass Player(Object.GameObject):\n def __init__(self, _x, _y, _type, _ambul = 0, _dart = 0, _pin = 0, _banana = 0):\n #플레이어 위치 및 이미지와 충돌 체크용 사이즈\n self.X, self.Y = _x, _y\n self.sizeX, self.sizeY = 38, 38\n self.image_size = 70\n #플레이어의 속성\n self.type = _type #노말은0, 보스면1\n self.dir = None\n self.speed, self.power, self.bubbleCount = 3, 1, 1\n self.isBushCheck, self.isSlidingPlayer = False, False\n self.birth, self.birthCount = None, 0\n #이미지(BIRTH STAND WALK BUBBLE BUBBLE_WALK DEAD)\n self.player_image = None\n self.player_state = 'STATE_BIRTH'\n self.frame, self.frameMax, self.frameScene, self.frameTime = None, None, None, 0\n #아이템 갯수변수\n self.bananaCount = _banana\n self.dartCount = _dart\n self.pinCount = _pin\n self.ambulanceCount = _ambul\n\n def __del__(self):\n self.exit()\n\n def enter(self):\n # 이미지 사용용도의 변수\n self.player_image = load_image('.\\\\Sprite\\\\03.InGame\\\\Character\\\\Character_Player.png')\n self.player_state = 'STATE_BIRTH'\n self.frame = 0\n self.frameMax = 4\n self.frameScene = 5\n self.dir = 1 #0위,1아래,2오른쪽,3왼쪽\n self.birth = 0 #0삼,1물풍선상태,2죽는과정,3죽음\n self.frameTime = time.time()\n\n def exit(self):\n del (self.player_image)\n\n def update(self, _frametime, _events):\n if self.birth < 3:\n #쿨타임 함수 추가\n self.itemMaxCheck()\n\n #바나나있으면 바나나부터 체크하고 else로 keycheck\n if self.isSlidingPlayer:\n if self.dir == 0:\n self.Y += 6 * _frametime * 50\n elif self.dir == 1:\n self.Y -= 6 * _frametime * 50\n elif self.dir == 2:\n self.X += 6 * _frametime * 50\n elif self.dir == 3:\n self.X -= 6 * _frametime * 50\n else:\n self.keycheck(_events)\n\n #충돌체크(스페셜타일 + 벽)\n self.collisionSpecialTile()\n self.collisionWall()\n #현재상태에 대한 애니메이션 부분\n self.frame_move(_frametime, self.player_state)\n\n # 플레이어 죽음\n elif self.birth == 3:\n return False\n\n def draw(self):\n if self.isBushCheck == True:\n pass\n #프레임이 시작하는 그림에서의 X좌표, Y좌표(Y좌표는 아래서부터 1) => 왼쪽 아래부터 오른쪽 위까지 하나를 그림\n else:\n self.player_image.clip_draw((self.frame * self.image_size), 560 - ((self.frameScene + 1) * self.image_size),\n self.image_size, self.image_size,\n self.X, self.Y + 13)#마지막에 플레이어 위치 보정값\n\n def keycheck(self, _events):\n for event in _events:\n if event.type == SDL_KEYDOWN:\n #방향키\n if event.key == SDLK_UP:\n if self.birth == 0:\n self.dir = 0\n self.frame, self.frameMax = 0, 4\n self.player_state = 'STATE_WALK'\n elif self.birth == 1:\n self.dir = 0\n self.player_state = 'STATE_BUBBLE_WALK'\n elif event.key == SDLK_DOWN:\n if self.birth == 0:\n self.dir = 1\n self.frame, self.frameMax = 0, 4\n self.player_state = 'STATE_WALK'\n elif self.birth == 1:\n self.dir = 1\n self.player_state = 'STATE_BUBBLE_WALK'\n if event.key == SDLK_RIGHT:\n if self.birth == 0:\n self.dir = 2\n self.frame, self.frameMax = 0, 3\n self.player_state = 'STATE_WALK'\n elif self.birth == 1:\n self.dir = 2\n self.player_state = 'STATE_BUBBLE_WALK'\n elif event.key == SDLK_LEFT:\n if self.birth == 0:\n self.dir = 3\n self.frame, self.frameMax = 0, 3\n self.player_state = 'STATE_WALK'\n elif self.birth == 1:\n self.dir = 3\n self.player_state = 
'STATE_BUBBLE_WALK'\n #물풍선\n if event.key == SDLK_SPACE:\n #각 맵의 오브젝트 매니저에 넣기\n if (self.type == 0) and (self.birth == 0):\n if (len(Scene_NormalStage.gObjList[5]) < self.bubbleCount):\n indexX, indexY = (int)((self.X - 20) / 40), (int)((560 - self.Y) / 40)\n posX, posY = 40 + (indexX * 40), (600 - 60) - (40 * indexY)\n isCheck = False\n for i in Scene_NormalStage.gObjList[5]:\n if (Manager_Collision.collisionMiniIntersectRect(i, self)):\n isCheck = True\n break\n if isCheck == False:\n tempBubble = Object_Bubble.Bubble(posX, posY, self.type, self.power)\n tempBubble.enter()\n Scene_NormalStage.gObjList[5].append(tempBubble)\n elif (self.type == 1) and (self.birth == 0):\n if (len(Scene_BossStage.gObjList[5]) < self.bubbleCount):\n indexX, indexY = (int)((self.X - 20) / 40), (int)((560 - self.Y) / 40)\n posX, posY = 40 + (indexX * 40), (600 - 60) - (40 * indexY)\n isCheck = False\n for i in Scene_BossStage.gObjList[5]:\n if (Manager_Collision.collisionMiniIntersectRect(i, self)):\n isCheck = True\n break\n if isCheck == False:\n tempBubble = Object_Bubble.Bubble(posX, posY, self.type, self.power)\n tempBubble.enter()\n Scene_BossStage.gObjList[5].append(tempBubble)\n #아이템사용\n if event.key == SDLK_q:\n if (self.ambulanceCount > 0) and (self.birth == 1):\n self.ambulanceCount -= 1\n self.birth = 0\n self.birthCount = 0\n self.player_state = 'STATE_BIRTH'\n self.frameScene = 5\n self.frame = 0\n Manager_Sound.PlayEffectSound('CHAR_REVIVAL')\n \n if event.key == SDLK_w:\n if (self.dartCount > 0) and (self.birth == 0):\n self.dartCount -= 1\n else: return\n #다트생성\n indexX, indexY = (int)((self.X - 20) / 40), (int)((560 - self.Y) / 40)\n posX, posY = 40 + (indexX * 40), (600 - 60) - (40 * indexY)\n tempDart = Object_Item.Item(posX, posY, self.type, 8, self.dir)\n tempDart.enter()\n Manager_Sound.PlayEffectSound('ITEM_DART')\n if self.type == 0:\n Scene_NormalStage.gObjList[4].append(tempDart)\n elif self.type == 1:\n Scene_BossStage.gObjList[4].append(tempDart)\n if event.key == SDLK_e:\n if (self.pinCount > 0) and (self.birth == 0):\n self.pinCount -= 1\n if event.key == SDLK_r:\n if (self.bananaCount > 0) and (self.birth == 0):\n self.bananaCount -= 1\n else: return\n #바나나생성\n indexX, indexY = (int)((self.X - 20) / 40), (int)((560 - self.Y) / 40)\n posX, posY = 40 + (indexX * 40), (600 - 60) - (40 * indexY)\n tempBanana = Object_Item.Item(posX, posY, self.type, 7, self.dir)\n tempBanana.enter()\n Manager_Sound.PlayEffectSound('ITEM_ON')\n if self.type == 0:\n Scene_NormalStage.gObjList[4].append(tempBanana)\n elif self.type == 1:\n Scene_BossStage.gObjList[4].append(tempBanana)\n #치트키(모든 아이템 최대 + 속성 최대)\n if event.key == SDLK_RETURN:\n if self.birth == 0:\n self.speed, self.bubbleCount, self.power = 5, 5, 5\n self.bananaCount, self.ambulanceCount, self.pinCount, self.dartCount = 9, 9, 9, 9\n self.birth = 1\n self.frame = 0\n self.player_state = 'STATE_BUBBLE'\n Manager_Sound.PlayEffectSound('CHAR_FIXED')\n elif event.type == SDL_KEYUP:\n #방향키에서 손을 때는 경우\n if self.player_state == 'STATE_WALK':\n if (event.key == SDLK_UP and self.dir == 0) or (event.key == SDLK_DOWN and self.dir == 1) \\\n or (event.key == SDLK_RIGHT and self.dir == 2) or (event.key == SDLK_LEFT and self.dir == 3):\n self.player_state = 'STATE_STAND'\n elif self.player_state == 'STATE_BUBBLE_WALK':\n if (event.key == SDLK_UP and self.dir == 0) or (event.key == SDLK_DOWN and self.dir == 1) \\\n or (event.key == SDLK_RIGHT and self.dir == 2) or (event.key == SDLK_LEFT and self.dir == 3):\n self.player_state = 'STATE_BUBBLE'\n\n 
def frame_move(self, _frametime, _player_state):\n #처음 태어날 경우 태어나는 애니매이션 유지\n if ((self.player_state == 'STATE_BIRTH') and (self.frameTime + 1 < time.time())):\n self.frameTime = time.time()\n _player_state = 'STATE_STAND'\n elif (self.player_state != _player_state):\n self.frame = 0\n self.player_state = _player_state\n\n #상태값 변화\n if self.player_state == 'STATE_BIRTH':\n if self.frameTime + 0.25 < time.time():\n self.frameTime = time.time()\n self.frame += 1\n if self.frame > self.frameMax:\n self.frame = 0\n self.player_state = 'STATE_STAND'\n self.frameScene = self.dir\n\n elif self.player_state == 'STATE_STAND':\n self.frame = 0\n self.frameScene = self.dir\n\n elif self.player_state == 'STATE_WALK':\n if self.frameScene != self.dir:\n self.frameScene = self.dir\n\n if self.frameTime + 0.2 < time.time():\n self.frameTime = time.time()\n self.frame += 1\n if self.frame > self.frameMax:\n self.frame = 0\n #움직임 추가\n if self.dir == 0:\n self.Y += self.speed * _frametime * 50\n elif self.dir == 1:\n self.Y -= self.speed * _frametime * 50\n elif self.dir == 2:\n self.X += self.speed * _frametime * 50\n elif self.dir == 3:\n self.X -= self.speed * _frametime * 50\n\n elif self.player_state == 'STATE_BUBBLE':\n if self.frameScene != 4:\n self.frameScene = 4\n\n if self.frameMax != 3:\n self.frameMax = 3\n\n if self.frameTime + 0.5 < time.time():\n self.frameTime = time.time()\n self.frame += 1\n if self.frame > self.frameMax:\n self.frame = 0\n self.birthCount += 1\n if self.birthCount != 3:\n Manager_Sound.PlayEffectSound('CHAR_FIXED')\n if self.birthCount > 2:\n self.birthCount = 0\n self.birth = 2\n self.player_state = 'STATE_DEAD'\n self.frameScene = 6\n self.frameMax = 6\n Manager_Sound.PlayEffectSound('CHAR_DIE')\n\n elif self.player_state == 'STATE_BUBBLE_WALK':\n if self.frameScene != 4:\n self.frameScene = 4\n\n if self.frameTime + 0.5 < time.time():\n \n self.frameTime = time.time()\n self.frame += 1\n if self.frame > self.frameMax:\n self.frame = 0\n self.birthCount += 1\n if self.birthCount != 3:\n Manager_Sound.PlayEffectSound('CHAR_FIXED')\n if self.birthCount > 2:\n self.birthCount = 0\n self.birth = 2\n self.player_state = 'STATE_DEAD'\n self.frameScene = 6\n self.frameMax = 6\n Manager_Sound.PlayEffectSound('CHAR_DIE')\n #움직임 추가\n if self.dir == 0:\n self.Y += 1 * _frametime * 50\n elif self.dir == 1:\n self.Y -= 1 * _frametime * 50\n elif self.dir == 2:\n self.X += 1 * _frametime * 50\n elif self.dir == 3:\n self.X -= 1 * _frametime * 50\n\n elif self.player_state == 'STATE_DEAD':\n if self.frameScene != 6:\n self.frameScene = 6\n\n if self.frameTime + 0.2 < time.time():\n self.frameTime = time.time()\n self.frame += 1\n if self.frame > self.frameMax:\n self.birth = 3\n\n def itemMaxCheck(self):\n if self.speed > 6:\n self.speed = 6\n if self.bubbleCount > 4:\n self.bubbleCount = 4\n if self.power > 4:\n self.power = 4\n\n def collisionWall(self):\n if self.X < 40:\n self.X = 40\n self.isSlidingPlayer = False\n return\n elif self.X > 600:\n self.X = 600\n self.isSlidingPlayer = False\n return\n if self.Y < 60:\n self.Y = 60\n self.isSlidingPlayer = False\n return\n elif self.Y > 540:\n self.Y = 540\n self.isSlidingPlayer = False\n return\n\n def collisionSpecialTile(self):\n self.isBushCheck = False\n if self.type == 0:\n for i in Scene_NormalStage.gObjList[3]:\n if i.breakingOption == 2:\n i.isPlayerCollision = False\n isCollision, left, top, right, bottom = Manager_Collision.collisionIntersectRect(self, i)\n if (isCollision == True) and (i.breakingOption != 2):\n 
Manager_Collision.collisionAABB(self, i, left, top, right, bottom)\r\n                    self.isSlidingPlayer = False","sub_path":"CrazyArcade_Packaging/Object_Player.py","file_name":"Object_Player.py","file_ext":"py","file_size_in_byte":16248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417162751","text":"\n\n# import packages\nfrom urllib.request import urlopen, Request\n# Specify the url\nurl = \"http://www.google.com\"\n# this packages the request: request\nrequest = Request(url)\n# this packages the request and catches the response\nresponse = urlopen(request)\n# print the datatype of response\nprint(type(response))\n# Be polite and close the response!\nresponse.close()\n\n\"\"\"\nParsing HTML with BeautifulSoup\n\"\"\"\n\n# Import packages\nimport requests\nfrom bs4 import BeautifulSoup\n\n# specify url: url\nurl = 'https://www.python.org/~guido/'\n# package the request, send the request and catch the response: r\nr = requests.get(url)\n# extracts the response as html: html_doc\nhtml_doc = r.text\n# create a BeautifulSoup object from the html: soup\nsoup = BeautifulSoup(html_doc)\n# prettify the beautiful soup object: pretty_soup\npretty_soup = soup.prettify()\n# print the response\nprint(pretty_soup)\n\n# Import packages\nimport requests\nfrom bs4 import BeautifulSoup\n\n# Specify url: url\nurl = 'https://www.python.org/~guido/'\n\n# Package the request, send the request and catch the response: r\nr = requests.get(url)\n\n# Extract the response as html: html_doc\nhtml_doc = r.text\n\n# Create a BeautifulSoup object from the HTML: soup\nsoup = BeautifulSoup(html_doc)\n\n# Get the title of Guido's webpage: guido_title\nguido_title = soup.title\n\n# Print the title of Guido's webpage to the shell\nprint(guido_title)\n\n# Get Guido's text: guido_text\nguido_text = soup.text\n\n# Print Guido's text to the shell\nprint(guido_text)\n\n\n# Import packages\nimport requests\nfrom bs4 import BeautifulSoup\n\n# Specify url\nurl = 'https://www.python.org/~guido/'\n\n# Package the request, send the request and catch the response: r\nr = requests.get(url)\n\n# Extracts the response as html: html_doc\nhtml_doc = r.text\n\n# create a BeautifulSoup object from the HTML: soup\nsoup = BeautifulSoup(html_doc, features=\"html.parser\")\n\n# Print the title of Guido's webpage\nprint(soup.title)\n\n# Find all 'a' tags (which define hyperlinks): a_tags\na_tags = soup.find_all('a')\n\n# Print the URLs to the shell\nfor link in a_tags:\n    print(link.get('href'))\n\"\"\"\nAPI requests\n\"\"\"\n\n# Import requests package\nimport requests\n\n# Assign URL to variable: url\nurl = 'http://www.omdbapi.com/?apikey=ff21610b&t=the+social+network'\n\n# Package the request, send the request and catch the response: r\nr = requests.get(url)\n\n# Print the text of the response\nprint(r.text)\n\"\"\"\nJSON from the Web to Python\n\"\"\"\n\n# Import package\nimport requests\n\n# Assign URL to variable: url\nurl = 'http://www.omdbapi.com/?apikey=ff21610b&t=social+network'\n\n# Package the request, send the request and catch the response: r\nr = requests.get(url)\n\n# Decode 
the JSON data into a dictionary: json_data\njson_data = r.json()\n\n# Print each key-value pair in json_data\nfor k in json_data.keys():\n print(k + ': ', json_data[k])\n\n\"\"\"\nChecking out the wikipedia api\n\"\"\"\n\n# Import package\nimport requests\n\n# Assign URL to variable: url\nurl = 'https://en.wikipedia.org/w/api.php?action=query&prop=extracts&format=json&exintro=&titles=pizza'\n\n# Package the request, send the request and catch the response: r\nr = requests.get(url)\n\n# Decode the JSON data into a dictionary: json_data\njson_data = r.json()\n\n# Print the Wikipedia page extract\npizza_extract = json_data['query']['pages']['24768']['extract']\nprint(pizza_extract)\n","sub_path":"importing_data_from_the_internet.py","file_name":"importing_data_from_the_internet.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432244589","text":"import datetime\nimport random\nimport copy\nimport math\n\nfrom seqscout.utils import read_data, read_data_kosarak, \\\n is_subsequence, sequence_mutable_to_immutable, print_results, \\\n read_data_sc2, k_length, generate_bitset, following_ones, \\\n get_support_from_vector, compute_first_zero_mask, compute_last_ones_mask, \\\n count_target_class_data, compute_quality, compute_quality_vertical, create_s_extension, create_i_extension, extract_items, \\\n reduce_k_length, extract_l_max\n\nfrom seqscout.priorityset import PrioritySet\n\ndef compute_children(sequence, items, enable_i=True):\n \"\"\"\n :param enable_i: enable i_extensions or not. Useful when sequences are singletons like DNA\n :return: the set of sequences that we can generate from the current one\n NB: We convert to mutable/immutable object in order to have a set of subsequences,\n which automatically removes duplicates\n \"\"\"\n new_subsequences = set()\n\n for item in items:\n for index, itemset in enumerate(sequence):\n new_subsequences.add(\n create_s_extension(sequence, item, index)\n )\n\n if enable_i:\n pseudo_i_extension = create_i_extension(sequence, item,\n index)\n\n length_i_ext = sum([len(i) for i in pseudo_i_extension])\n len_subsequence = sum([len(i) for i in sequence])\n\n # we prevent the case where we add an existing element to itemset\n if (length_i_ext > len_subsequence):\n new_subsequences.add(pseudo_i_extension)\n\n new_subsequences.add(\n create_s_extension(sequence, item, len(sequence)))\n\n return new_subsequences\n\n\ndef items_to_sequences(items):\n sequences = []\n for item in items:\n sequences.append((frozenset([item]),))\n\n return sequences\n\n\ndef display_info(stage, compute_count, sorted_patterns, begin, data, top_k):\n print(\"The algorithm is at stage {} and did {}\".format(stage, compute_count))\n print(\"The algorithm took :{}\".format(datetime.datetime.utcnow() - begin))\n print(\"We print the best patterns\")\n patterns = sorted_patterns.get_top_k_non_redundant(data, top_k)\n print_results(patterns)\n\ndef exhaustive(DATA, enable_i=True):\n begin = datetime.datetime.utcnow()\n\n items = extract_items(DATA)\n # we remove first element wich are useless\n for i in range(len(DATA)):\n DATA[i] = DATA[i][1:]\n\n l_max = extract_l_max(DATA)\n fifo = [[]]\n\n # to know if elements have already been added\n fifo_elements = set()\n\n\n stage = 0\n compute_count = 0\n\n while len(fifo) != 0:\n seed = fifo.pop(0)\n children = compute_children(seed, items, enable_i)\n\n if k_length(seed) > stage:\n stage = k_length(seed)\n\n for child in children:\n # we do not 
explore elements with a null support\n if k_length(child) <= l_max and child not in fifo_elements:\n fifo.append(child)\n fifo_elements.add(child)\n\n compute_count += len(children)\n\n print(\"The algorithm took:{}\".format(datetime.datetime.utcnow() - begin))\n # we add the root\n print('The size is: {}'.format(len(fifo_elements)+1))\n\n return fifo_elements\n\ndef launch():\n # DATA = read_data_sc2('../data/sequences-TZ-45.txt')[:5000]\n # DATA = reduce_k_length(10, DATA)\n\n DATA = read_data_kosarak('../data/easy.data')\n results = exhaustive(DATA)\n\n\nif __name__ == '__main__':\n launch()\n","sub_path":"competitors/exhaustive_size.py","file_name":"exhaustive_size.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141516820","text":"from __future__ import unicode_literals\nfrom django.http import *\nfrom django.shortcuts import render\nfrom django.template.loader import get_template\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.decorators import user_passes_test\n\nfrom author.models import *\nfrom pratilipi_user.models import *\nfrom cms import lib\n\nimport datetime\nimport json\nimport requests\n\n@login_required\ndef search_author(request):\n t = get_template('search_author.html')\n html = t.render()\n response = HttpResponse(html, content_type=\"text/html\")\n\n # i/p param\n qry_param = request.GET\n email_id = qry_param.get('emailid', None)\n slug = qry_param.get('slug', None)\n\n # validate\n if (email_id is None or len(email_id) == 0) and (slug is None or len(slug) == 0):\n response.write(\"Error: Invalid email id or slug\")\n return response\n\n if email_id is not None and len(email_id) > 0:\n identifier = Identifier.objects.filter(value=email_id)\n if len(identifier) == 0:\n response.write(\"Msg: User not found\")\n return response\n author = Author.objects.filter(user_id=identifier[0].user_id, state='ACTIVE')\n elif slug is not None and len(slug) > 0:\n author = Author.objects.filter(slug=slug)\n if len(author) == 0:\n response.write(\"Msg: Author not found\")\n return response\n identifier = Identifier.objects.filter(user_id=author[0].user_id)\n email_id = identifier[0].value\n\n url = 'https://{}.pratilipi.com/user/{}'.format(author[0].language.lower(), author[0].slug)\n response.write('EmailId: {}
<br>'.format(email_id))\r\n    response.write('AuthorId: {}<br>'.format(author[0].id))\r\n    response.write('UserId: {}<br>'.format(author[0].user_id))\r\n    response.write('Link: <a href=\"{}\">{}</a><br>
'.format(url, url))\n return response\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser)\ndef delete_comment(request):\n t = get_template('delete_comment.html')\n html = t.render()\n response = HttpResponse(html, content_type=\"text/html\")\n\n # i/p param\n qry_param = request.GET\n comment_id = qry_param.get('commentid', None)\n\n # validate\n if comment_id is None or len(comment_id) == 0:\n response.write(\"Error: Invalid comment id\")\n return response\n rc = lib.delete_comment(comment_id)\n r = 'Deleted {} comment'.format(rc)\n response.write(r)\n return response\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser)\ndef delete_review(request):\n t = get_template('delete_review.html')\n html = t.render()\n response = HttpResponse(html, content_type=\"text/html\")\n\n # i/p param\n qry_param = request.GET\n user_id = qry_param.get('userid', None)\n pratilipi_id = qry_param.get('pratilipiid', None)\n\n # validate\n if user_id is None or len(user_id) != 16:\n response.write(\"Error: Invalid user id\")\n return response\n if pratilipi_id is None or len(pratilipi_id) != 16:\n response.write(\"Error: Invalid pratilipi id\")\n return response\n\n rc = lib.delete_review_based_on_user_pratilipi(user_id, pratilipi_id)\n r = 'Deleted {} review'.format(rc)\n response.write(r)\n return response\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser)\ndef block_user(request):\n t = get_template('block_user.html')\n html = t.render()\n response = HttpResponse(html, content_type=\"text/html\")\n\n # i/p param\n qry_param = request.GET\n user_id = qry_param.get('userid', None)\n\n # validate\n if user_id is None or len(user_id) != 16:\n response.write(\"Error: Invalid user id\")\n return response\n\n rc = lib.block_user(user_id)\n r = 'Blocked {} user'.format(rc)\n response.write(r)\n return response\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser)\ndef merge_profiles(request):\n t = get_template('merge_profiles.html')\n html = t.render()\n response = HttpResponse(html, content_type=\"text/html\")\n\n # i/p param\n qry_param = request.GET\n from_author_slug = qry_param.get('from_author_slug', None)\n to_author_slug = qry_param.get('to_author_slug', None)\n\n # validate\n if from_author_slug is None or len(from_author_slug) != 10:\n response.write(\"Error: Invalid from author slug\")\n return response\n if to_author_slug is None or len(to_author_slug) != 10:\n response.write(\"Error: Invalid to author slug\")\n return response\n\n msg = lib.merge_profiles(from_author_slug, to_author_slug)\n r = 'Merge Status : \\n{}'.format(msg)\n response.write(r)\n return response\n\n@login_required\n@user_passes_test(lambda u: u.is_superuser)\ndef delete_user(request):\n t = get_template('delete_user.html')\n html = t.render()\n response = HttpResponse(html, content_type=\"text/html\")\n\n # i/p param\n qry_param = request.GET\n user_id = qry_param.get('userid', None)\n\n # validate\n if user_id is None or len(user_id) != 16:\n response.write(\"Error: Invalid user id\")\n return response\n\n r = requests.post( \"https://hindi.pratilipi.com/api/users/v2.0/admins/users/delete\", \n headers={\"AccessToken\":\"71037d7e-04a0-4710-8ba7-c9e928b9c74f\"}, \n data={\"userId\":\"{}\".format(user_id)} )\n print(\"delete api - {}, {}\".format(r.status_code, r.text))\n\n if r.status_code == 200:\n response.write(\"successfully sent event to delete user\")\n else:\n response.write(\"failed to register event to delete user\")\n return 
response\n\n","sub_path":"cms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336423506","text":"# Moves between lists and dictionary. Loops over list.\nwords = ['color', 'color', 'colour', 'amok', 'amok', 'amuck', 'advisor', 'adviser', 'pepper']\ncanonical_spellings = ['color', 'amuck', 'adviser', 'pepper']\nmappings = {'colour': 'color', 'amok': 'amuck', 'advisor': 'adviser'}\n\n#make an empty list\nnew_list = []\n\n#loop over list of words\nfor word in words:\n if word in words:\n if word in mappings:\n #if a word is mispelled do something\n #correct the spelling using the mapping dictionary\n corrected_word = mappings[word]\n # add corrected_word\n new_list.append(corrected_word)\n\n else:\n new_list.append(word)\nprint(new_list)\n","sub_path":"python/method_writing.py","file_name":"method_writing.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"496226644","text":"# -*- coding: utf-8 -*-\n\n# comentário com várias linhas. OBS: tudo o que está dentro é uma string.\n'''\nx = 2\ny = 3\nz = 4\n\nsera = bool (x == y)\n\nif sera:\n\tprint(\"iguais\")\nelse:\n\tprint(\"não foi dessa vez\")\n\n\nsoma = x + y\nprint(x == y and x == soma)\n'''\n\nx = 1\ny = 1000000 \n\nif x > y:\n\tprint(\"x é maior que y\")\nelif y > x:\n\tprint(\"y é maior que x\")\n","sub_path":"begin/operadoresLogicos.py","file_name":"operadoresLogicos.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"145901907","text":"import pygame\r\n\r\n# constants\r\nLEFT_BUTTON = 1\r\nWINDOW_SIZE = (640, 480)\r\nBLACK = (0, 0, 0)\r\nBLUE = (0, 0, 255)\r\nRED = (255, 0, 0)\r\n\r\nrunning = True\r\n\r\nwindow = pygame.display.set_mode(WINDOW_SIZE)\r\nwindow.fill(BLACK)\r\n\r\n# This time around we are going to check if the mouse is inside a rectangular\r\n# that has been placed on the game window. To do that we make use og a nice little\r\n# function of the Rect class, called collidepoint().\r\n# To make life easier for us we first create a Rect object and call it the_box.\r\n# Notice that this box is placed not far from the origin(the upper left corner)\r\n# To be exact; 30px right and 30px down. 
The box is 100px X 100px in size.\r\nthe_box = pygame.Rect(30, 30, 100, 100)\r\n\r\n# now we need to paint the box on the screen and give it some (initial) color\r\n# in the process(here: red)\r\npygame.draw.rect(window, RED, the_box)\r\n\r\nwhile running:\r\n    event = pygame.event.poll()\r\n    if event.type == pygame.QUIT:\r\n        running = False\r\n    elif event.type == pygame.MOUSEBUTTONDOWN and event.button == LEFT_BUTTON:\r\n        # When the user clicks on the left mouse button, the mouse coordinates\r\n        # are sent to the collidepoint() function of the box(the_box) and the if\r\n        # statement handles the result (True or False; the mouse is either within the box or not)\r\n        # if the mouse is within, the_box gets a paint job and turns blue.\r\n        # At this moment everyone gets extremely happy :-)\r\n        if the_box.collidepoint(event.pos):\r\n            pygame.draw.rect(window, BLUE, the_box)\r\n\r\n    pygame.display.flip()\r\n","sub_path":"Python/FORR2HF05CU/Lokaverkefni/Sýniverkefni/02_PyGame/05_Mouse_Interaction_I(1).py","file_name":"05_Mouse_Interaction_I(1).py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"401927242","text":"import optuna.integration.lightgbm as lgb_tuner\nimport lightgbm as lgb\nimport time\n\ndef lgbtuner(CFG, fold, params, train, dataset_preprocess_train, dataset_postprocess_train, get_result, LOGGER):\n    LOGGER.info(f\"===== parameter tuning =====\")\n    start_time = time.time()\n\n    # dataset\n    x_train, y_train, x_valid, y_valid = dataset_preprocess_train(CFG, train, fold)\n    trainData = lgb.Dataset(x_train,y_train)\n    validData = lgb.Dataset(x_valid,y_valid)\n    \n    # train\n    model = lgb_tuner.train(params,\n                            trainData,\n                            valid_sets = [trainData, validData],\n                            num_boost_round = CFG.lgbm_num_boost_round,\n                            early_stopping_rounds = CFG.lgbm_early_stopping_rounds,\n                            verbose_eval = -1,\n                            show_progress_bar = False)\n\n    # eval\n    predictions = model.predict(x_valid)\n    _oof_df = dataset_postprocess_train(CFG, predictions, train, fold)\n    \n    # scoring\n    score = get_result(_oof_df)\n\n    best_params = model.params\n    elapsed = time.time() - start_time\n    LOGGER.info(f'best_params: {best_params}')\n    LOGGER.info(f'Score: {score} - time: {elapsed:.0f}s')\n    return best_params\n","sub_path":"optimization/opt_tuner.py","file_name":"opt_tuner.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21906530","text":"import math, random, copy\n\n\nclass NeuralNetwork:\n    @staticmethod\n    def sigmoid(x):\n        \"\"\"\n        A sigmoid function is a mathematical function having a characteristic \"S\"-shaped curve or sigmoid curve.\n        :param x: argument\n        :return: sigmoid(x)\n        \"\"\"\n        return 1. / (1. 
+ math.exp(-x))\n\n @staticmethod\n def dsigmoid(x):\n \"\"\"\n Derivative of sigmoid function\n :param x: argument\n :return: derivative of sigmoid(x)\n \"\"\"\n return NeuralNetwork.sigmoid(x) * (1 - NeuralNetwork.sigmoid(x))\n\n def __init__(self, input, hidden_layers, output):\n \"\"\"\n Initialization\n :param input: array of input data arrays of several variables\n :param hidden_layers: array of the number of neurons in hidden layers\n :param output: array of output data arrays of several variables (0 <= each value <= 1)\n \"\"\"\n self.input = copy.deepcopy(input)\n self.hidden_layers = copy.deepcopy(hidden_layers)\n self.output = copy.deepcopy(output)\n if len(self.input) != len(self.output):\n raise ValueError\n self.layers = list(map(int, [len(self.input[0]), *self.hidden_layers, len(self.output[0])]))\n self.activations = [[0 for _ in range(self.layers[i])] for i in range(len(self.layers))]\n self.dactivations = [[0 for _ in range(self.layers[i])] for i in range(len(self.layers))]\n self.weights = [[[random.uniform(-5, 5) for _ in range(self.layers[i + 1])] for j in range(self.layers[i])]\n for i in range(len(self.layers) - 1)]\n\n def update(self, arbitrary_input):\n \"\"\"\n Update method: learning and getting results by arbitrary input\n :param arbitrary_input: arbitrary input\n :return: result\n \"\"\"\n if len(arbitrary_input) != self.layers[0]:\n raise ValueError\n for i in range(len(arbitrary_input)):\n self.activations[0][i] = self.dactivations[0][i] = arbitrary_input[i]\n for i in range(len(self.layers) - 1):\n for j in range(self.layers[i + 1]):\n s = 0\n for k in range(self.layers[i]):\n s += self.activations[i][k] * self.weights[i][k][j]\n self.activations[i + 1][j] = NeuralNetwork.sigmoid(s)\n self.dactivations[i + 1][j] = NeuralNetwork.dsigmoid(s)\n return self.activations[-1]\n\n def back_propagation(self, target, learning_rate):\n \"\"\"\n Error back propagation method\n :param target: target for error back propagation method\n :param learning_rate: learning rate\n :return: maximum error between results\n \"\"\"\n if len(target) != self.layers[-1]:\n raise ValueError\n delta = [[0 for _ in range(self.layers[i])] for i in range(len(self.layers))]\n # hidden -> output\n o = len(self.layers) - 1\n for i in range(self.layers[o]):\n delta[o][i] = (target[i] - self.activations[o][i]) * self.dactivations[o][i]\n # input and hidden -> hidden\n for i in range(len(self.layers) - 2, 0, -1):\n for j in range(self.layers[i]):\n for k in range(self.layers[i + 1]):\n delta[i][j] += delta[i + 1][k] * self.weights[i][j][k]\n delta[i][j] *= self.dactivations[i][j]\n # update weights\n for i in range(len(self.layers) - 1):\n for j in range(self.layers[i]):\n for k in range(self.layers[i + 1]):\n self.weights[i][j][k] += learning_rate * delta[i + 1][k] * self.activations[i][j]\n # find error\n error = 0\n for i in range(len(target)):\n error = max(error, abs(target[i] - self.activations[o][i]))\n return error\n\n def train(self, iterations, learning_rate=0.5):\n \"\"\"\n Training method\n :param iterations: number of iterations\n :param learning_rate: learning speed from 0 to 1\n :return: None\n \"\"\"\n k = len(self.output)\n for i in range(iterations):\n self.update(self.input[i % k])\n self.back_propagation(self.output[i % k], learning_rate)\n","sub_path":"files/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404644890","text":"from 
django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom app import views\n\nurlpatterns = [\n    url(r'^home/',views.home,name=\"home\"),\n    url(r'^market/$',views.market,name=\"market\"),\n    url(r'^cart/',views.cart,name=\"cart\"),\n    url(r'^mine/',views.mine,name=\"mine\"),\n\n    url(r'^market/(\\d+)/(\\d+)/(\\d+)',views.marketWithParam,name=\"market_param\"),\n    url(r'^register/',views.register,name=\"register\"),\n    url(r'^checkUser/',views.checkUser), # checks that the username is unique\n    url(r'^login/',views.login,name=\"login\"),\n    url(r'^logout/',views.loginOut,name=\"loginout\"),\n    url(r'^addToCart/',views.addToCart,name=\"addToCart\"),\n    url(r'^subToCart/',views.subToCart,name=\"subToCart\"),\n    url(r'^addCart/',views.addCart,name=\"addCart\"),\n    url(r'^subCart/',views.subCart,name=\"subCart\"),\n    url(r'^chanageSelect/',views.chanageSelect,name=\"chanageSelect\"),\n    # url(r'^changeManySelect/',views.changeManySelect,name=\"changeManySelect\"),\n    # url(r'^createOrder/',views.createOrder,name=\"createOrder\"),\n    # url(r'^orderInfo/(.+)',views.orderInfo,name=\"orderInfo\"),\n    # url(r'^changeOrderStatu',views.changeOrderStatu,name=\"changeOrderStatu\"),\n]\n","sub_path":"axf/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359776403","text":"\"\"\"leetcode 493 Reverse Pairs (Hard)\r\nGiven an array nums, if i < j and nums[i] > 2*nums[j], we call (i, j) an important reverse pair.\r\nYou need to return the number of important reverse pairs in the given array.\r\n\r\nExample 1:\r\nInput: [1,3,2,3,1]\r\nOutput: 2\r\n\r\nExample 2:\r\nInput: [2,4,3,5,1]\r\nOutput: 3\r\n\r\nNote:\r\nThe length of the given array will not exceed 50000.\r\nAll the numbers in the input array are within the range of a 32-bit integer.\r\n\r\nSource: LeetCode (leetcode-cn)\r\nLink: https://leetcode-cn.com/problems/reverse-pairs\r\nCopyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints must cite the source.\r\n\"\"\"\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n    def reversePairs(self, nums: List[int]) -> int:\r\n        return self.merge_sort(nums, 0, len(nums) - 1)\r\n\r\n    def merge_sort(self, nums, start, end):\r\n        # 1. terminator\r\n        if start >= end:\r\n            return 0\r\n        # 2. split and sort\r\n        mid = (start + end) >> 1\r\n        count = self.merge_sort(nums, start, mid) + self.merge_sort(nums, mid + 1, end)\r\n        # 3. counting\r\n        i, j = start, mid + 1\r\n        while i <= mid and j <= end:\r\n            if nums[i] > 2 * nums[j]:\r\n                count += (mid - i + 1)\r\n                j += 1\r\n            else:\r\n                i += 1\r\n        # 4. merge\r\n        i, j = start, mid + 1\r\n        temp = []\r\n        while i <= mid and j <= end:\r\n            if nums[i] <= nums[j]:\r\n                temp.append(nums[i])\r\n                i += 1\r\n            else:\r\n                temp.append(nums[j])\r\n                j += 1\r\n        if i <= mid:\r\n            temp.extend(nums[i: mid + 1])\r\n        if j <= end:\r\n            temp.extend(nums[j: end + 1])\r\n        # 5. 
copy\r\n nums[start: end + 1] = temp[:]\r\n return count\r\n","sub_path":"Week_08/493-reverse-pairs.py","file_name":"493-reverse-pairs.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"267282923","text":"import os\nimport pytest\nfrom unittest import mock\n\nfrom minos import tasks, utils\n\nthis_dir = os.path.dirname(os.path.abspath(__file__))\ndata_dir = os.path.join(this_dir, \"data\", \"tasks\")\n\n\ndef test_vcf_merge_and_cluster():\n # Merge and cluster are tested properly in `cluster_vcf_records` repo.\n # Here, we'll check they run and make the expected output files (but\n # don't check the contents of the files).\n options = mock.Mock()\n options.ref_fasta = os.path.join(data_dir, \"merge_and_cluster.ref.fa\")\n options.outdir = \"tmp.merge_and_cluster\"\n utils.rm_rf(options.outdir)\n options.vcf_fofn = \"tmp.merge_and_cluster.vcf.fofn\"\n with open(options.vcf_fofn, \"w\") as f:\n for i in (1, 2, 3):\n print(os.path.join(data_dir, f\"merge_and_cluster.{i}.vcf\"), file=f)\n\n options.temp_dir = None\n options.cpus = 2\n options.mem_limit = 1\n options.force = False\n options.sample_limit = 2\n tasks.vcf_merge.run(options)\n expect = [\n \"block.0.tsv.gz\",\n \"block.0.tsv.gz.tbi\",\n \"block.1.tsv.gz\",\n \"block.1.tsv.gz.tbi\",\n \"metadata.json\",\n \"variants.tsv.gz\",\n ]\n for fname in expect:\n assert os.path.exists(os.path.join(options.outdir, fname))\n os.unlink(options.vcf_fofn)\n\n options = mock.Mock()\n options.ref_fasta = os.path.join(data_dir, \"merge_and_cluster.ref.fa\")\n options.merge_dir = \"tmp.merge_and_cluster\"\n options.outprefix = \"tmp.merge_and_cluster.out\"\n options.max_ref_len = 6\n options.max_alleles = 50\n options.cpus = 1\n expect = [f\"{options.outprefix}.excluded.tsv\", f\"{options.outprefix}.vcf\"]\n utils.rm_rf(*expect)\n tasks.vcf_cluster.run(options)\n for fname in expect:\n assert os.path.exists(fname)\n os.unlink(fname)\n utils.rm_rf(options.merge_dir)\n\n\ndef test_get_test_data():\n options = mock.Mock()\n options.outdir = \"tmp.get_test_data\"\n utils.syscall(f\"rm -rf {options.outdir}\")\n tasks.get_test_data.run(options)\n outdir_abs = os.path.abspath(options.outdir)\n assert os.path.exists(options.outdir)\n with open(os.path.join(options.outdir, \"manifest.tsv\")) as f:\n got_lines = [x.rstrip().split(\"\\t\") for x in f]\n assert got_lines == [\n [\"name\", \"reads\", \"vcf\"],\n [\"sample1\", os.path.join(outdir_abs, \"sample1.bam\"), os.path.join(outdir_abs, \"in.1.vcf\")],\n [\"sample2\", os.path.join(outdir_abs, \"sample2.bam\"), os.path.join(outdir_abs, \"in.2.vcf\")],\n ]\n utils.syscall(f\"rm -r {options.outdir}\")\n","sub_path":"tests/tasks_test.py","file_name":"tasks_test.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"63809684","text":"from models import *\nfrom sqlalchemy import create_engine\nimport pandas as pd\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('sqlite:///sports.db')\nBase.metadata.create_all(engine)\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n# CITY OBJECTS\nla = City(name='Los Angeles', state='CA')\nnyc = City(name='New York City', state='NY')\nsession.add_all([la, nyc])\nsession.commit()\n\n# SPORTS OBJECTS\nbaseball = Sports(name='Baseball')\nbball = Sports(name='Basketball')\nsession.add_all([baseball, bball])\nsession.commit()\n\n# READING CSVS\nla_dodgers = 
pd.read_csv('la_dodgers_baseball.csv').to_dict(orient='records')\nla_lakers = pd.read_csv('la_lakers_basketball.csv').to_dict(orient='records')\nny_yankees = pd.read_csv('ny_yankees_baseball.csv').to_dict(orient='records')\nny_knicks = pd.read_csv('ny_knicks_basketball.csv').to_dict(orient='records')\n\n# FUNCTION TO CREATE PLAYERS\ndef populate_teams(data):\n player_list = []\n for x in data:\n P = Players(name=x.get('name'),age = x.get('age', None),number=x.get('number', None), height=x.get('height',None), weight=x.get('weight', None))\n player_list.append(P)\n return player_list\n\n# SETTING VARIABLES TO OUTCOME OF ABOVE FUNCTION PER TEAM\ndodgers_players = populate_teams(la_dodgers)\nlakers_players = populate_teams(la_lakers)\nyankees_players = populate_teams(ny_yankees)\nknicks_players = populate_teams(ny_knicks)\n\n# TEAM OBJECTS\ndodgers = Teams(name='Dodgers', city=la, sport=baseball, teams_=dodgers_players)\nyankees = Teams(name='NY Yankees', sport=baseball, city=nyc, teams_=yankees_players)\nlakers = Teams(name='LA Lakers', sport=bball, city=la, teams_=lakers_players)\nknicks = Teams(name='NY Knicks', sport=bball, city=nyc, teams_=knicks_players)\n\n# ADDING & COMMITTING TEAM OBJECTS\nsession.add_all([dodgers, lakers, yankees, knicks])\nsession.commit()\n\n# dodgers = Teams(\n# name = 'LA Dodgers',\n# city_id = session.query(City).filter(City.name=='Los Angeles').first().id,\n# sport_id = session.query(Sports).filter(Sports.name=='Baseball').first().id\n# )\n#\n# lakers = Teams(\n# name = 'LA Lakers',\n# city_id = session.query(City).filter(City.name=='Los Angeles').first().id,\n# sport_id = session.query(Sports).filter(Sports.name=='Basketball').first().id\n# )\n#\n# knicks = Teams(\n# name = 'NY Knicks',\n# city_id = session.query(City).filter(City.name=='New York City').first().id,\n# sport_id = session.query(Sports).filter(Sports.name=='Basketball').first().id\n# )\n#\n# yankees = Teams(\n# name = 'NY Yankees',\n# city_id = session.query(City).filter(City.name=='New York City').first().id,\n# sport_id = session.query(Sports).filter(Sports.name=='Baseball').first().id\n# )\n","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"29683217","text":"#-*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport glob\nimport click\nfrom sequana import version\nimport functools\n\n__all__ = [\"main\"]\n\nimport sequana\nfrom sequana import logger\n\nlogger.level = \"INFO\"\n\n# This can be used by all commands as a simple decorator\ndef common_logger(func):\n @click.option(\"--logger\", default=\"INFO\",\n type=click.Choice([\"INFO\", \"DEBUG\", \"WARNING\", \"CRITICAL\", \"ERROR\"]))\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n return wrapper\n\n\ndef get_env_vars(ctx, args, incomplete):\n return [k for k in os.environ.keys() if incomplete in k]\n\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\n\nimport pkg_resources\npipelines = [item.key for item in pkg_resources.working_set if item.key.startswith(\"sequana\")]\nif len(pipelines):\n version +=\"\\nThe following pipelines are installed:\\n\"\nfor item in pkg_resources.working_set:\n if item.key.startswith(\"sequana\") and item.key != 'sequana':\n version += \"\\n - {} version: {}\".format(item.key, item.version)\n\n\n@click.group(context_settings=CONTEXT_SETTINGS)\n@click.version_option(version=version)\ndef main(**kwargs):\n \"\"\"\\bThis is 
the main entry point for a set of Sequana applications.\n\n    Pipelines such as sequana_rnaseq, sequana_variant_calling have their own\n    application and help.\n\n    In addition, more advanced tools such as sequana_taxonomy or\n    sequana_coverage have their own standalone.\n\n    \"\"\"\n    pass\n\n\n@main.command()\n@click.argument('filename', type=click.STRING, nargs=-1)\n@click.option(\"-o\", \"--output\",\n    help=\"filename where to save results. to be used with --head, --tail\")\n\n@click.option(\"--count-reads\", is_flag=True)\n@click.option(\"--head\", type=click.INT,\n    help='number of reads to extract from the head')\n@click.option(\"--merge\", is_flag=True)\n@click.option(\"--tail\", type=click.INT,\n    help=\"number of reads to extract from the tail\")\ndef fastq(**kwargs):\n    \"\"\"Set of useful utilities for FastQ manipulation.\n\n    Input file can be gzipped or not. The --output option defines where the\n    results are saved (used with --head, --tail and --merge).\n\n    \"\"\"\n    from sequana.fastq import FastQ\n\n    filenames = kwargs['filename']\n    # users may provide a wildcard such as \"A*gz\" or a list of files.\n    if len(filenames) == 1:\n        # whether the argument is an existing file or a pattern, glob gives the same answer.\n        filenames = glob.glob(filenames[0])\n    for filename in filenames:\n        os.path.exists(filename)\n\n    # could be simplified calling count_reads only once\n    if kwargs['count_reads']:\n        for filename in filenames:\n            f = FastQ(filename)\n            Nreads = f.count_reads()\n            Nlines = Nreads * 4\n            print(f\"Number of reads in {filename}: {Nreads}\")\n            print(f\"Number of lines in {filename}: {Nlines}\")\n    elif kwargs['head']:\n        for filename in filenames:\n            f = FastQ(filename)\n            if kwargs['output'] is None:\n                logger.error(\"Please use --output to tell us where to save the results\")\n                sys.exit(1)\n            N = kwargs['head'] * 4\n            f.extract_head(N=N, output_filename=kwargs['output'])\n    elif kwargs['tail']: #pragma: no cover\n        raise NotImplementedError\n    elif kwargs['merge']:\n        import subprocess\n        # merge all input files (assuming gz extension)\n        extensions = [filename.split(\".\")[-1] for filename in filenames]\n        if set(extensions) != set(['gz']):\n            raise ValueError(\"Your input FastQ files must be zipped\")\n        output_filename = kwargs['output']\n        if output_filename is None:\n            logger.error(\"You must use --output filename.gz\")\n            sys.exit(1)\n        if output_filename.endswith(\".gz\") is False:\n            raise ValueError(\"your output file must end in .gz\")\n\n        p1 = subprocess.Popen(['zcat'] + list(filenames), stdout=subprocess.PIPE)\n        fout = open(output_filename, 'wb')\n        p2 = subprocess.run(['pigz'], stdin=p1.stdout, stdout=fout)\n\n    else: #pragma: no cover\n        print(\"Use one of the commands\")\n\n\n@main.command()\n@click.argument('name', type=click.STRING)\n@click.option('--check', is_flag=True)\n@click.option('--extract-adapters', is_flag=True)\n@click.option('--quick-fix', is_flag=True)\n@click.option('--output', default=None)\ndef samplesheet(**kwargs):\n    \"\"\"Utilities to manipulate sample sheet\"\"\"\n    name = kwargs['name']\n    from sequana.iem import IEM\n    if kwargs['check']:\n        iem = IEM(name)\n        iem.validate()\n        logger.info(\"SampleSheet looks correct\")\n    elif kwargs[\"extract_adapters\"]:\n        iem = IEM(name)\n        iem.to_fasta()\n    elif kwargs[\"quick_fix\"]:\n        iem = IEM(name, tryme=True)\n        if kwargs['output']:\n            filename = kwargs['output']\n        else:\n            filename = name + \".fixed\"\n        logger.info(\"Saving fixed version in {}\".format(filename))\n        iem.quick_fix(output_filename=filename)\n\n\n# This will be a complex command to provide HTML summary page for\n# input files (e.g. bam), or results from pipelines. 
For each module,\n# we should have a corresponding option that starts with the module's name\n# This can also take as input various types of data (e.g. FastA)\n@main.command()\n@click.argument(\"name\", type=click.Path(exists=True), nargs=-1)\n@click.option(\"--module\",\n    required=False,\n    type=click.Choice([\"rnadiff\", \"bamqc\", \"bam\", \"enrichment\", \"fasta\", \"fastq\"]))\n@click.option(\"--enrichment-taxon\", type=click.INT,\n    #required=True,\n    default=0,\n    help=\"a valid taxon identifier\")\n@click.option(\"--enrichment-kegg-name\", type=click.STRING,\n    default=None,\n    help=\"a valid KEGG name (automatically filled for 9606 (human) and 10090 (mmusculus))\")\n@click.option(\"--enrichment-log2-foldchange-cutoff\", type=click.FLOAT,\n    default=1,\n    show_default=True,\n    help=\"remove events with absolute log2 fold change below this value\")\n@click.option(\"--enrichment-padj-cutoff\", type=click.FLOAT,\n    default=0.05,\n    show_default=True,\n    help=\"remove events with pvalue above this value (default 0.05).\")\n@click.option(\"--enrichment-biomart\", type=click.STRING,\n    default=None,\n    help=\"\"\"you may need a biomart mapping of your identifier for the kegg\npathways analysis. If you do not have this file, you can use 'sequana biomart'\ncommand\"\"\")\n@click.option(\"--enrichment-go-only\", type=click.BOOL,\n    default=False,\n    is_flag=True,\n    help=\"\"\"to run only panther db enrichment\"\"\")\n@click.option(\"--enrichment-max-genes\", type=click.INT,\n    default=3000,\n    help=\"\"\"Maximum number of genes (up or down) to use in PantherDB, which is limited to about 3000\"\"\")\n@click.option(\"--enrichment-kegg-only\", type=click.BOOL,\n    default=False,\n    is_flag=True,\n    help=\"\"\"to run only kegg pathways enrichment\"\"\")\n@click.option(\"--enrichment-kegg-pathways-directory\", type=click.Path(),\n    default=None,\n    help=\"\"\"a directory where the pathways for each organism can be found\"\"\")\n@click.option(\"--enrichment-kegg-background\", type=click.INT,\n    default=None,\n    help=\"\"\"a background for kegg enrichment. If None, set to number of genes found in KEGG\"\"\")\n@common_logger\ndef summary(**kwargs):\n    \"\"\"Create a HTML report for various sequana outputs\n\n    \\b\n    * rnadiff: the output of RNADiff pipeline\n    * enrichment: the output of RNADiff pipeline\n    * bamqc\n    * fastq\n\n    Example for the enrichment module:\n\n        sequana summary T1vsT0.complete.xls --module enrichment --enrichment-taxon 10090 \n        --enrichment-log2-foldchange-cutoff 2 --enrichment-kegg-only\n\n    The KEGG pathways are loaded and it may take time. 
Once done, they are saved\n    in kegg_pathways/organism and can be loaded next time:\n\n        sequana summary T1vsT0.complete.xls --module enrichment --enrichment-taxon 10090 \n        --enrichment-log2-foldchange-cutoff 2 --enrichment-kegg-only\n        --enrichment-kegg-pathways-directory kegg_pathways\n\n    \"\"\"\n    names = kwargs['name']\n    module = kwargs['module']\n\n    if module is None:\n        if names[0].endswith('fastq.gz') or names[0].endswith('.fastq'):\n            module = \"fastq\"\n        elif names[0].endswith('.bam'):\n            module = \"bam\"\n        elif names[0].endswith('fasta.gz') or names[0].endswith('.fasta'):\n            module = \"fasta\"\n        else:\n            logger.error(\"please use --module to tell us about the input files\")\n            sys.exit(1)\n\n    if module == \"bamqc\":\n        for name in names:\n            print(f\"Processing {name}\")\n            from sequana.modules_report.bamqc import BAMQCModule\n            report = BAMQCModule(name, \"bamqc.html\")\n    elif module == \"rnadiff\":\n        for name in names:\n            from sequana.rnadiff import RNADiffResults\n            from sequana.modules_report.rnadiff import RNAdiffModule\n            if name.split(\".\")[-1] == 'xls':\n                output_filename = name.replace(\".xls\", \".html\")\n            else:\n                output_filename = name + \".html\"\n            output_filename = output_filename.split(\"/\")[-1]\n            logger.info(f\"Processing {name} into {output_filename}\")\n            data = RNADiffResults(name)\n            report = RNAdiffModule(data, output_filename)\n    elif module == \"enrichment\":\n        try:\n            name = names[0]\n        except IndexError:\n            logger.error(\"no input file provided\")\n            sys.exit(1)\n        from sequana.modules_report.enrichment import Enrichment\n        taxon = kwargs['enrichment_taxon']\n        if taxon == 0:\n            logger.error(\"You must provide a taxon with --enrichment_taxon\")\n            return\n        keggname = kwargs['enrichment_kegg_name']\n        params = {\"padj\": kwargs['enrichment_padj_cutoff'],\n            \"log2_fc\": kwargs['enrichment_log2_foldchange_cutoff'],\n            \"max_entries\": kwargs['enrichment_max_genes'],\n            \"mapper\": kwargs['enrichment_biomart'],\n            \"kegg_background\": kwargs['enrichment_kegg_background'],\n            \"preload_directory\": kwargs['enrichment_kegg_pathways_directory'],\n            }\n        filename = kwargs['enrichment_biomart']\n        if filename and os.path.exists(filename) is False:\n            logger.error(\"{} does not exist\".format(filename))\n            sys.exit(1)\n        filename = kwargs['enrichment_kegg_pathways_directory']\n        if filename and os.path.exists(filename) is False:\n            logger.error(\"{} does not exist\".format(filename))\n            sys.exit(1)\n\n        report = Enrichment(name, taxon,\n                kegg_organism=keggname, \n                enrichment_params=params,\n                go_only=kwargs[\"enrichment_go_only\"],\n                kegg_only=kwargs[\"enrichment_kegg_only\"], \n                command=\" \".join(['sequana'] + sys.argv[1:]))\n    elif module == \"fasta\": # there is no module per se. Here we just call FastA.summary()\n        from sequana.fasta import FastA\n        for name in names:\n            f = FastA(name)\n            f.summary()\n    elif module == \"fastq\": # there is no module per se. Here we just run FastQC and print its stats\n        from sequana.fastq import FastQ\n        from sequana import FastQC\n        for filename in names:\n            ff = FastQC(filename, max_sample=1e6, verbose=False)\n            stats = ff.get_stats()\n            print(stats)\n    elif module == \"bam\": \n        import pandas as pd\n        from sequana import BAM\n        for filename in names:\n            ff = BAM(filename)\n            stats = ff.get_stats()\n            df = pd.Series(stats).to_frame().T\n            print(df)\n\n\n\n\n@main.command()\n@click.option(\"--mart\", default=\"ENSEMBL_MART_ENSEMBL\",\n    show_default=True,\n    help=\"A valid mart name\")\n@click.option(\"--dataset\", required=True, \n    help=\"A valid dataset name. e.g. 
mmusculus_gene_ensembl, hsapiens_gene_ensembl\")\n@click.option(\"--attributes\", multiple=True,\n default=[\"ensembl_gene_id\",\"go_id\",\"entrezgene_id\",\"external_gene_name\"],\n show_default=True,\n help=\"A list of valid attributes to look for in the dataset\")\n@click.option(\"--output\", default=None,\n help=\"\"\"by default save results into a CSV file named\n biomart____.csv\"\"\")\n@common_logger\ndef biomart(**kwargs):\n \"\"\"Retrieve information from biomart and save into CSV file\n\n This command uses BioMart from BioServices to introspect a MART service\n (--mart) and a specific dataset (default to mmusculus_gene_ensembl). Then,\n for all ensembl IDs, it will fetch the requested attributes (--attributes).\n Finally, it saves the CSV file into an output file (--output). This takes\n about 5-10 minutes to retrieve the data depending on the connection.\n\n \"\"\"\n print(kwargs)\n logger.level = kwargs[\"logger\"]\n\n mart = kwargs['mart']\n attributes = kwargs['attributes']\n dataset = kwargs[\"dataset\"]\n\n from sequana.enrichment import Mart\n conv = Mart(dataset, mart)\n df = conv.query(attributes)\n conv.save(df, filename=kwargs['output'])\n\n\n@main.command()\n@click.option(\"-i\", \"--input\", required=True,\n help=\"The salmon input file.\")\n@click.option(\"-o\", \"--output\", required=True,\n help=\"The feature counts output file\")\n@click.option(\"-f\", \"--gff\", required=True,\n help=\"A GFF file compatible with your salmon file\")\n@click.option(\"-a\", \"--attribute\", default=\"ID\",\n help=\"A valid attribute to be found in the GFF file and salmon input\")\ndef salmon(**kwargs):\n \"\"\"Convert output of Salmon into a feature counts file \"\"\"\n from sequana import salmon\n salmon_input = kwargs['input']\n output = kwargs[\"output\"]\n if os.path.exists(salmon_input) is False:\n logger.critical(\"Input file does not exists ({})\".format(salmon_input))\n gff = kwargs[\"gff\"]\n attribute = kwargs['attribute']\n s = salmon.Salmon(salmon_input)\n s.save_feature_counts(output, gff, attribute=attribute)\n\n\n@main.command()\n@click.option(\"-i\", \"--input\", required=True)\n@click.option(\"-o\", \"--output\", required=True)\ndef gtf_fixer(**kwargs):\n \"\"\"Reads GTF and fix known issues (exon and genes uniqueness)\"\"\"\n from sequana.gtf import GTFFixer\n gtf = GTFFixer(kwargs['input'])\n res = gtf.fix_exons_uniqueness(kwargs['output'])\n #res = gtf.fix_exons_uniqueness(kwargs['output'])\n print(res)\n\n\n\n","sub_path":"sequana/scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"112462938","text":"#%% import\n\nimport numpy as np\n\n#%% constants\n\nGravitation_constant = 6.674*10**(-8)\t\t\t# gravitational constant in g\nM_Star = 1.989*10**33 \t\t\t\t\t# sun mass in g\nK_b = 1.3807 * 10**(-16)\t\t\t\t\t# boltzmann constant in erg/k\nMass_Proton = 1.673*10**(-24)\n\nR = 10* 1.496*10**13\t\t\t\t\t\t# distance to star, here 1 AU in cm * 10\nDust_Gas_Ratio_Initial = 0.01\nSigma_Dust_Start = 1\t\t\t\t\t\t\t# g/cm^2 (in dust)\n\nepsilon_pebble_drift = 0.5\t\t\t\t\t# pebble drift efficiency\n\n# stokes values for two population model\nSt_pebble = 0.1\n\nsize_pebble = 0 # ausrechnen\nsize_dust = 0\n\nfile = open(\"data.txt\", \"w\")\n\n# absolut value of power law index of the gas pressure\nGamma = 2.75\t\t\t\t\t\t\t\t\t# just a guessm--> ask Christian --> refer to Christians Mail, so far: 2.75\n\nplanetesimal_diameter = 100 * 1000 * 
100 # 100km \nplanetesimal_density = 5 # g/cm^3\n\n#%% Bool parameter\npebble_flux = True\ndust_growth = True\nplanetesimal_collisions = True\n\n#h = 10*60*60*24*365\nsteps = 500000\nruntime = (10 * 10**6) *60*60*24*365\ndef get_h():\n\th = runtime / steps\n\treturn h\nh = get_h()\n\n# calculate stokes number of dust\ndef calc_St_dust():\n\tsize = 1* 10**(-4) # 1 um = 10-6 m\n\tsolid_density = 1.2 # g/cm^3\n\tSigma_Gas = Sigma_Dust_Start / Dust_Gas_Ratio_Initial\n\tSt_dust = size * solid_density * np.pi *0.5 / Sigma_Gas\n\treturn St_dust\nSt_dust = calc_St_dust()\n#print(St_dust)\n\n# temperature calculation after armitage 2010 section 2.4.2\ndef get_temperature():\n\tangle = 0.1\t\t\t# radian FIXME\n\tT_star = 4000\t\t# K\n\tR_sun =\t6.96*10**(10)\t\t\t# in cm FIXME\n\tR_star = 1.25 * R_sun\n\ttemperature = T_star * np.sqrt(R_star / R) * angle**(1/4)\n\treturn(temperature)\ntemperature = get_temperature()\n#print(temperature)\n\n# planetesimal mass calculation out of density and diameter\ndef get_planetesimal_mass():\n\tplanetesimal_mass = planetesimal_density * (4/3)*np.pi * (planetesimal_diameter/2)**3\n\treturn planetesimal_mass\nplanetesimal_mass = get_planetesimal_mass()\n#print(planetesimal_mass)\n\n\n\ndef get_sound_velocity():\n\tadiabatic_index = 1.4\n\tvelocity_sound = np.sqrt(adiabatic_index * K_b * temperature / (2* Mass_Proton))\n\treturn velocity_sound\nvelocity_sound = get_sound_velocity()\n\ndef get_kepler_angular_velocity():\n\tomega_k = (1/R) * np.sqrt(Gravitation_constant*M_Star/R)\n\treturn omega_k\nkepler_angular_velocity = get_kepler_angular_velocity()\nprint(kepler_angular_velocity)\n\ndef get_disk_scale_height():\n\tdisk_scale_height = velocity_sound / (kepler_angular_velocity)\n\treturn disk_scale_height\ndisk_scale_height = get_disk_scale_height()\n\n#%% pebble drift\n\ndef get_drift_timescale(St, dust_gas_ratio):\n\tdrift_velocity = (St / (St**2 + (1 + dust_gas_ratio)**2 )) * disk_scale_height * Gamma * velocity_sound / R\n\tdrift_timescale = R / drift_velocity\n\treturn drift_timescale\n\ndef get_critical_pebble_flux(drift_timescale): # this has to be corrected\n\tcrit_pebble_flux_coefficient = 0.00001\n\tF_crit = crit_pebble_flux_coefficient * Sigma_Dust_Start / drift_timescale\n\treturn F_crit\n\ndef get_P_form(pebble_sigma, dust_gas_ratio):\n\tdrift_timescale = get_drift_timescale(St_pebble, dust_gas_ratio)\n\tF_crit = get_critical_pebble_flux(drift_timescale)\n\tF_pebble = pebble_sigma / drift_timescale\n\tif F_pebble > F_crit:\n\t\tP_form = epsilon_pebble_drift * F_pebble\n\telse:\n\t\tP_form = 0\n\treturn P_form, drift_timescale\n\n# dust growth\n\ndef get_timescale_growth(dust_gas_ratio):\n\t# here we assume Epstein drag: lambda_f < dust size\n\tT_growth_old = 1/(dust_gas_ratio*kepler_angular_velocity)\n\tT_growth = T_growth_old * np.log(St_pebble/St_dust)\n\treturn T_growth\n\ndef get_D_form(dust_sigma, dust_gas_ratio):\n\tif dust_sigma <= 0:\n\t\tD_form = 0\n\t\tgrowth_timescale = 0\n\telse:\n\t\tgrowth_timescale = get_timescale_growth(dust_gas_ratio)\n\t\tD_form = dust_sigma / growth_timescale \t\t\t\t\t\t\t\t\t\t\t\n\treturn D_form, growth_timescale\n\n# planetesimal - planetesimal collisions\ndef get_collision_timescale(planetesimal_sigma):\n\tvelocity_escape = np.sqrt(2 * Gravitation_constant * (2 * planetesimal_mass) / (2 * planetesimal_diameter))\n\tvelocity_hill = R * kepler_angular_velocity * (2*planetesimal_mass/ (3*M_Star))**(1/3)\n\tgrav_cross_section = np.pi * (2*planetesimal_diameter)**2 * (1 + (velocity_escape / 
velocity_hill)**2)\n\tcollision_timescale = np.sqrt(2*np.pi)*planetesimal_mass / ( grav_cross_section * planetesimal_sigma * kepler_angular_velocity)\n\treturn collision_timescale\n\ndef get_P_dest(planetesimal_sigma):\n\tcollision_timescale = get_collision_timescale(planetesimal_sigma)\n\tP_dest = planetesimal_sigma/collision_timescale\n\treturn P_dest, collision_timescale\n\n\n#%% get dynamic parameter\n\ndef get_dynamic_parameter(sigma_array, dust_gas_ratio):\n\tif pebble_flux == True:\n\t\tP_form, drift_timescale = get_P_form(sigma_array[1], dust_gas_ratio)\n\telse:\n\t\tP_form = 0\n\t\tdrift_timescale = 0\n\n\tif dust_growth == True:\n\t\tD_form, growth_timescale = get_D_form(sigma_array[2], dust_gas_ratio)\n\telse:\n\t\tD_form= 0\n\t\tgrowth_timescale = 0\n\n\tif planetesimal_collisions == True:\n\t\tif sigma_array[0] > 0:\n\t\t\tP_dest, collision_timescale = get_P_dest(sigma_array[0])\n\t\telse:\n\t\t\tP_dest = 0\n\t\t\tcollision_timescale = 0\n\telse:\n\t\tP_dest = 0\n\t\tcollision_timescale = 0\n\n\treturn P_form, D_form, P_dest, drift_timescale, growth_timescale, collision_timescale\n\n\n#%% differential equation\ndef function(Sigma_i, i, P_form, D_form, P_dest):\n k = np.array([0,0,0], float) # initialize k - array\n k[0] = P_form - P_dest\n k[1] = D_form - P_form\n k[2] = - D_form + P_dest\n return k\n\n#%% RK2 Step\ndef make_RK2_step(Sigma_i, i, h, P_form, D_form, P_dest):\n k1 = function(Sigma_i, i, P_form, D_form, P_dest)\n k2 = function(Sigma_i + k1 * h, i * h, P_form, D_form, P_dest)\n Sigma_i = Sigma_i + (.5 * k1 + .5 * k2) * h \n return Sigma_i\n\n#%% calc new dust gas ratio\ndef calc_new_dust_gas_ratio(Sigma_i):\n\tdust_gas_ratio_i = Dust_Gas_Ratio_Initial * ( Sigma_i / Sigma_Dust_Start)\n\treturn dust_gas_ratio_i\n\n#%% Initialization\ndef initialize_data():\n\tSigma_array = np.array([0,0,Sigma_Dust_Start], float) # only dust\n\tdust_gas_ratio = Dust_Gas_Ratio_Initial\t\t\t\t\t\t\t\t# initial dust gas ratio --> has to change obviously\n\treturn Sigma_array, dust_gas_ratio\n\ndef integration():\n\tSigma_i, dust_gas_ratio_i = initialize_data()\n\tt = 0\n\tfor i in range (0, steps):\n\n\t\tP_form, D_form, P_dest, drift_timescale, growth_timescale, collision_timescale = get_dynamic_parameter(Sigma_i, dust_gas_ratio_i)\n\t\tSigma_i = make_RK2_step(Sigma_i, i, h, P_form, D_form, P_dest)\n\t\tdust_gas_ratio_i = calc_new_dust_gas_ratio(Sigma_i[2])\n\t\tt = t + h / (60*60*24*365.) 
# t is passed time in years\n\n\t\t# write data to file\n\t\tfile.write(str(i) + \" \" + str(t) + \" \" )\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 0 1\n\t\tfile.write(str(Sigma_i[0]) + \" \" + str(Sigma_i[1]) + \" \" + str(Sigma_i[2]) + \" \" )\t\t\t\t\t\t# 2 3 4\n\t\tfile.write(str(P_form) + \" \" + str(D_form) + \" \" + str(P_dest) + \" \" )\t\t\t\t\t\t\t\t\t# 5 6 7\n\t\tfile.write(str(drift_timescale) + \" \" + str(growth_timescale) + \" \" + str(collision_timescale) + \" \" )\t# 8 9 10\n\t\tfile.write( \"\\n\" )\n\n\t\tprint(\"Integration Step \" + str(i) + \" of \" + str(steps) + \" h = \" + str(h /(60*60*24*365)))\n\treturn 0\n\n\nintegration()\nfile.close()","sub_path":"SystemWithUnits/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":7024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242670236","text":"#!/usr/bin/python3\n# -*- encoding: utf-8 -*-\n\nfrom api import API\nimport time\nimport os\nimport logging\n\napi = API()\nmode = None\nuser_input = \"\"\n\nwhile True:\n if mode == \"1\":\n user_input = input(\"\\nSelect 0) Back to main choices or Search 1) Users or 2) Tickets or 3) Organizations: \")\n if user_input == \"0\":\n mode = None\n continue\n elif user_input != \"1\" and user_input != \"2\" and user_input != \"3\":\n print(\"Invalid input!!!\")\n continue\n search_term = input(\"Enter search term: \")\n search_value = input(\"Enter search value: \")\n\n start = time.time()\n if user_input == \"1\":\n print(\"Searching User for {} with a value of {}\".format(search_term, search_value))\n status, result = api.get_users(search_term, search_value)\n elif user_input == \"2\":\n print(\"Searching Tickets for {} with a value of {}\".format(search_term, search_value))\n status, result = api.get_tickets(search_term, search_value)\n elif user_input == \"3\":\n print(\"Searching Organizations for {} with a value of {}\".format(search_term, search_value))\n status, result = api.get_organizations(search_term, search_value)\n else:\n continue\n end = time.time()\n\n if status == 200:\n if not result:\n print(\"No results found\")\n continue\n for r_dict in result:\n for key, val in r_dict.items():\n print(\"{0:25}{1}\".format(key, val))\n print(\"\\n\")\n elif status == 400:\n print(result)\n continue\n print (\"Found {} results in {} seconds\".format(len(result), end - start))\n elif mode == \"2\":\n print(\"\\n------------------------------------------------------------------\")\n print(\"Search Users with:\")\n for field in api.get_list_searchable_users():\n print(field)\n print(\"\\n------------------------------------------------------------------\")\n print(\"Search Tickets with:\")\n for field in api.get_list_searchable_tickets():\n print(field)\n print(\"\\n------------------------------------------------------------------\")\n print(\"Search Organizations with:\")\n for field in api.get_list_searchable_organizations():\n print(field)\n print(\"\\n\")\n mode = None\n elif mode == \"quit\":\n break\n else:\n print(\"Type 'quit' to exit anytime, Press Enter to continue\")\n print(\"\\n\\t\\tSelect search options:\")\n print(\"\\n\\t\\t * Press 1 to search\")\n print(\"\\t\\t * Press 2 to view a list of searchable fields\")\n print(\"\\t\\t * Type quit to exit\")\n mode = input()\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"596982821","text":"#!/usr/bin/python\nimport sys\nsys.path.insert(1, \"/nfs/2016/o/omartyno/python_pachages\")\nimport telegram\nimport logging\nimport time\nfrom threading import Thread\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import Updater\nfrom telegram.error import (TelegramError, Unauthorized, BadRequest, \n\t\t\t\t\t\t\t\tTimedOut, ChatMigrated, NetworkError)\n\nclass TimerThread(Thread):\n\tdef __init__(self, seconds, message):\n\t\t'''Constructor'''\n\n\t\tThread.__init__(self)\n\t\tself.seconds = seconds\n\t\tself.message = message\n\n\tdef run(self):\n\t\ttime.sleep(self.seconds)\n\t\tbot.send_message(chat_id=\"31568844\", text=self.message)\n\nbot = telegram.Bot(token=\"448756955:AAHKS3vwRFETKMWtTr-y-ciTFkzbUlxtTk8\")\n\nTimersArray = []\n\nprint (bot.get_me())\n\nupdater = Updater(token=\"448756955:AAHKS3vwRFETKMWtTr-y-ciTFkzbUlxtTk8\")\ndispatcher = updater.dispatcher\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\ndef start(bot, update):\n\t# time.sleep(10)\n\tbot.send_message(chat_id=update.message.chat_id, text=\"Hello\")\n\ndef ponyal(bot, update, args):\n\tif (len(args) < 2):\n\t\tbot.send_message(chat_id=update.message.chat_id, text=\"Wrong input. You need to unput time and then text\")\n\t\treturn\n\tif not args[0].isdigit():\n\t\tbot.send_message(chat_id=update.message.chat_id, text=\"Wrong input. First argument must be time in seconds\")\n\t\treturn\n\tmessage = args[1];\n\tfor i in range(2, len(args)):\n\t\tmessage += \" \" + args[i]\n\t\ti += 1\n\ttmr = TimerThread(int(args[0]), message)\n\ttmr.start()\n\t# time.sleep(int(args[0]))\n\t# bot.send_message(chat_id=\"31568844\", text=message)\n\n\nstart_handler = CommandHandler('start', start)\nponyal_handler = CommandHandler('time', ponyal, pass_args=True)\ndispatcher.add_handler(start_handler)\ndispatcher.add_handler(ponyal_handler)\n\ndef error_callback(bot, update, error):\n\tprint (\"Error: \")\n\tprint (error)\n\tprint (\"Update: \")\n\tprint (update)\ndispatcher.add_error_handler(error_callback)\n\nupdater.start_polling()\nbot.send_message(chat_id=\"31568844\", text=\"idle online\")\nupdater.idle()\n","sub_path":"teletest.py","file_name":"teletest.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80369987","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport os\nfrom flask import render_template, redirect, url_for, request, flash, current_app, send_from_directory\nfrom app import db\nfrom . 
import admin\nfrom .forms import ResumeForm, MessageForm, UploadForm\nfrom ..models import Resume, Post\nfrom ..email import send_email\nfrom ..decorators import admin_required\n\n\n@admin.route('/resume', methods=['GET','POST'])\ndef resume():\n id = request.args.get('id', type=int)\n if id is not None:\n resume = Resume.query.filter_by(id=id).first()\n elif Resume.query.filter_by(show=True).first() is not None:\n resume = Resume.query.filter_by(show=True).first()\n else:\n resume = Resume.query.filter_by(id=1).first()\n form = MessageForm()\n if form.validate_on_submit():\n send_email('381204760@qq.com',\n '招聘反馈',\n 'admin/email/resume',\n name=form.name.data,\n tel=form.tel.data,\n post=form.post.data,\n message=form.message.data\n )\n flash('感谢您的青睐,我会尽快与您联系', 'success')\n return redirect(url_for('admin.resume'))\n post_blog = Post.query.filter_by(title='Blog展示').first()\n post_train = Post.query.filter_by(title='抢火车票').first()\n post_airplane = Post.query.filter_by(title='爬飞机票').first()\n post_topic = Post.query.filter_by(title='\"百万撒币\"--控制多台手机答题').first()\n posts = [post_blog.id, post_train.id, post_airplane.id, post_topic.id]\n\n return render_template('admin/resume.html', resume=resume, form=form, posts=posts)\n\n\n\n@admin.route('/eidt-resume', methods=['GET','POST'])\n@admin_required\ndef edit_resume():\n id = request.args.get('id', type=int)\n if id is not None:\n resume = Resume.query.get_or_404(id)\n form = ResumeForm()\n if form.validate_on_submit():\n if id is None or form.new_edit.data:\n resume = Resume(\n title=form.title.data,\n epitome=form.epitome.data,\n me=form.me.data,\n skill=form.skill.data\n )\n if id is not None and not form.new_edit.data:\n resume.title = form.title.data\n resume.epitome = form.epitome.data\n resume.me = form.me.data\n resume.skill = form.skill.data\n db.session.add(resume)\n flash('编辑成功','success')\n return redirect(url_for('admin.manage_resume'))\n if id is not None:\n form.title.data = resume.title\n form.epitome.data = resume.epitome\n form.me.data = resume.me\n form.skill.data = resume.skill\n return render_template('admin/edit-resume.html', form=form)\n\n\n\n\n@admin.route('/delete-resume/')\n@admin_required\ndef delete_resume(id):\n resume = Resume.query.filter_by(id=id).first()\n if resume.show:\n flash('正在显示中的简历不可删除','error')\n else:\n db.session.delete(resume)\n return redirect(url_for('admin.manage_resume'))\n\n\n@admin.route('/show-resume/')\n@admin_required\ndef show_resume(id):\n resume = Resume.query.filter_by(id=id).first()\n old_resume = Resume.query.filter_by(show=True).first()\n if old_resume is not None:\n old_resume.show = False\n db.session.add(old_resume)\n resume.show = True\n db.session.add(resume)\n return redirect(url_for('admin.resume'))\n\n\n@admin.route('/manage-resume')\n@admin_required\ndef manage_resume():\n resumes = Resume.query.all()\n return render_template('admin/manage-resume.html', resumes=resumes)\n\n@admin.route('/upload-resume', methods=['GET','POST'])\n@admin_required\ndef upload_resume():\n form = UploadForm()\n if form.validate_on_submit():\n file = form.file.data\n file.name = '100.docx'\n file.save(os.path.join(current_app.config['UPLOADED_WORDS'], file.name))\n flash('上传成功', 'success')\n return redirect(url_for('admin.manage_resume'))\n return render_template('admin/upload-resume.html', form=form)\n\n\n@admin.route('/download-resume')\ndef download_resume():\n directory = os.path.abspath(current_app.config['UPLOADED_WORDS'])\n return send_from_directory(directory, '100.docx', 
as_attachment=True)\n","sub_path":"app/admin/view_resuem.py","file_name":"view_resuem.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87737627","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Threshold Activation Function\ndef threshold(x):\n cond = tf.less(x, tf.zeros(tf.shape(x), dtype=x.dtype))\n out = tf.where(cond, tf.zeros(tf.shape(x)), tf.ones(tf.shape(x)))\n return out\n\n\n# Plotting Threshold Activation Function\nh = np.linspace(-1, 1, 50)\nout = threshold(h)\n\nh_sigm = np.linspace(-10, 10, 100)\nout_sigm = tf.sigmoid(h_sigm)\nout_tanh = tf.tanh(h_sigm)\n\nh_smax = np.linspace(-5, 5, 100)\nout_smax = tf.nn.softmax(h_smax)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n y = sess.run(out)\n y_sigm = sess.run(out_sigm)\n y_tanh = sess.run(out_tanh)\n y_smax = sess.run(out_smax)\n\nplt.xlabel(\"Activity of Neuron\")\nplt.ylabel(\"Output of Neuron\")\nplt.title(\"Threshold Activation Function\")\nplt.plot(h, y)\nplt.show()\n\nplt.xlabel(\"Activity of Neuron\")\nplt.ylabel(\"Output of Neuron\")\nplt.title(\"Sigmoid Activation Function\")\nplt.plot(h_sigm, y_sigm)\nplt.show()\n\nplt.xlabel(\"Activity of Neuron\")\nplt.ylabel(\"Output of Neuron\")\nplt.title(\"Hyperbolic Tangent Activation Function\")\nplt.plot(h_sigm, y_tanh)\nplt.show()\n\nplt.xlabel(\"Activity of Neuron\")\nplt.ylabel(\"Output of Neuron\")\nplt.title(\"Softmax Activation Function\")\nplt.plot(h_smax, y_smax)\nplt.show()\n","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477193832","text":"#!/usr/bin/python2.7\n# Copyright 2013 SoluRank Inc. All Rights Reserved.\n# Author: andy@solurank.com (Andy Zhau)\n#\n# Configuration of Jat Toolkit.\n\n\nimport os\n\nfrom optparse import OptionParser\n\n\n__version__ = \"v2.0\"\n\n\n# Jat Options.\nusage = \"usage: %prog [command] [options] [targets]\"\nparser = OptionParser(usage, version=\"%prog \" + __version__)\nparser.add_option(\"-f\", \"--force\", action=\"store_true\", dest=\"force\",\n default=False, help=\"Running at force mode.\")\nparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\",\n default=False, help=\"Running at verbose mode.\")\nparser.add_option(\"-O\", \"--opt\", dest=\"opt\", default=0, type=\"int\",\n help=\"Optimization level. 
Different languages(types of \"\n \"library) have different actions when this value has been \"\n \"set to positive.\")\n\ndef AddArgs(option, opt_str, value, parser):\n addargs = getattr(parser.values, option.dest)\n if value.find(\"=\") == -1: addargs[value] = True\n else:\n name, val = value.split(\"=\")\n addargs[name] = val\n setattr(parser.values, option.dest, addargs)\nparser.add_option(\"-A\", \"--addargs\", action=\"callback\", callback=AddArgs,\n type=\"str\", dest=\"addargs\", default={},\n help=\"Addtional arguments passed to jat, used by plugins.\")\n\noptions, args = parser.parse_args()\n\n\n# Config root_dir for root directry.\nroot_dir = os.getcwd()\nwhile root_dir and root_dir is not \"/\":\n if os.path.exists(os.path.join(root_dir, \".jatrc\")):\n break\n root_dir = os.path.dirname(root_dir)\nif not root_dir or root_dir is \"/\":\n root_dir = os.getcwd()\n\n# Jat rc file.\njatrc = \"\"\ntry:\n with open(os.path.join(root_dir, \".jatrc\")) as f:\n jatrc = f.read()\nexcept IOError:\n pass\n\n# Config gen_dir for generating directory.\ngen_dir = os.path.join(root_dir, \"jat-genfiles\")\n\n# Config bin_dir for generating binary target directory.\nbin_dir = os.path.join(root_dir, \"jat-bin\")\n\n# Config lib_root_paths for searching libraries. The root_path doesn't end\n# with /.\nlib_root_paths = [\n os.path.normpath(os.path.join(__file__, \"../builtin\")),\n root_dir,\n]\n\nbinary_lib_types = (\"apps\", \"tests\", \"services\")\n","sub_path":"jat/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372668211","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[283]:\n\n\nfrom lifetimes.datasets import load_cdnow_summary\nfrom lifetimes import BetaGeoFitter\nfrom lifetimes.plotting import plot_frequency_recency_matrix\nfrom lifetimes.plotting import plot_probability_alive_matrix\nfrom matplotlib import pyplot as plt\nfrom lifetimes.plotting import plot_period_transactions\n\nfrom lifetimes.datasets import load_cdnow_summary_data_with_monetary_value\nfrom lifetimes import GammaGammaFitter\n\nfrom lifetimes.datasets import load_transaction_data\nfrom lifetimes.utils import summary_data_from_transaction_data\n\nfrom lifetimes.utils import calibration_and_holdout_data\nfrom lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases\n\nfrom lifetimes.plotting import plot_history_alive\n\nfrom lifetimes.datasets import load_cdnow_summary_data_with_monetary_value\n\n\n# # Basic Frequency/Recency analysis using the BG/NBD model\n\n# In[284]:\n\n\ndata = load_cdnow_summary(index_col=[0])\n\n\n# In[285]:\n\n\nprint(data.head())\n\n\n# In[286]:\n\n\n# similar API to scikit-learn and lifelines.\nbgf = BetaGeoFitter(penalizer_coef=0.0)\nbgf.fit(data['frequency'], data['recency'], data['T'])\n\n\n# In[287]:\n\n\nprint(bgf)\n\n\n# In[288]:\n\n\nprint(bgf.summary)\n\n\n# ## Visualizing our Frequency/Recency Matrix\n\n# In[289]:\n\n\nplot_frequency_recency_matrix(bgf)\n\n\n# In[290]:\n\n\nplot_probability_alive_matrix(bgf)\n\n\n# ## Ranking customers from best to worst\n\n# In[291]:\n\n\nt = 1\ndata['predicted_purchases'] = bgf.conditional_expected_number_of_purchases_up_to_time(t, data['frequency'], data['recency'], data['T'])\ndata.sort_values(by='predicted_purchases').tail(5)\n\n\n# ## Assessing model fit\n\n# In[292]:\n\n\nplot_period_transactions(bgf)\n\n\n# # Example using transactional datasets\n\n# In[293]:\n\n\ntransaction_data = 
load_transaction_data()\n\n\n# In[294]:\n\n\nprint(transaction_data.head())\n\n\n# In[295]:\n\n\nsummary = summary_data_from_transaction_data(transaction_data, 'id', 'date', observation_period_end='2014-12-31')\n\n\n# In[296]:\n\n\nprint(summary.head())\n\n\n# In[297]:\n\n\nbgf.fit(summary['frequency'], summary['recency'], summary['T'])\n\n\n# ## More model fitting\n\n# In[298]:\n\n\nsummary_cal_holdout = calibration_and_holdout_data(transaction_data, 'id', 'date', calibration_period_end='2014-09-01', observation_period_end='2014-12-31' )\n\n\n# In[299]:\n\n\nprint(summary_cal_holdout.head())\n\n\n# In[300]:\n\n\nbgf.fit(summary_cal_holdout['frequency_cal'], summary_cal_holdout['recency_cal'], summary_cal_holdout['T_cal'])\nplot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout)\n\n\n# In[302]:\n\n\nindividual\n\n\n# # Customer Predictions\n\n# In[301]:\n\n\nt = 10 #predict purchases in 10 periods\nindividual = summary.iloc[20]\n# The below function is an alias to `bfg.conditional_expected_number_of_purchases_up_to_time`\nbgf.predict(t, individual['frequency'], individual['recency'], individual['T'])\n\n\n# In[ ]:\n\n\n##Customer Probability Histories\n\n\n# In[303]:\n\n\nid = 35\ndays_since_birth = 200\nsp_trans = transaction_data.loc[transaction_data['id'] == id]\nplot_history_alive(bgf, days_since_birth, sp_trans, 'date')\n\n\n# # Estimating customer lifetime value using the Gamma-Gamma model\n\n# In[304]:\n\n\nsummary_with_money_value = load_cdnow_summary_data_with_monetary_value()\nsummary_with_money_value.head()\nreturning_customers_summary = summary_with_money_value[summary_with_money_value['frequency']>0]\n\n\n# In[305]:\n\n\nprint(returning_customers_summary.head())\n\n\n# # The Gamma-Gamma model and the independence assumption\n\n# In[306]:\n\n\nreturning_customers_summary[['monetary_value', 'frequency']].corr()\n\n\n# In[307]:\n\n\nggf = GammaGammaFitter(penalizer_coef = 0)\nggf.fit(returning_customers_summary['frequency'], returning_customers_summary['monetary_value'])\n\n\n# In[308]:\n\n\nprint(ggf)\n\n\n# In[309]:\n\n\nprint(ggf.conditional_expected_average_profit(summary_with_money_value['frequency'], summary_with_money_value['monetary_value']).head(10))\n\n\n# In[310]:\n\n\nprint(\"Expected conditional average profit: %s, Average profit: %s\" % (\n ggf.conditional_expected_average_profit(\n summary_with_money_value['frequency'],\n summary_with_money_value['monetary_value']\n ).mean(),\n summary_with_money_value[summary_with_money_value['frequency']>0]['monetary_value'].mean()\n))\n\n\n# In[311]:\n\n\n# refit the BG model to the summary_with_money_value dataset\nbgf.fit(summary_with_money_value['frequency'], summary_with_money_value['recency'], summary_with_money_value['T'])\n\n\n# In[312]:\n\n\nprint(ggf.customer_lifetime_value(\n bgf, #the model to use to predict the number of future transactions\n summary_with_money_value['frequency'],\n summary_with_money_value['recency'],\n summary_with_money_value['T'],\n summary_with_money_value['monetary_value'],\n time=12, # months\n discount_rate=0.01 # monthly discount rate ~ 12.7% annually\n).head(10))\n\n\n# # Saving and loading model\n\n# ## Fit model\n\n# In[313]:\n\n\nfrom lifetimes import BetaGeoFitter\nfrom lifetimes.datasets import load_cdnow_summary\n\ndata = load_cdnow_summary(index_col=[0])\nbgf = BetaGeoFitter()\nbgf.fit(data['frequency'], data['recency'], data['T'])\nbgf\n\"\"\"\"\"\"\n\n\n# ## Saving model\n\n# In[314]:\n\n\nbgf.save_model('bgf.pkl')\n# or\nbgf.save_model('bgf_small_size.pkl', 
save_data=False, save_generate_data_method=False)\n\n\n# ## Loading model\n\n# In[315]:\n\n\nbgf_loaded = BetaGeoFitter()\nbgf_loaded.load_model('bgf.pkl')\nbgf_loaded\n\"\"\"\"\"\"\n\n\n# In[ ]:\n\n\nbgf_loaded\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"lifetimes_BG_NBD_gamma_gamma.py","file_name":"lifetimes_BG_NBD_gamma_gamma.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"408267867","text":"# -*- coding:utf-8 -*-\nimport tensorflow as tf\n\na = tf.range(10)\n\nb = tf.maximum(a,2)\nc = tf.minimum(a,8)\nd = tf.clip_by_value(a,2,8) # clip values into the range [2, 8]\n\n# print(b,c,d)\n\ne = tf.random.normal([2,2],mean = 10)\nprint(tf.norm(e))\nf = tf.clip_by_norm(e,15)\nprint(tf.norm(f))\n\n# example gradient list; in practice grads comes from an optimizer or tf.GradientTape\ngrads = [tf.random.normal([2,2]) for _ in range(4)]\nnew_grads, total_norm = tf.clip_by_global_norm(grads,25)","sub_path":"张量限幅.py","file_name":"张量限幅.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"305817693","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport geopandas as gpd\n\nshp = gpd.read_file('NHDSnapshot/Hydrography/NHDFlowline.shp')\n\ndbf = gpd.read_file('NHDPlusAttributes/PlusFlowlineVAA.dbf')\ndbf = dbf[['ComID','StreamOrde']]\na = shp.merge(dbf, left_on='COMID', right_on='ComID', how='left')\n\na.geometry = a.geometry.centroid\na.to_crs({'init': 'epsg:4326'}, inplace=True) # WGS 84 -- the default\n#a.plot()\na[['COMID', \n   'REACHCODE', \n   'FLOWDIR', \n   'WBAREACOMI', \n   'FTYPE', \n   'StreamOrde', \n   'geometry']].to_file('~/Documents/catitat/delivery/data/flow_03N_centroid.shp')\n","sub_path":"delivery/data/merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"421497891","text":"import sys\nimport traceback  # used by the error handler in compute_atom_atom_distance\nsys.path.append(\"../csbw_20_project\")\nimport numpy as np\nfrom Bio.PDB import *\nimport torch\n\nimport configs as CONFIGS\nimport utils.data_utils as DataUtils\n\nclass DistanceMatrix(object):\n    def __init__(self, output_matrix_type=\"4N4N\", input_dir=CONFIGS.FRAGMENTS_DIR, input_file_format=CONFIGS.DOT_PDB,\\\n        output_dir=CONFIGS.CONTACT_MAP_DIR, parser=PDBParser(QUIET=True), atom_1=\"CB\", atom_2=\"CB\", save=False):\n        \"\"\"Compute distance matrix.\n\n        Args:\n            output_matrix_type (str, optional): Values can be 4N4N, NN. Defaults to \"4N4N\".\n            input_dir ([type], optional): [description]. Defaults to CONFIGS.FRAGMENTS_DIR.\n            file_format ([type], optional): [description]. Defaults to CONFIGS.DOT_PDB.\n            parser ([type], optional): [description]. Defaults to PDBParser(QUIET=True).\n            atom_1 (str, optional): [description]. Defaults to \"CB\".\n            atom_2 (str, optional): [description]. Defaults to \"CB\".\n            save (bool, optional): [description]. 
Defaults to False.\n \"\"\"\n super(DistanceMatrix, self).__init__()\n self.output_matrix_type = output_matrix_type\n self.input_dir = input_dir\n self.input_file_format = input_file_format\n self.output_dir = output_dir\n self.parser = parser\n self.atom_1 = atom_1\n self.atom_2 = atom_2\n self.save = save\n self.bb_atoms = CONFIGS.BACKBONE_ATOMS\n\n def compute_atom_atom_distance(self, residue_1, residue_2, atom_1=\"CB\", atom_2=\"CB\"):\n \"\"\"\n Compute distance between atom-atom coordinates of two residues'.\n An atom could be CA, CB, N, O.\n Default atoms are beta-beta carbon.\n \"\"\"\n try:\n if atom_1==\"CB\" and residue_1.get_resname()=='GLY':\n atom_1 = \"CA\"\n \n if atom_2==\"CB\" and residue_2.get_resname()=='GLY':\n atom_2 = \"CA\"\n\n diff_vector = residue_1[atom_1].coord - residue_2[atom_2].coord\n except Exception as e:\n print(\"Can not resolve distance: \", residue_1.get_resname(), residue_2.get_resname(), atom_1, atom_2)\n traceback.print_exc()\n raise\n # in case, there is an error but I want the distance matrix, comment out above lines and comment in next line\n # return 0.0 \n\n return np.sqrt(np.sum(diff_vector * diff_vector))\n \n def compute_4n4n_distance_matrix(self, chain_1, chain_2):\n \"\"\"\n All pairwise backbone atom distance. Is is also called full-atom distance matrix.\n 4 backbone atoms CA, CB, N and O. If ther are n residues in a chain,\n the distance matrix is of size (4n x 4n)\n \"\"\"\n l = len(self.bb_atoms)\n dist_matrix = np.zeros((l*len(chain_1), l*len(chain_2)), np.float)\n for row, residue_1 in enumerate(chain_1):\n for col, residue_2 in enumerate(chain_2):\n for k, atom_1 in enumerate(self.bb_atoms):\n for l, atom_2 in enumerate(self.bb_atoms):\n dist_matrix[4*row+k, 4*col+l] = self.compute_atom_atom_distance(residue_1, residue_2, atom_1, atom_2)\n return dist_matrix \n \n def compute_nn_distance_matrix(self, chain_1, chain_2, atom_1=\"CB\", atom_2=\"CB\"):\n \"\"\"\n Compute nxn distance matrix of two chains where n is residue length. 
Default atoms are beta-beta carbon.\n \"\"\"\n dist_matrix = np.zeros((len(chain_1), len(chain_2)), np.float)\n for row, residue_1 in enumerate(chain_1):\n for col, residue_2 in enumerate(chain_2):\n dist_matrix[row, col] = self.compute_atom_atom_distance(residue_1, residue_2, atom_1, atom_2)\n return dist_matrix \n\n def generate(self, filename, pdb_id, chain_id):\n \n pdb_filename = self.input_dir + filename + self.input_file_format\n structure = self.parser.get_structure(pdb_id, pdb_filename)\n residues = structure[0][chain_id].get_residues()\n list_residues = list(residues)\n \n dist_matrix = None\n if self.output_matrix_type==\"4N4N\":\n dist_matrix = self.compute_4n4n_distance_matrix(list_residues, list_residues)\n elif self.output_matrix_type==\"NN\":\n dist_matrix = self.compute_nn_distance_matrix(list_residues, list_residues, self.atom_1, self.atom_2)\n \n if self.save:\n DataUtils.save_using_pickle(dist_matrix, self.output_dir + filename + CONFIGS.DOT_PKL)\n \n print(\"Computed distance-matrix for {}:{}\".format(filename, dist_matrix.shape))\n return dist_matrix\n ","sub_path":"datasets/distance_matrix.py","file_name":"distance_matrix.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"115063556","text":"#!/usr/bin/env python3\n# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab\n#########################################################################\n# Copyright 2016 Raoul Thill raoul.thill@gmail.com\n# Copyright 2020 Bernd Meiners Bernd.Meiners@mail.de\n#########################################################################\n# This file is part of SmartHomeNG.\n#\n# SmartHomeNG is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# SmartHomeNG is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with SmartHomeNG. 
If not, see <http://www.gnu.org/licenses/>.\n#########################################################################\n\nimport logging\nimport socket\n\nfrom io import StringIO\nfrom lib.model.smartplugin import *\nfrom lib.item import Items\n\nfrom .webif import WebInterface\n\n# If a package is needed, which might not be installed in the Python environment,\n# import it like this:\n\ntry:\n from lxml import etree\n import requests\n REQUIRED_PACKAGE_IMPORTED = True\nexcept:\n REQUIRED_PACKAGE_IMPORTED = False\n\n\n\nclass Mcast(socket.socket):\n def __init__(self, local_port):\n socket.socket.__init__(self, socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if hasattr(socket, \"SO_REUSEPORT\"):\n self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n self.bind(('', local_port))\n\n def mcast_add(self, addr):\n self.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,\n socket.inet_aton(addr) + socket.inet_aton('0.0.0.0'))\n\n\nclass Yamaha(SmartPlugin):\n PLUGIN_VERSION = \"1.0.2\"\n\n def __init__(self, smarthome):\n # Call init code of parent class (SmartPlugin)\n super().__init__()\n\n from bin.smarthome import VERSION\n if '.'.join(VERSION.split('.', 2)[:2]) <= '1.5':\n self.logger = logging.getLogger(__name__)\n\n self.logger.info(\"Init Yamaha\")\n self._yamaha_cmds = ['state', 'power', 'input', 'volume', 'mute']\n self._yamaha_ignore_cmds = ['play_info', 'list_info']\n self._yamaha_rxv = {}\n self.sock = None\n self.mcast_addr = \"239.255.255.250\"\n self.mcast_port = 1900\n self.mcast_buffer = 1024\n self.mcast_service = \"urn:schemas-yamaha-com:service:X_YamahaRemoteControl:1\"\n\n # On initialization error use:\n if not REQUIRED_PACKAGE_IMPORTED:\n self._init_complete = False\n return\n\n # if plugin should start even without web interface\n self.init_webinterface()\n\n def run(self):\n self.logger.info(\"Yamaha starting listener\")\n try:\n self.sock = Mcast(self.mcast_port)\n self.sock.mcast_add(self.mcast_addr)\n except OSError:\n self.logger.error(\"Could not create a socket to Yamaha Receiver\")\n return\n\n ## Todo: why not call initialize directly?\n #self.scheduler_trigger('Yamaha', self._initialize)\n\n ## test to call initialize directly\n if self._initialize():\n self.alive = True\n\n # Todo\n # need to reconnect when disconnected. 
Maybe lib/network.py has a solution\n while self.alive:\n data, addr = self.sock.recvfrom(self.mcast_buffer)\n try:\n host, port = addr\n except TypeError:\n pass\n notification = data.decode('utf-8')\n #self.logger.debug(\"Received '{}' from {}\".format(notification,addr))\n if self.mcast_service in notification:\n if host not in list(self._yamaha_rxv.keys()):\n self.logger.warn(\"Yamaha received notify from unknown host {}\".format(host))\n else:\n self.logger.info(\"Yamaha multicast received {} bytes from {}\".format(len(data), host))\n self.logger.debug(data)\n for line in notification.split('\\r\\n'):\n if line.startswith('<'):\n line = line.split('?>')[1]\n events = self._return_value(line, 'event')\n for event in events:\n if event.lower() in self._yamaha_cmds:\n self.logger.info(\n \"Yamaha need to update the following item \\\"{}\\\" for host: {}\".format(event,\n host))\n self._get_value(event.lower(), host)\n if event.lower() == 'volume':\n self._get_value('mute', host)\n elif event.lower() in self._yamaha_ignore_cmds:\n self.logger.debug(\"Yamaha ignoring command {}.\".format(event))\n else:\n self.logger.warn(\"Yamaha unsupported notify command.\")\n self.logger.debug(\"Yamaha sending ack to {}:{}\".format(host, port))\n self.sock.sendto(b'ack', addr)\n else:\n if self.sock:\n self.sock.close()\n self.sock = None\n \n\n def stop(self):\n self.alive = False\n try:\n if self.sock:\n self.sock.shutdown(socket.SHUT_RDWR)\n self.sock.close()\n except OSError as e:\n self.logger.error(\"OSError '{}' occurred while stopping plugin\".format(e))\n except Exception as e:\n self.logger.error(\"Unknown error '{}' occurred while stopping plugin\".format(e))\n finally:\n self.sock = None\n\n def _initialize(self):\n try:\n self.logger.info(\"Yamaha now initializing current state\")\n for yamaha_host, yamaha_cmd in self._yamaha_rxv.items():\n self.logger.info(\"Initializing items for host: {}\".format(yamaha_host))\n state = self._update_state(yamaha_host)\n self.logger.debug(state)\n for yamaha_cmd, item in yamaha_cmd.items():\n if yamaha_cmd != 'state':\n self.logger.info(\"Initializing cmd {} for item {}\".format(yamaha_cmd, item))\n value = self._return_value(state, yamaha_cmd)\n item(value, self.get_shortname())\n except Exception as e:\n self.logger.error(\"Exception '{}' occurred\".format(e))\n return\n self.logger.info(\"Yamaha finished initializing current state\")\n return True\n\n def _return_document(self, doc):\n return etree.tostring(doc, xml_declaration=True, encoding='UTF-8', pretty_print=False)\n\n def _event_notify(self, value, cmd='PUT'):\n root = etree.Element('YAMAHA_AV')\n root.set('cmd', cmd)\n system = etree.SubElement(root, 'System')\n misc = etree.SubElement(system, 'Misc')\n event = etree.SubElement(misc, 'Event')\n notice = etree.SubElement(event, 'Notice')\n if value:\n notice.text = 'On'\n else:\n notice.text = 'Off'\n tree = etree.ElementTree(root)\n return self._return_document(tree)\n\n def _power(self, value, cmd='PUT'):\n root = etree.Element('YAMAHA_AV')\n root.set('cmd', cmd)\n system = etree.SubElement(root, 'Main_Zone')\n power_control = etree.SubElement(system, 'Power_Control')\n power = etree.SubElement(power_control, 'Power')\n if value is True:\n power.text = 'On'\n elif value is False:\n power.text = 'Standby'\n elif value == 'GetParam':\n power.text = value\n tree = etree.ElementTree(root)\n return self._return_document(tree)\n\n def _input(self, value, cmd='PUT'):\n root = etree.Element('YAMAHA_AV')\n root.set('cmd', cmd)\n system = 
etree.SubElement(root, 'Main_Zone')\n input = etree.SubElement(system, 'Input')\n input_sel = etree.SubElement(input, 'Input_Sel')\n input_sel.text = value\n tree = etree.ElementTree(root)\n return self._return_document(tree)\n\n def _volume(self, value, cmd='PUT'):\n root = etree.Element('YAMAHA_AV')\n root.set('cmd', cmd)\n system = etree.SubElement(root, 'Main_Zone')\n volume = etree.SubElement(system, 'Volume')\n level = etree.SubElement(volume, 'Lvl')\n if cmd == 'GET':\n level.text = value\n else:\n val = etree.SubElement(level, 'Val')\n val.text = str(value)\n exponent = etree.SubElement(level, 'Exp')\n exponent.text = '1'\n unit = etree.SubElement(level, 'Unit')\n unit.text = 'dB'\n tree = etree.ElementTree(root)\n return self._return_document(tree)\n\n def _mute(self, value, cmd='PUT'):\n root = etree.Element('YAMAHA_AV')\n root.set('cmd', cmd)\n system = etree.SubElement(root, 'Main_Zone')\n volume = etree.SubElement(system, 'Volume')\n mute = etree.SubElement(volume, 'Mute')\n if value is True:\n mute.text = 'On'\n elif value is False:\n mute.text = 'Off'\n elif value == 'GetParam':\n mute.text = value\n tree = etree.ElementTree(root)\n return self._return_document(tree)\n\n def _validate_inputs(self):\n root = etree.Element('YAMAHA_AV')\n root.set('cmd', 'GET')\n system = etree.SubElement(root, 'System')\n state = etree.SubElement(system, 'Config')\n state.text = 'GetParam'\n tree = etree.ElementTree(root)\n return self._return_document(tree)\n\n def _get_state(self):\n root = etree.Element('YAMAHA_AV')\n root.set('cmd', 'GET')\n system = etree.SubElement(root, 'Main_Zone')\n state = etree.SubElement(system, 'Basic_Status')\n state.text = 'GetParam'\n tree = etree.ElementTree(root)\n return self._return_document(tree)\n\n def _get_value(self, notify_cmd, yamaha_host):\n yamaha_payload = None\n if notify_cmd == 'power':\n yamaha_payload = self._power('GetParam', cmd='GET')\n elif notify_cmd == 'volume':\n yamaha_payload = self._volume('GetParam', cmd='GET')\n elif notify_cmd == 'mute':\n yamaha_payload = self._mute('GetParam', cmd='GET')\n elif notify_cmd == 'input':\n yamaha_payload = self._input('GetParam', cmd='GET')\n\n res = self._submit_payload(yamaha_host, yamaha_payload)\n self.logger.debug(res)\n value = self._return_value(res, notify_cmd)\n item = self._yamaha_rxv[yamaha_host][notify_cmd]\n item(value, self.get_shortname())\n\n def _return_value(self, state, cmd):\n try:\n tree = etree.parse(StringIO(state))\n except Exception:\n return \"Invalid data received\"\n if cmd == 'input':\n try:\n value = tree.find('Main_Zone/Basic_Status/Input/Input_Sel')\n return value.text\n except:\n value = tree.find('Main_Zone/Input/Input_Sel')\n return value.text\n elif cmd == 'volume':\n try:\n value = tree.find('Main_Zone/Basic_Status/Volume/Lvl/Val')\n return int(value.text)\n except:\n value = tree.find('Main_Zone/Volume/Lvl/Val')\n return int(value.text)\n elif cmd == 'mute':\n try:\n value = tree.find('Main_Zone/Basic_Status/Volume/Mute')\n if value.text == 'On':\n return True\n elif value.text == 'Off':\n return False\n return value.text\n except:\n value = tree.find('Main_Zone/Volume/Mute')\n if value.text == 'On':\n return True\n elif value.text == 'Off':\n return False\n return value.text\n elif cmd == 'power':\n try:\n value = tree.find('Main_Zone/Basic_Status/Power_Control/Power')\n if value.text == 'Standby':\n return False\n elif value.text == 'On':\n return True\n except:\n value = tree.find('Main_Zone/Power_Control/Power')\n if value.text == 'Standby':\n return False\n 
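# 'On' maps to True; any other value yields None implicitly\n 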
elif value.text == 'On':\n return True\n elif cmd == 'event':\n events = []\n for entry in tree.findall('Main_Zone/Property'):\n events.append(entry.text)\n return events\n\n def _submit_payload(self, host, payload):\n if payload:\n self.logger.debug(\"Sending payload {}\".format(payload))\n res = requests.post(\"http://%s/YamahaRemoteControl/ctrl\" % host,\n headers={\n \"Accept\": \"text/xml\",\n \"User-Agent\": \"SmartHomeNG\"\n },\n timeout=4,\n data=payload)\n response = res.text\n del res\n if response == \"\":\n self.logger.warn(\"No response received.\")\n else:\n self.logger.debug(\"Response received: '{}'\".format(response))\n return response\n else:\n self.logger.warn(\"No payload given\")\n return None\n\n def _lookup_host(self, item):\n parent = item.return_parent()\n yamaha_host = self.get_iattr_value(parent.conf,'yamaha_host')\n return yamaha_host\n\n def parse_item(self, item):\n if self.has_iattr(item.conf, 'yamaha_cmd'):\n self.logger.debug(\"parse item: {}\".format(item))\n yamaha_host = self._lookup_host(item)\n yamaha_cmd = self.get_iattr_value(item.conf, 'yamaha_cmd').lower()\n if not yamaha_cmd in self._yamaha_cmds:\n self.logger.warning(\"{} not in valid commands: {}\".format(yamaha_cmd, self._yamaha_cmds))\n return None\n else:\n try:\n self._yamaha_rxv[yamaha_host][yamaha_cmd] = item\n except KeyError:\n self._yamaha_rxv[yamaha_host] = {}\n self._yamaha_rxv[yamaha_host][yamaha_cmd] = item\n return self.update_item\n\n def update_item(self, item, caller=None, source=None, dest=None):\n \"\"\"\n Item has been updated\n\n This method is called, if the value of an item has been updated by SmartHomeNG.\n It should write the changed value out to the device (hardware/interface) that\n is managed by this plugin.\n\n :param item: item to be updated towards the plugin\n :param caller: if given it represents the callers name\n :param source: if given it represents the source\n :param dest: if given it represents the dest\n \"\"\"\n if self.alive and caller != self.get_shortname():\n # code to execute if the plugin is not stopped\n # and only if the item has not been changed by this plugin:\n self.logger.info(\"Update item: {}, item has been changed outside this plugin\".format(item.id()))\n\n yamaha_cmd = self.get_iattr_value(item.conf, 'yamaha_cmd')\n yamaha_host = self._lookup_host(item)\n yamaha_payload = None\n yamaha_notify = False\n\n if yamaha_cmd == 'power':\n yamaha_payload = self._power(item())\n yamaha_notify = True\n elif yamaha_cmd == 'volume':\n yamaha_payload = self._volume(item())\n elif yamaha_cmd == 'mute':\n yamaha_payload = self._mute(item())\n elif yamaha_cmd == 'input':\n yamaha_payload = self._input(item())\n\n self._submit_payload(yamaha_host, yamaha_payload)\n self._update_state(yamaha_host)\n if yamaha_notify:\n # When power on, ensure event notify is enabled\n self._submit_payload(yamaha_host, self._event_notify(True))\n return None\n\n def _update_state(self, yamaha_host):\n state = self._submit_payload(yamaha_host, self._get_state())\n return state\n\n def init_webinterface(self):\n \"\"\"\n Initialize the web interface for this plugin\n\n This method is only needed if the plugin is implementing a web interface\n \"\"\"\n try:\n self.mod_http = Modules.get_instance().get_module(\n 'http') # try/except to handle running in a core version that does not support modules\n except:\n self.mod_http = None\n if self.mod_http == None:\n self.logger.error(\"Not initializing the web interface\")\n return False\n\n import sys\n if not 
\"SmartPluginWebIf\" in list(sys.modules['lib.model.smartplugin'].__dict__):\n self.logger.warning(\"Web interface needs SmartHomeNG v1.5 and up. Not initializing the web interface\")\n return False\n\n # set application configuration for cherrypy\n webif_dir = self.path_join(self.get_plugin_dir(), 'webif')\n config = {\n '/': {\n 'tools.staticdir.root': webif_dir,\n },\n '/static': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': 'static'\n }\n }\n\n # Register the web interface as a cherrypy app\n self.mod_http.register_webif(WebInterface(webif_dir, self),\n self.get_shortname(),\n config,\n self.get_classname(), self.get_instance_name(),\n description='')\n\n return True\n","sub_path":"yamaha/_pv_1_0_2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":18530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"313135532","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nThis is a QGraphicsScene to load the map and draw the FTS\n'''\n\nfrom math import atan2, cos, sin, pi, atan\nfrom pyquaternion import Quaternion\nimport os\nimport rospkg\nimport yaml\nimport rospy\n\nfrom python_qt_binding.QtWidgets import QGraphicsScene, QGraphicsTextItem, QGraphicsLineItem, QGraphicsEllipseItem\nfrom python_qt_binding.QtCore import QTimer, QEvent, pyqtSignal, QPointF, QLineF, pyqtSlot, Qt, QRectF, QSizeF\nfrom python_qt_binding.QtGui import QPen, QFont, QBrush, QColor, QPixmap, QTransform\n\nclass MapGraphicsScene(QGraphicsScene):\n signalMousePressedPos = pyqtSignal(QPointF)\n signalMouseReleasedPos = pyqtSignal(QPointF)\n signalMouseMovePos = pyqtSignal(QPointF)\n def __init__(self):\n super(MapGraphicsScene, self).__init__()\n self.regionCounter = 0\n self.line_dict = {}\n self.items_dict = {}\n\n # Transfrom pixel coordinates to world coordinates\n # Input: pixel coordinates QPointF\n # Output: world coordinates (x, y, z)\n def pixelToWorld(self, pixel_coords = QPointF()):\n world_coords = ((pixel_coords.x() - self.worldOrigin.x()) * self.map_resolution, -(pixel_coords.y() - self.worldOrigin.y()) * self.map_resolution, 0.0)\n return world_coords\n\n # Transform world coordinates to pixel coordinates\n # Input: world coordinates (x, y, z)\n # Output: pixel coordinates QPointF\n def worldToPixel(self, world_coords):\n pixel_coords = QPointF(world_coords[0] / self.map_resolution + self.worldOrigin.x(), -world_coords[1] / self.map_resolution + self.worldOrigin.y())\n return pixel_coords\n\n # Add ROI to graphics scene\n # Input: pixel coordinates QPointF\n # Update GraphicsScene\n def add_ROI(self, pixel_coords):\n self.regionCounter += 1\n\n markerSize = 25\n ellipse_item = QGraphicsEllipseItem(QRectF(QPointF(pixel_coords.x() - markerSize/2, pixel_coords.y() - markerSize/2), QSizeF(markerSize, markerSize)))\n ellipse_item.setBrush(QBrush(QColor('red')))\n self.addItem(ellipse_item)\n\n label_font = QFont()\n label_font.setPointSize(15)\n region_string = 'r' + str(self.regionCounter).zfill(2)\n ellipse_item_label = QGraphicsTextItem(region_string)\n ellipse_item_label.setPos(pixel_coords)\n ellipse_item_label.setFont(label_font)\n self.addItem(ellipse_item_label)\n\n self.items_dict.update({region_string : {'ellipse_item' : ellipse_item, 'ellipse_item_label' : ellipse_item_label, 'pixel_coords' : pixel_coords, 'ap_item_label' : {}}})\n\n # Remove las added ROI\n # Update GraphicsScene\n def remove_ROI(self):\n region_string = 'r' + str(self.regionCounter).zfill(2)\n 
self.removeItem(self.items_dict[region_string]['ellipse_item'])\n self.removeItem(self.items_dict[region_string]['ellipse_item_label'])\n self.removeArrow(self.items_dict[region_string]['arrow'])\n for i in range(0, len(self.items_dict[region_string]['ap_item_label'].keys())):\n self.remove_ap(region_string, self.items_dict[region_string]['ap_item_label'].keys()[i])\n\n del self.items_dict[region_string]\n self.regionCounter = self.regionCounter - 1\n\n # Add line between two ROIs\n # Input: number ROI 1 int\n # number ROI 2 int\n # Update GraphicsScene\n def add_edge(self, roi_num_1, roi_num_2):\n pixel_coords_1 = self.items_dict['r' + str(roi_num_1).zfill(2)]['pixel_coords']\n pixel_coords_2 = self.items_dict['r' + str(roi_num_2).zfill(2)]['pixel_coords']\n self.line_dict[(str(roi_num_1) + '-' + str(roi_num_2))] = QGraphicsLineItem(QLineF(pixel_coords_2, pixel_coords_1))\n self.addItem(self.line_dict[(str(roi_num_1) + '-' + str(roi_num_2))])\n\n # Remove line between two ROIs\n # Input: edge label string e.g. 1-2\n # Update GraphicsScene\n def remove_edge(self, edge):\n self.removeItem(self.line_dict[edge])\n del self.line_dict[edge]\n\n # Add general atomic proposition label\n # Input: region label string\n # ap label string\n # Update GraphicsScene\n def add_ap(self, region, ap):\n label_font = QFont()\n label_font.setPointSize(15)\n ap_item_label = QGraphicsTextItem(ap)\n ap_item_label.setPos(QPointF(self.items_dict[region]['pixel_coords'].x()-25, self.items_dict[region]['pixel_coords'].y()))\n ap_item_label.setFont(label_font)\n self.addItem(ap_item_label)\n\n self.items_dict[region]['ap_item_label'].update({ap : ap_item_label})\n\n # Remove general atomic proposition label\n # Input: region label string\n # ap label string\n # Update GraphicsScene\n def remove_ap(self, region, ap):\n self.removeItem(self.items_dict[region]['ap_item_label'][ap])\n\n # Reset graphics scene\n # Update GraphicsScene\n def reset(self):\n for i in range(0, self.regionCounter):\n self.remove_ROI()\n for i in range(0, len(self.line_dict)):\n self.remove_edge(self.line_dict.keys()[0])\n\n self.regionCounter = 0\n self.ellipse_items = []\n self.ellipse_items_labels = []\n self.pixel_coords_list = []\n self.line_dict = {}\n\n # Load map\n # Input: scenario name string\n # Update GraphicsScene\n def load_map(self, scenario):\n self.scenario = scenario\n map_yaml = os.path.join(rospkg.RosPack().get_path('rqt_simulation'), 'scenarios', scenario, 'map.yaml')\n self.loadConfig(map_yaml)\n map = 'map.png'\n\n map_file = os.path.join(rospkg.RosPack().get_path('rqt_simulation'), 'scenarios', scenario, map)\n pixmap = QPixmap(map_file)\n self.mapSize = pixmap.size()\n self.addPixmap(pixmap)\n\n # Add world origin\n self.worldOrigin = QPointF(-self.map_origin[0]/self.map_resolution, self.map_origin[1]/self.map_resolution + self.mapSize.height())\n self.addCoordinateSystem(self.worldOrigin, 0.0)\n\n # Send signal if mouse button is pressed\n # Input: click event\n # Output: pixel coordinates QPointF\n def mousePressEvent(self, event):\n pos = event.lastScenePos()\n self.signalMousePressedPos.emit(pos)\n\n # Send signal if mouse button is released\n # Input: release event\n # Output: pixel coordinates QPointF\n def mouseReleaseEvent(self, event):\n pos = event.lastScenePos()\n self.signalMouseReleasedPos.emit(pos)\n\n # Send signal if mouse is moving in graphic scene\n # Input: move event\n # Output: pixel coordinates QPointF\n def mouseMoveEvent(self, event):\n pos = event.lastScenePos()\n 
self.signalMouseMovePos.emit(pos)\n\n # Add arrow to graphic scene\n # Input: start point QPointF\n # end point QPointF\n # drawing options QPen\n # Output: arrow list with 3 QGraphicsLineItem\n # Update GraphicsScene\n def addArrow(self, startPoint = QPointF(), endPoint = QPointF(), pen = QPen()):\n alpha = 5*pi/6\n arrowLength = 10\n\n theta = atan2((endPoint.y() - startPoint.y()) , (endPoint.x() - startPoint.x()))\n\n gamma1 = theta + alpha\n gamma2 = theta - alpha\n arrowPoint_1 = QPointF(endPoint.x() + arrowLength * cos(gamma1), endPoint.y() + arrowLength * sin(gamma1))\n arrowPoint_2 = QPointF(endPoint.x() + arrowLength * cos(gamma2), endPoint.y() + arrowLength * sin(gamma2))\n line_0 = QLineF(startPoint, endPoint)\n line_1 = QLineF(endPoint,arrowPoint_1)\n line_2 = QLineF(endPoint,arrowPoint_2)\n\n line_item_0 = QGraphicsLineItem(line_0)\n line_item_0.setPen(pen)\n line_item_1 = QGraphicsLineItem(line_1)\n line_item_1.setPen(pen)\n line_item_2 = QGraphicsLineItem(line_2)\n line_item_2.setPen(pen)\n\n arrowItems = [line_item_0, line_item_1, line_item_2]\n\n self.addItem(line_item_0)\n self.addItem(line_item_1)\n self.addItem(line_item_2)\n\n return arrowItems\n\n # Remove arrow from graphics scene\n # Input: arrow list with 3 QGraphicsLineItem\n # Update GraphicsScene\n def removeArrow(self, arrow):\n for n in arrow:\n self.removeItem(n)\n\n\n def scale_map(self, graphicsView, scenario):\n rectF = graphicsView.geometry()\n if (float(rectF.width())/self.mapSize.width() < float(rectF.height())/self.mapSize.height()):\n scale = float(rectF.width())/self.mapSize.width()\n elif scenario == 'pal_office' or scenario == 'sml':\n scale = 0.7\n else:\n scale = float(rectF.height())/self.mapSize.height()\n transform = QTransform(scale, 0, 0.0, scale, 0, 0)\n\n return transform\n\n # Load ROI's and edges from a FTS\n # Input: FTS dict\n # Update GraphicsScene\n def load_graphic_from_FTS(self, FTS):\n sorted_keys = FTS.region_of_interest.keys()\n sorted_keys.sort()\n\n arrow_length = 50\n\n # Add all the ROI's and edges\n for i in range(0, len(FTS.region_of_interest)):\n region_string = 'r' + str(i+1).zfill(2)\n pixel_coords = self.worldToPixel(FTS.region_of_interest[sorted_keys[i]]['pose']['position'])\n self.add_ROI(pixel_coords)\n\n for j in range(0, len(FTS.region_of_interest[sorted_keys[i]]['propos'])):\n if sorted_keys[i] != FTS.region_of_interest[sorted_keys[i]]['propos'][j]:\n self.add_ap(sorted_keys[i], FTS.region_of_interest[sorted_keys[i]]['propos'][j])\n\n quaternion = Quaternion(FTS.region_of_interest[sorted_keys[i]]['pose']['orientation'])\n rot_axis = quaternion.axis\n theta = quaternion.angle * rot_axis[2]\n end_point = QPointF(pixel_coords.x() + arrow_length * cos(theta), pixel_coords.y() - arrow_length * sin(theta))\n arrow = self.addArrow(pixel_coords, end_point)\n self.items_dict[region_string]['arrow'] = arrow\n\n # Add all edges to graphics scene\n for i in range(0, len(FTS.region_of_interest)):\n for j in range(0, len(FTS.region_of_interest[sorted_keys[i]]['edges'])):\n index = sorted_keys.index(FTS.region_of_interest[sorted_keys[i]]['edges'][j]['target'])\n if i < index:\n if (str(i+1) + '-' + str(index+1)) not in self.line_dict.keys():\n self.add_edge(i+1, index+1)\n else:\n if (str(index+1) + '-' + str(i+1)) not in self.line_dict.keys():\n self.add_edge(index+1, i+1)\n\n\n # Add coordinate system\n # Input: origin QPointF\n # angle Float\n # Update GraphicsScene\n def addCoordinateSystem(self, origin = QPointF(), angle = 0.0):\n XAxis = QPointF(origin.x() + 100, 
origin.y())\n YAxis = QPointF(origin.x(), origin.y() - 100)\n\n self.addArrow(origin, XAxis)\n self.addArrow(origin, YAxis)\n XLabel = self.addText('X', QFont())\n XLabel.setPos(XAxis)\n YLabel = self.addText('Y', QFont())\n YLabel.setPos(YAxis)\n\n # Load the data from map.yaml file\n # Input: file string\n def loadConfig(self, filename):\n stream = open(filename, 'r')\n data = yaml.safe_load(stream)\n stream.close()\n self.map_image = data['image']\n self.map_resolution = data['resolution']\n self.map_origin = tuple(data['origin'])\n self.map_negate = data['negate']\n self.map_occupied_thresh = data['occupied_thresh']\n self.map_free_thresh = data['free_thresh']\n qualisys = data['qualisys']\n if qualisys:\n self.tf_qualisys_to_map = data['tf_qualisys_to_map']\n rospy.loginfo('rqt_simulation map tf to qualisys : %s' % (self.tf_qualisys_to_map))\n rospy.loginfo('rqt_simulation map : %s' % (self.scenario))\n rospy.loginfo('rqt_simulation map resolution : %.6f' % (self.map_resolution))\n rospy.loginfo('rqt_simulation map origin : %s' % (self.map_origin,))\n rospy.loginfo('rqt_simulation map negate : %s' % (self.map_negate))\n rospy.loginfo('rqt_simulation map occupied threshold : %s' % (self.map_occupied_thresh))\n rospy.loginfo('rqt_simulation map free threshold : %s' % (self.map_free_thresh))\n\n","sub_path":"ltl_gui/rqt_simulation/include/rqt_simulation/MapGraphicsScene.py","file_name":"MapGraphicsScene.py","file_ext":"py","file_size_in_byte":12386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"45794874","text":"import pandas as pd\nimport numpy\nimport numpy.ma as ma\nimport math\nimport time\n\ndef reduceCols(A, startIndex, numCols):\n m = len(A)\n n = len(A[0])\n B = []\n for i in range(m):\n B.append(A[i][startIndex:(startIndex+numCols)])\n return B\n\ndef reduceRows(A, k):\n B = A[:k]\n return B\n\ndef norm(v):\n s = 0\n for i in range(len(v)):\n s = s + (v[i] * v[i])\n n_v = math.sqrt(s)\n return n_v\n\ndef normalise(A, col, n_col):\n m = len(A)\n #print(A)\n for i in range(m):\n A[i][col] = A[i][col] / n_col\n\ndef diag(entries, numRows, numCols):\n D = []\n for i in range(numRows):\n row = []\n for j in range(numCols):\n if j == i:\n row.append(entries[i])\n else:\n row.append(0)\n D.append(row)\n return D\n\ndef getIdentity(numRows, numCols):\n entries = [1] * min(numRows, numCols)\n I = diag(entries, numRows, numCols)\n return I\n\ndef copy(M):\n C = M.view()\n return C\n\ndef svd(A, mRow, nCol, kRank):\n #U = reduceCols(copy(A), kRank)\n #V = getIdentity(nCol, kRank)\n start_time = time.time()\n U = copy(A)\n V = getIdentity(nCol, nCol)\n tol = math.exp(-8)\n converge = tol + 1\n\n j_time = time.time()\n while converge > tol:\n print('converge', converge)\n for j in range(1, nCol):\n for i in range(kRank):\n #print('i', i)\n alpha = 0\n beta = 0\n gamma = 0\n #for k in range(kRank):\n for k in range(mRow):\n alpha = alpha + ((U[k][i])**2)\n beta = beta + ((U[k][j])**2)\n gamma = gamma + (U[k][j] * U[k][i])\n #print(alpha)\n #print(gamma)\n #print(beta)\n secargden = math.sqrt(alpha * beta)\n if secargden != 0:\n converge = min(converge, abs(gamma) / math.sqrt(alpha * beta))\n t = 0\n if gamma != 0:\n zeta = (beta - alpha) / (2 * gamma)\n sign = 0\n if zeta != 0:\n sign = int(zeta / abs(zeta))\n t = sign / (abs(zeta) + math.sqrt(1 + (zeta * zeta)))\n else:\n t = 0\n c = 1 / math.sqrt(1 + (t * t))\n s = c * t\n #for k in range(kRank):\n for k in range(mRow):\n t = U[k][i]\n U[k][i] = (c * t) - (s * U[k][j])\n U[k][j] = (s * t) 
+ (c * U[k][j])\n for k in range(nCol):\n t = V[k][i]\n V[k][i] = (c * t) - (s * V[k][j])\n V[k][j] = (s * t) + (c * V[k][j])\n if ((j+1)%100) == 0:\n print('j is', j)\n next_time = time.time()\n print('time taken by last 100 iterations is', (next_time - j_time), 'seconds')\n j_time = next_time\n singvals = [0] * kRank\n #singvals = [0] * nCol\n for j in range(kRank):\n #for j in range(nCol):\n Uj = []\n for i in range(mRow):\n Uj.append(U[i][j])\n singvals[j] = norm(Uj)\n normalise(U, j, singvals[j]) \n #print(Uj)\n #print(singvals[j])\n #print(singvals)\n #singvals = sorted(singvals).reverse()\n #print(len(singvals))\n #print(kRank)\n S = diag(singvals, kRank, kRank)\n #S = diag(singvals, nCol, nCol)\n #print(kRank)\n #U = numpy.where(numpy.max(U, axis=0)==0, U, U*1./numpy.max(U, axis=0))\n U = reduceCols(U, 0, kRank)\n V = reduceCols(V, 0, kRank)\n #final_U = transpose(final_U)\n #final_V = transpose(final_V)\n print('Time taken to compute SVD was', (time.time() - start_time), 'seconds')\n return U, S, V\n \ndef rmse(M, N):\n numRows = len(M)\n numCols = len(M[0])\n total = 0\n ctr = 0\n for i in range(numRows):\n for j in range(numCols):\n diff = M[i][j] - N[i][j]\n sq = diff * diff\n total = total + sq\n ctr += 1\n mean = total/ctr\n error = math.sqrt(mean)\n return error\n\ndef writeToFile(U, V):\n numpy.savetxt('user_vectors.csv', U, delimiter=',')\n numpy.savetxt('item_vectors.csv', V, delimiter=',')\n\ndef readFromFile():\n #A = randomMatrix(100)\n df = pd.read_csv('utility_matrix_ut.csv', skiprows=1, index_col=False, header=None)\n df.drop(df.columns[0], axis=1, inplace=True)\n A = df.values\n #print(A[0])\n return A\n\nnumpy.seterr(divide='ignore', invalid='ignore')\n\nraw = readFromFile()\ncol_avg = numpy.mean(raw, axis=0)\nA = numpy.where(raw==0, ma.array(raw, mask=(raw==0)).mean(axis=0), raw)\nA = A - A.mean(axis=1, keepdims=True)\nm = len(A)\nn = len(A[0])\nprint('Generated')\nk = 14\nU, S, V = svd(A, m, n, k)\n#print('Dimensions of U are', len(U), len(U[0]))\n#print('Dimensions of S are', len(S), len(S[0]))\nT = numpy.transpose(V)\n#print('Dimensions of V_T are', len(T), len(T[0]))\nP = numpy.dot(numpy.dot(U, S), T)\n#print('Product computed. Dimensions are', len(P), len(P[0]))\nprint('Error is', rmse(P, A))\nwriteToFile(U, V)\n","sub_path":"svd_compute.py","file_name":"svd_compute.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"165694138","text":"import numpy as np\nimport torch\n\n__all__ = ['State', 'StateTensor']\n\nclass State(dict):\n def __init__(self, x, device='cpu', **kwargs):\n if not isinstance(x, dict):\n x = {'observation': x}\n for k, v in kwargs.items():\n x[k] = v\n if 'observation' not in x:\n raise Exception('State must contain an observation')\n if 'reward' not in x:\n x['reward'] = 0.\n if 'done' not in x:\n x['done'] = False\n if 'mask' not in x:\n x['mask'] = 1. 
- x['done']\n super().__init__(x)\n self.shape = ()\n self.device = device\n\n @classmethod\n def from_list(cls, list_of_states):\n device = list_of_states[0].device\n shape = (len(list_of_states), *list_of_states[0].shape)\n x = {}\n for key in list_of_states[0].keys():\n v = list_of_states[0][key]\n try:\n if torch.is_tensor(v):\n x[key] = torch.stack([state[key] for state in list_of_states])\n else:\n x[key] = torch.tensor([state[key] for state in list_of_states], device=device)\n except: # # pylint: disable=bare-except\n pass\n return StateTensor(x, shape, device=device)\n\n def apply(self, model, *keys):\n return self.apply_mask(self.as_output(model(*[self.as_input(key) for key in keys])))\n\n def as_input(self, key):\n return self[key].unsqueeze(0)\n\n def as_output(self, tensor):\n return tensor.squeeze(0)\n\n def apply_mask(self, tensor):\n return tensor * self.mask\n\n def update(self, key, value):\n x = {}\n for k in self.keys():\n if not k == key:\n x[k] = super().__getitem__(k)\n x[key] = value\n return self.__class__(x, device=self.device)\n\n @classmethod\n def from_gym(cls, state, device='cpu', dtype=np.float32):\n if not isinstance(state, tuple):\n return State({\n 'observation': torch.from_numpy(\n np.array(\n state,\n dtype=dtype\n ),\n ).to(device)\n }, device=device)\n\n observation, reward, done, info = state\n observation = torch.from_numpy(\n np.array(\n observation,\n dtype=dtype\n ),\n ).to(device)\n x = {\n 'observation': observation,\n 'reward': float(reward),\n 'done': done,\n }\n info = info if info else {}\n for key in info:\n x[key] = info[key]\n return State(x, device=device)\n\n @property\n def observation(self):\n return self['observation']\n\n @property\n def reward(self):\n return self['reward']\n\n @property\n def done(self):\n return self['done']\n\n @property\n def mask(self):\n return self['mask']\n\n def __len__(self):\n return 1\n\nclass StateTensor(State):\n def __init__(self, x, shape, device='cpu', **kwargs):\n if not isinstance(x, dict):\n x = {'observation': x}\n for k, v in kwargs.items():\n x[k] = v\n if 'observation' not in x:\n raise Exception('StateTensor must contain an observation')\n if 'reward' not in x:\n x['reward'] = torch.zeros(shape, device=device)\n if 'done' not in x:\n x['done'] = torch.tensor([False] * np.prod(shape), device=device).view(shape)\n if 'mask' not in x:\n x['mask'] = 1. 
- x['done'].float()\n super().__init__(x, device=device)\n self.shape = shape\n\n def update(self, key, value):\n x = {}\n for k in self.keys():\n if not k == key:\n x[k] = super().__getitem__(k)\n x[key] = value\n return self.__class__(x, self.shape, device=self.device)\n\n def as_input(self, key):\n value = self[key]\n return value.view((np.prod(self.shape), *value.shape[len(self.shape):])).float()\n\n def as_output(self, tensor):\n return tensor.view((*self.shape, *tensor.shape[1:]))\n\n def apply_mask(self, tensor):\n return tensor * self.mask.unsqueeze(-1) # pylint: disable=no-member\n\n def flatten(self):\n n = np.prod(self.shape)\n dims = len(self.shape)\n x = {}\n for k, v in self.items():\n x[k] = v.view((n, *v.shape[dims:]))\n return StateTensor(x, (n,), device=self.device)\n\n def view(self, shape):\n dims = len(self.shape)\n x = {}\n for k, v in self.items():\n x[k] = v.view((*shape, *v.shape[dims:]))\n return StateTensor(x, shape, device=self.device)\n\n @property\n def observation(self):\n return self['observation']\n\n @property\n def reward(self):\n return self['reward']\n\n @property\n def done(self):\n return self['done']\n\n @property\n def mask(self):\n return self['mask']\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n shape = self['mask'][key].shape\n return StateTensor({k:v[key] for (k, v) in self.items()}, shape, device=self.device)\n if isinstance(key, int):\n return State({k:v[key] for (k, v) in self.items()}, device=self.device)\n if torch.is_tensor(key):\n # some things may get lost\n d = {}\n shape = self['mask'][key].shape\n for (k, v) in self.items():\n try:\n d[k] = v[key]\n except: # pylint: disable=bare-except\n pass\n return self.__class__(d, shape, device=self.device)\n try:\n value = super().__getitem__(key)\n except KeyError:\n return None\n return value\n\n def __len__(self):\n return self.shape[0]\n","sub_path":"all/core/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521591749","text":"\r\n\r\n\r\n#O(n) - space\r\n#O(n) time\r\n#passed in leet code\r\n#logic is that at each level we have the number of elements added and accordingly add the next elements in the list - BFS approach\r\n# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\nfrom collections import deque\r\nclass Solution:\r\n def levelOrder(self, root):\r\n res = []\r\n if not(root):\r\n return res\r\n q= [root]\r\n #temp_lst = [] #if we declare here for each level we have to empty this\r\n while q:\r\n #print(\"....\")\r\n temp_lst = [] #for each level we are creating a new list, that is for the current level\r\n size = len(q)\r\n for i in range(size): #as length of queue changes for every iteration, we do not use length here directly\r\n #print(\"..----..\")\r\n curr = q.pop(0) #pop first element in queue\r\n temp_lst.append(curr.val)\r\n if curr.left:\r\n #print(\"..kkkk..\")\r\n q.append(curr.left)\r\n if curr.right:\r\n #print(\"..klklk..\")\r\n q.append(curr.right)\r\n res.append(temp_lst)\r\n return res\r\n\r\n\r\n\r\nfrom collections import deque\r\nclass Solution:\r\n def levelOrder(self, root):\r\n if not root: return []\r\n queue, res = deque([root]), []\r\n\r\n while queue:\r\n cur_level, size = [], len(queue)\r\n for i in range(size):\r\n node = queue.popleft()\r\n if node.left:\r\n queue.append(node.left)\r\n if node.right:\r\n 
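# enqueue the right child for the next level\r\n 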
queue.append(node.right)\r\n cur_level.append(node.val)\r\n res.append(cur_level)\r\n return res\r\n\r\n\r\n\r\n\r\n#Level order traversal with list implementation\r\ndef levelOrder(self, root):\r\n \"\"\"\r\n :type root: TreeNode\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n result = []\r\n if not root:\r\n return result\r\n\r\n lst = [root]\r\n while lst:\r\n result.append([node.val for node in lst])\r\n size = len(lst)\r\n for i in range(size):\r\n node = lst[i]\r\n if node.left:\r\n lst.append(node.left)\r\n if node.right:\r\n lst.append(node.right)\r\n lst = lst[size:]\r\n return result","sub_path":"prob_56.py","file_name":"prob_56.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"285068277","text":"import cv2\nimport glob\nimport numpy as np \nfrom utils import show_img, draw_bbox, rgb2gray, resize_img\nfrom tools import Functions\n\ndef mean_adaptive_threshold(img):\n\tthreshold = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n\treturn threshold\n\ndef gaussian_adaptive_threshold(img):\n\tthreshold = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)\n\treturn threshold\n\ndef find_contour(img):\n\t_, contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\treturn contours\n\ndef preprocess(img):\n\n\thsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\thue, saturation, value = cv2.split(hsv)\n\tvalue = cv2.equalizeHist(value)\n\t# kernel to use for morphological operations\n\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n\t# applying topHat/blackHat operations\n\ttopHat = cv2.morphologyEx(value, cv2.MORPH_TOPHAT, kernel)\n\tblackHat = cv2.morphologyEx(value, cv2.MORPH_BLACKHAT, kernel)\n\n\t# add and subtract between morphological operations\n\tadd = cv2.add(value, topHat)\n\tsubtract = cv2.subtract(add, blackHat)\n\n\t# applying gaussian blur on subtract image\n\tblur = cv2.GaussianBlur(subtract, (5, 5), 0)\n\n\tthreshold = gaussian_adaptive_threshold(blur)\n\tcontours = find_contour(threshold)\n\treturn contours, threshold\n\ndef matching_chars(c, pos_char, check_list):\n \"\"\"\n find the list of characters that match c; together they may form a license number\n \"\"\"\n list_matching_chars = [c]\n count = 0\n for pos_matching in pos_char:\n if (c == pos_matching) or (pos_matching in check_list):\n continue\n for char in list_matching_chars:\n \n distance_between_chars = Functions.distanceBetweenChars(char, pos_matching)\n\n angle_between_chars = Functions.angleBetweenChars(char, pos_matching)\n\n area_ratio = float(abs(char.boundingRectArea - pos_matching.boundingRectArea)) / float(\n char.boundingRectArea)\n \n width_ratio = float(abs(char.boundingRectWidth - pos_matching.boundingRectWidth)) / float(\n char.boundingRectWidth)\n \n height_ratio = float(char.boundingRectHeight - pos_matching.boundingRectHeight) / float(\n char.boundingRectHeight)\n\n if distance_between_chars < (char.diagonalSize * 2) and \\\n angle_between_chars < 10.0 and \\\n area_ratio < 0.5 and \\\n width_ratio < 0.8 and \\\n height_ratio < 0.2:\n list_matching_chars.append(pos_matching)\n count += 1\n # print('haha' + str(count))\n # print('end')\n if len(list_matching_chars) > 3:\n return list_matching_chars\n return []\n\ndef possible_license_plate(contours):\n\tpos_char = []\n\tfor c in contours:\n\t\tcontour = Functions.Contour(c)\n\t\tif 
Functions.checkIfChar(contour):\n\t\t\tpos_char.append(contour)\n\t# print([x.centerX for x in pos_char])\n\tpos_char = sorted(pos_char, key=lambda x: x.centerX)\n\t# print([x.centerX for x in pos_char])\n\tcheck_list = []\n\tlicense_list = []\n\tfor c in pos_char:\n\t\tif c in check_list:\n\t\t\tcontinue\n\t\tlist_matching_chars = matching_chars(c, pos_char, check_list)\n\t\tif list_matching_chars:\n\t\t\tlicense_list.append(list_matching_chars)\n\t\t\tcheck_list.extend(list_matching_chars)\n\treturn [get_plate_detail(contours) for contours in license_list]\n\ndef get_plate_detail(contours):\n\tplate = Functions.Plate(contours)\n\tx1 = np.min([x.boundingRectX for x in plate.contours])\n\ty1 = np.min([x.boundingRectY for x in plate.contours])\n\tx2 = np.max([(x.boundingRectX + x.boundingRectWidth) for x in plate.contours])\n\ty2 = np.max([(x.boundingRectY + x.boundingRectHeight) for x in plate.contours])\n\tplate.box = (x1, y1, x2, y2)\n\tplate.width = x2 - x1 \n\tplate.height = y2 - y1 \n\tplate.area = plate.width * plate.height\n\tplate.center_x = (x1 + x2) / 2\n\tplate.center_y = (y1 + y2) / 2\n\treturn plate\n\ndef detect_plate(img):\n\t# img = cv2.imread(img_path)\n\twidth, height, chanel = img.shape\n\tcontours, threshold = preprocess(img)\n\t# show_img(threshold)\n\tpossible_plate_list = possible_license_plate(contours)\n\treturn possible_plate_list\n\nif __name__=='__main__':\n\timg_path = 'test.jpg'\n\timg = cv2.imread(img_path)\n\tplate_list = detect_plate(img)\n\n\tfor plate in plate_list:\n\t\tcontours = [x.contour for x in plate.contours]\n\t\timg = cv2.imread(img_path)\n\t\twidth, height, chanel = img.shape\n\t\timg_contours = np.zeros((width, height, chanel))\n\t\tcv2.drawContours(img_contours, contours, -1, (0, 255, 255))\n\t\tshow_img(img_contours)\n\n\t\tdraw_bbox(img, plate.get_extend_box())\n\t\tshow_img(img)","sub_path":"car_classification/alpr/preprocess4lpdetection.py","file_name":"preprocess4lpdetection.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347074938","text":"import re\nimport torch\nimport nibabel as nib\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom torch._six import container_abcs, string_classes, int_classes\n\nfrom src.data.datasets import BaseDataset\nfrom src.data.transforms import Compose, ToTensor\n\n\nnp_str_obj_array_pattern = re.compile(r'[SaUO]')\ndefault_collate_err_msg_format = (\n \"default_collate: batch must contain tensors, numpy arrays, numbers, \"\n \"dicts or lists; found {}\")\n\n\nclass LitsSegDataset(BaseDataset):\n \"\"\"The dataset of the Liver Tumor Segmentation Challenge (LiTS) in MICCAI 2017\n for the segmentation task.\n\n Ref:\n https://competitions.codalab.org/competitions/17094\n https://github.com/PatrickChrist/LITS-CHALLENGE/blob/master/submission-guide.md\n\n Args:\n data_split_file_path (str): The data split file path.\n transforms (BoxList): The preprocessing techniques applied to the data.\n augments (BoxList): The augmentation techniques applied to the training data (default: None).\n \"\"\"\n\n def __init__(self, data_split_file_path, transforms, augments=None, **kwargs):\n super().__init__(**kwargs)\n data_split_file = pd.read_csv(data_split_file_path)\n self.csv_name = Path(data_split_file_path).name\n patient_dirs = map(Path, data_split_file[data_split_file.type == self.type].path)\n self.data_paths = tuple(\n data_path\n for patient_dir in patient_dirs\n for data_path in zip(\n 
sorted(patient_dir.glob('**/*volume-*.nii')),\n (\n sorted(patient_dir.glob('**/*volume-*.nii'))\n if self.csv_name == 'testing.csv'\n else sorted(patient_dir.glob('**/segmentation-*.nii'))\n )\n )\n )\n self.transforms = Compose.compose(transforms)\n self.augments = Compose.compose(augments)\n self.to_tensor = ToTensor()\n\n def __getitem__(self, index):\n ct_path, gt_path = self.data_paths[index]\n nii_img = nib.load(ct_path.as_posix())\n ct = nii_img.get_fdata().astype(np.float32)[..., np.newaxis]\n gt = nib.load(gt_path.as_posix()).get_fdata().astype(np.int64)[..., np.newaxis]\n input_spacing = nii_img.header['pixdim'][1:4]\n\n if self.type == 'train':\n transforms_kwargs = {\n 'Resample': {\n 'input_spacings': (input_spacing, input_spacing),\n 'orders': (1, 0)\n },\n 'Clip': {\n 'transformed': (True, False)\n },\n 'MinMaxScale': {\n 'transformed': (True, False),\n }\n }\n ct, gt = self.transforms(ct, gt, **transforms_kwargs)\n ct, gt = self.augments(ct, gt)\n ct, gt = self.to_tensor(ct, gt)\n else:\n transforms_kwargs = {\n 'Resample': {\n 'input_spacings': (input_spacing,),\n 'orders': (1,)\n }\n }\n ct, = self.transforms(ct, **transforms_kwargs)\n ct, gt = self.to_tensor(ct, gt)\n metadata = {'input': ct, 'target': gt}\n\n if self.type == 'test':\n metadata.update(affine=nii_img.affine,\n header=nii_img.header,\n name=ct_path.name.replace('volume', 'segmentation'))\n return metadata\n\n def __len__(self):\n return len(self.data_paths)\n\n @classmethod\n def collate_fn(cls, batch):\n \"\"\"Puts each data field into a tensor with outer dimension batch size\n Ref:\n https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py\n \"\"\"\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(default_collate_err_msg_format.format(elem.dtype))\n\n return cls.collate_fn([torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, nib.nifti1.Nifti1Header):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: cls.collate_fn([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple\n return elem_type(*(cls.collate_fn(samples) for samples in zip(*batch)))\n elif isinstance(elem, container_abcs.Sequence):\n transposed = zip(*batch)\n return [cls.collate_fn(samples) for samples in transposed]\n\n raise TypeError(default_collate_err_msg_format.format(elem_type))\n","sub_path":"src/data/datasets/lits_seg_dataset.py","file_name":"lits_seg_dataset.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"476187872","text":"import matplotlib.pyplot as plt \nfrom matplotlib import rc \nimport numpy as np \n\n#age = [.03, .4, 2, 4] #Gyr\nlog_age = [np.log10(30e6), np.log10(.4e9), np.log10(2e9), np.log10(4e9)]\nCO_AD = [-12.86, 0.18,36.99, 37.15] #km/s\nHI_AD = [-8.15, 17.69, 50.43, 62.97] #km/s\nHI_low_errors = [23.515 / np.sqrt(1009), 68.237/ np.sqrt(430), 32.248 / np.sqrt(880), 33.043 / np.sqrt(3129)]\nHI_high_errors = [22.951 / np.sqrt(1009), 57.714 / np.sqrt(430), 37.272 / np.sqrt(880), 22.351 / np.sqrt(3129)]\nCO_low_errors = [35.494 / np.sqrt(738), 117.362 / np.sqrt(338), 55.888 / np.sqrt(713), 38.107 / np.sqrt(2320)]\nCO_high_errors = [23.334 / np.sqrt(738), 76.638 / np.sqrt(338), 41.397 / np.sqrt(713), 30.796/ np.sqrt(2320)]\n\nIWM_HI_AD = [-13.54, 6.32, 36.45, 50.43] #km/s\nIWM_HI_low_errors = [21.34 / np.sqrt(983), 36.66/ np.sqrt(420), 37.61 / np.sqrt(836), 25.31 / np.sqrt(2998)]\nIWM_HI_high_errors = [27.44 / np.sqrt(983), 81.77 / np.sqrt(420), 41.98 / np.sqrt(836), 21.14 / np.sqrt(2998)]\n\nHI_errors = np.row_stack((HI_low_errors, HI_high_errors))\nCO_errors = np.row_stack((CO_low_errors, CO_high_errors))\nIWM_HI_errors = np.row_stack((IWM_HI_low_errors, IWM_HI_high_errors))\n \nrc('font', family = 'serif')\nfig, ax=plt.subplots(1)\nfor axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(2)\nax.tick_params(axis='x',which='both',bottom='on',top='off', direction='out')\nax.tick_params(axis='x',which='both',top='on', direction='in')\nax.tick_params(axis='y',which='both',left='on',top='off', direction='out')\nax.tick_params(axis='y',which='both',right='on', direction='in')\nplt.tick_params(which='both', width=2)\nplt.tick_params(which='major', length=7)\nplt.tick_params(which='minor', length=4)\nplt.tick_params(labelsize=12) \nplt.minorticks_on()\n\nelb1 = plt.errorbar(log_age, CO_AD, yerr=CO_errors, capsize=7, c='darkcyan', linewidth = 3, label=r'$\\rm w.r.t.\\ CO$')\nelb2 = plt.errorbar(log_age, HI_AD, yerr=HI_errors, capsize=7, c='darkgrey', linewidth = 3,linestyle='-', label=r'$\\rm w.r.t.\\ HI$')\n#elb3 = plt.errorbar(log_age, IWM_HI_AD, yerr=IWM_HI_errors, capsize=5, linestyle='--', c='darkgrey', label=r'$\\rm w.r.t.\\ IWM\\ HI$')\n# elb1[-1][0].set_linestyle('-.')\n#elb3[-1][0].set_linestyle('--')\nplt.legend(frameon=False, loc=4)\n#plt.xscale('log')\n#plt.xlabel(r'$\\rm Average\\ Stellar\\ Age\\ (Gyr)$', fontsize=13)\nplt.xlabel(r'$\\rm Average\\ Stellar\\ Age\\ [log(yr)]$', fontsize=13)\nplt.ylabel(r'$\\rm Median\\ Asymmetric\\ Drift:\\ \\itv_{a}\\ \\rm(km\\ s^{-1})$', fontsize=13)\nplt.savefig('/Users/amandaquirk/Desktop/AD_transition.pdf', bbox_inches='tight')\n#print('HI IWM')\n#print(HI_errors)\n#print('CO')\n#print(CO_errors)\n","sub_path":"transition_plot.py","file_name":"transition_plot.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209754221","text":"\n\n#calss header\nclass _HALTER():\n\tdef __init__(self,): \n\t\tself.name = \"HALTER\"\n\t\tself.definitions = [u\"a piece of rope or a leather strap that is tied around an animal's head so that it can be led by someone or tied to something\", u\"a piece of women's clothing that is held in position by a strap that goes behind the neck so that the upper back and shoulders are not covered: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_halter.py","file_name":"_halter.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173292391","text":"from datetime import datetime\nfrom bs4 import BeautifulSoup\nimport locale, requests\n\ndef Tilburg013Loader():\n locale.setlocale(locale.LC_ALL,'nl_NL.UTF-8')#Dutch\n\n URL = 'http://www.013.nl/programma'\n container = []\n \n #Scrape the main site for links to events\n for link in BeautifulSoup(requests.get(URL).content,\"html.parser\").find('div',attrs={'class':'event-overview'}).findAll('a',href=True):\n #Compose the hyperlinks\n url = 'http://www.013.nl' + link['href']\n \n title = link.find('span',attrs={'class':'event__act'}).text\n date_raw = link.find('span',attrs={'class':'event__date'}).text.replace(\"'\", \"\")\n time_raw = link.find('div',attrs={'class':'event__meta'}).text.strip()[8:13]\n\n date_time = datetime.strptime(date_raw + ' ' + time_raw,'%a %d %b %y %H:%M')\n date = date_time.date()\n time = date_time.time()\n \n container.append([title,date,time,url]) \n \n locale.setlocale(locale.LC_ALL,'en_US.UTF-8')#English US\n \n return container","sub_path":"lvmscraper/eventscraper/Venues/Tilburg013.py","file_name":"Tilburg013.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21875921","text":"r\"\"\"A Cog5 solver in Python.\n\nThis is a pure Python implementation of the Cog5 solution using Numpy.\nThe exact solution takes the form,\n\n.. math::\n\n \\rho(r,t) &= \\rho_0\\, r^{-2}\n \\\\\n u(r,t) &= u_0\\, t\n \\\\\n T(r,t) &= \\frac{u_0}{\\Gamma} \\cdot r\n \\\\\n k &= 2 ~\\textrm{and}~ \\gamma = \\frac{1}{2}\n\nFree parameters: :math:`u_0`, :math:`\\rho_0`, and :math:`\\Gamma`.\n\n\"\"\"\n\nimport numpy as np\n\nfrom ...base import ExactSolver, ExactSolution, Jump, JumpCondition\n\n\nclass Cog5(ExactSolver):\n \"\"\"Computes the solution to the Cog5 problem.\n\n Computes the solution to the Cog5 problem with defaults rho0 = 1.8, u0 = 2.3,\n Gamma = 40.\n \"\"\"\n\n parameters = {\n 'rho0': \"density coefficient\",\n 'u0': \"velocity coefficient\",\n 'Gamma': \"|Gruneisen| gas parameter\",\n }\n\n rho0 = 1.8\n u0 = 2.3\n Gamma = 40.\n \n def __init__(self, **kwargs):\n\n super(Cog5, self).__init__(**kwargs)\n\n def _run(self, r, t):\n\n geometry = 3\n k = geometry - 1.\n gamma = 1. 
/ 2.\n bigGamma = self.Gamma\n\n density = self.rho0 * pow(r, -2) * \\\n np.ones(shape=r.shape) # mass density [g/cc]\n velocity = self.u0 * t * np.ones(shape=r.shape) # speed [cm/s]\n temperature = (self.u0 * r / bigGamma) * \\\n np.ones(shape=r.shape) # temperature [eV]\n\n pressure = bigGamma * density * temperature # pressure [dyn/cm^2]\n sie = pressure / density / (gamma - 1) # specific energy [erg/g]\n\n return ExactSolution([r, density, velocity, temperature, pressure,\n sie],\n names=['position',\n 'density',\n 'velocity',\n 'temperature',\n 'pressure',\n 'specific_internal_energy'])\n","sub_path":"exactpack/solvers/cog/cog5.py","file_name":"cog5.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28191730","text":"from sklearn.linear_model import LinearRegression\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Data variable declarations -------------------------------------\nTEMP_CSV = \"./tem10y.csv\"\n\n# ----------------------------------------------------\n# Functions\n# ----------------------------------------------------\n# Builds new training data based on the past 6 days of data\ndef make_data(data):\n x = [] # training data\n y = [] # labels\n interval = 6 # 6-day window\n temps = list(data[\"기온\"])\n\n for i in range(len(temps)):\n if i < interval: continue\n y.append(temps[i]) #one label is stored per 6 values\n xa = []\n for p in range(interval):\n d = i + p - interval #index of each of the 6 preceding values\n xa.append(temps[d]) #this would become a negative index - confusing\n x.append(xa)\n #could just as well build a dictionary instead, though that would be worse code!\n return (x, y)\n\n# ----------------------------------------------------\n# Data preparation\n# ----------------------------------------------------\ndf = pd.read_csv(TEMP_CSV, encoding=\"utf-8\")\n\n# Split into training-only and test-only years\ntrain_year = (df[\"연\"] <= 2015)\ntest_year = (df[\"연\"] >= 2016)\n\n# Split training & validation data\ndata_train, label_train = make_data(df[train_year])\ndata_test, label_test = make_data(df[test_year])\n\n# ----------------------------------------------------\n# Training\n# ----------------------------------------------------\nlr = LinearRegression(normalize=True)\nlr.fit(data_train, label_train)\npre = lr.predict(data_test)\n\n# Evaluate results - manual method\ndiff = abs(pre - label_test)\nprint('average=', sum(diff)/len(diff)) # mean error\nprint('max=', max(diff)) # maximum\nprint(lr.coef_, lr.intercept_) #inspect w, b\n\nac_score = lr.score(data_test, label_test) #when using LinearRegression\nprint(\"accuracy =\", ac_score)\n\n# ----------------------------------------------------\n# Output - compare with a graph\n# ----------------------------------------------------\nplt.figure(figsize=(10, 6), dpi=100)\nplt.plot(label_test, c='r')\nplt.plot(pre, c='b')\nplt.savefig('tenki-kion-lr.png')\nplt.show()","sub_path":"자율과제형/3 머신러닝 특강/src10/weather/temp_predict_05.py","file_name":"temp_predict_05.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"542700113","text":"\"\"\"\nTCP Server to capture UDF script output.\nUnlike HTTP Transport, we have to use a real server with proper bindings.\n\nIncoming connections may be blocked by firewalls!\nIt mostly affects local laptops trying to connect to Exasol located in remote data centre.\n\nThis module can work in two modes:\n\n 1) DEBUG MODE\n Useful for manual debugging during UDF script development.\n Accepts connections from all VM's, but displays output of first connected VM only.\n Runs forever, until stopped by Ctrl + C (SIGTERM).\n\n How to run: python -m pyexasol script_debug\n\n\n 
2) SCRIPT MODE\n Useful for production usage and during last stages of development.\n Accepts connections from all VM's and stores output into separate log files.\n Runs for one SQL statement only, stops automatically.\n\n How to run: ExaConnection.execute_with_udf_output()\n\n\nWe use ThreadingMixIn because:\na) Workload is pure I/O, so GIL should not be a problem.\nb) Potential amount of VM's connected in parallel is huge and may surpass 1000+, which may cause problems with forks.\n\n\"\"\"\nimport socket\nimport socketserver\nimport sys\nimport os\nimport shutil\nimport subprocess\nimport pathlib\n\nfrom . import utils\n\n\nclass ExaScriptOutputProcess(object):\n def __init__(self, host, port, output_dir=None, initial_ppid=None):\n self.host = host\n self.port = port\n\n self.output_dir = output_dir\n self.initial_ppid = initial_ppid\n\n self.server = None\n self.output_address = None\n\n self.proc = None\n\n def start(self):\n args = [sys.executable,\n '-m', 'pyexasol', 'script_output',\n '--output-dir', str(self.output_dir),\n '--ppid', str(utils.get_pid())\n ]\n\n if self.host:\n args.append('--host')\n args.append(self.host)\n\n if self.port:\n args.append('--port')\n args.append(str(self.port))\n\n self.proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n self.output_address = self.proc.stdout.readline().decode().rstrip('\\n')\n\n self.proc.stdout.close()\n\n def init_server_script_mode(self):\n output_dir = pathlib.Path(self.output_dir)\n\n if not output_dir.is_dir():\n raise ValueError(f\"Output_dir does not exist or not a directory: {output_dir}\")\n\n self.server = ExaScriptOutputServer((self.host, self.port), ExaScriptOutputScriptModeHandler)\n self.server.output_dir = output_dir\n self.server.initial_ppid = self.initial_ppid\n\n def handle_requests_script_mode(self):\n # Server is stopped by shutdown() call in handler after closing last connection\n self.server.serve_forever()\n self.server.server_close()\n\n def init_server_debug_mode(self):\n self.server = ExaScriptOutputServer((self.host, self.port), ExaScriptOutputDebugModeHandler)\n self.output_address = self.server.get_output_address()\n\n def handle_requests_debug_mode(self):\n # Stop server with SIGTERM (Ctrl + C, etc.)\n try:\n self.server.serve_forever()\n except KeyboardInterrupt:\n pass\n\n self.server.server_close()\n\n def send_output_address(self):\n sys.stdout.buffer.write(f'{self.server.get_output_address()}\\n'.encode())\n sys.stdout.buffer.flush()\n\n def get_output_address(self):\n if self.output_address is None:\n raise RuntimeError(\"Script output address 'host:port' is not available\")\n\n return self.output_address\n\n def join(self):\n code = self.proc.wait()\n\n if code != 0:\n raise RuntimeError(f\"Script output server process finished with exitcode: {code}\")\n\n def terminate(self):\n if self.proc:\n self.proc.terminate()\n\n\nclass ExaScriptOutputServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n connected_clients = 0\n total_clients = 0\n\n # Stop all sub-threads immediately\n daemon_threads = True\n allow_reuse_address = True\n\n output_dir = None\n initial_ppid = None\n\n def get_output_address(self):\n return f\"{socket.gethostbyname(socket.getfqdn())}:{self.socket.getsockname()[1]}\"\n\n def service_actions(self):\n utils.check_orphaned(self.initial_ppid)\n\n\nclass ExaScriptOutputHandler(socketserver.StreamRequestHandler):\n def setup(self):\n super().setup()\n self.server.connected_clients += 1\n self.server.total_clients += 1\n\n def finish(self):\n super().finish()\n 
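# one connection closed; in script mode the handler shuts the server down when this counter reaches zero\n 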
self.server.connected_clients -= 1\n\n\nclass ExaScriptOutputDebugModeHandler(ExaScriptOutputHandler):\n def handle(self):\n if self.server.connected_clients == 1:\n print('\\n-------- NEW STATEMENT --------', flush=True)\n\n # Read and flush line-by-line, show log to user as soon as possible\n for line in self.rfile:\n sys.stdout.buffer.write(line)\n sys.stdout.buffer.flush()\n else:\n dst = open(os.devnull, 'wb')\n shutil.copyfileobj(self.rfile, dst)\n dst.close()\n\n\nclass ExaScriptOutputScriptModeHandler(ExaScriptOutputHandler):\n def handle(self):\n path = self.server.output_dir / (str(self.server.total_clients).rjust(5, '0') + '.log')\n dst = open(path, 'wb')\n\n shutil.copyfileobj(self.rfile, dst)\n dst.close()\n\n def finish(self):\n super().finish()\n\n # No more opened connections? -> Shutdown server\n if self.server.connected_clients == 0:\n self.server.shutdown()\n","sub_path":"pyexasol/script_output.py","file_name":"script_output.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341776081","text":"# *_*coding:utf-8 *_*\nimport sys\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QMainWindow, QMenu, QAction, QApplication, qApp, QLCDNumber, QSlider, QVBoxLayout\n\n\nclass Example(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.InitUI()\n\n def InitUI(self):\n\n self.InitStausBar()\n\n self.InitFileMenu()\n\n self.InitEditMenu()\n\n self.InitViewMenu()\n\n self.InitTool()\n\n # self.InitCDNumber()\n\n self.setGeometry(300,300,300,200)\n self.setWindowTitle(\"Submenu\")\n self.show()\n\n def contextMenuEvent(self, event):\n\n cmenu = QMenu(self)\n\n newAct = cmenu.addAction(\"New\")\n opnAct = cmenu.addAction(\"Open\")\n quitAct = cmenu.addAction(\"Quit\")\n action = cmenu.exec_(self.mapToGlobal(event.pos()))\n\n if action == quitAct:\n qApp.quit()\n\n def InitFileMenu(self):\n menubar = self.menuBar()\n fileMenu = menubar.addMenu(\"File\")\n\n impMenu = QMenu('Import', self)\n impAct = QAction(\"Import mail\", self)\n impMenu.addAction(impAct)\n newAct = QAction(\"New\", self)\n exportMenu = QMenu(\"export\", self)\n exportCVS = QAction(\"export CVS\", self)\n exportSQL = QAction(\"export SQL\", self)\n exportMenu.addAction(exportCVS)\n exportMenu.addAction(exportSQL)\n\n #File Main\n fileMenu.addAction(newAct)\n fileMenu.addMenu(impMenu)\n fileMenu.addMenu(exportMenu)\n\n def InitEditMenu(self):\n menubar = self.menuBar()\n\n fileMenu = menubar.addMenu(\"Edit\")\n formatAct = QAction(\"format\",self)\n\n findMenu = QMenu(\"find\",self)\n findAct = QAction(\"find\",self)\n replaceAct = QAction(\"replace\",self)\n findMenu.addAction(findAct)\n findMenu.addAction(replaceAct)\n\n #Edit Main\n fileMenu.addAction(formatAct)\n fileMenu.addMenu(findMenu)\n\n def InitViewMenu(self):\n menubar = self.menuBar()\n viewMenu = menubar.addMenu('View')\n viewStatAct = QAction('View statusbar', self, checkable=True)\n viewStatAct.setStatusTip('View statusbar Tip')\n viewStatAct.setChecked(True)\n viewStatAct.triggered.connect(self.toggleMenu)\n\n viewMenu.addAction(viewStatAct)\n menubar.addMenu(viewMenu)\n\n def toggleMenu(self, status):\n self.statusbar = self.statusBar()\n if status:\n self.statusbar.showMessage('Ready')\n else:\n self.statusbar.showMessage('Ready GO?')\n\n def InitStausBar(self):\n self.statusbar = self.statusBar()\n self.statusbar.showMessage('Ready')\n\n\n def InitTool(self):\n exitAct = QAction(QIcon('ee.jpg'), 
'Exit', self)\n        exitAct.setShortcut(\"Ctrl+Q\")\n        exitAct.triggered.connect(qApp.quit)\n\n        self.toolbar = self.addToolBar('Exit')\n        self.toolbar.addAction(exitAct)\n\n    def InitCDNumber(self):\n        lcd = QLCDNumber(self)\n        sld = QSlider(Qt.Horizontal, self)\n\n        vbox = QVBoxLayout()\n        vbox.addWidget(lcd)\n        vbox.addWidget(sld)\n\n        self.setLayout(vbox)\n        sld.valueChanged.connect(lcd.display)\n\n\n\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Example()\n    sys.exit(app.exec_())\n\n\n","sub_path":"gui/pyqt/menu/PreMenuDemo.py","file_name":"PreMenuDemo.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"182613498","text":"from __future__ import unicode_literals\nimport json\nfrom general_tools.url_utils import get_url\n\n\nclass Language(object):\n    def __init__(self, json_obj=None):\n        \"\"\"\n        Class constructor. Optionally accepts an object for initialization.\n        :param object json_obj: An object to initialize the instance member variables\n        \"\"\"\n        # deserialize\n        if json_obj:\n            self.__dict__ = json_obj\n\n        else:\n            self.ln = ''\n            self.gw = False\n            self.ang = ''\n            self.lr = ''\n            self.ld = 'ltr'\n            self.lc = ''\n            self.alt = []\n            self.pk = 0\n            self.cc = []\n\n    @staticmethod\n    def load_languages():\n        return_val = []\n\n        lang_file = 'http://td.unfoldingword.org/exports/langnames.json'\n        langs = json.loads(get_url(lang_file))\n        for lang in langs:\n            return_val.append(Language(lang))\n\n        return return_val\n","sub_path":"door43_tools/language_handler.py","file_name":"language_handler.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"82889955","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('estimating', '0004_auto_20150519_2129'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='quickstimate',\n            name='envelope',\n            field=models.ForeignKey(blank=True, to='production.Envelope', null=True),\n        ),\n    ]\n","sub_path":"estimating/migrations/0005_auto_20150519_2134.py","file_name":"0005_auto_20150519_2134.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"382464663","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProblem 0015: the plain-text file city.txt holds city information; its content (including the braces) looks like this:\n\n{\n    \"1\" : \"上海\",\n    \"2\" : \"北京\",\n    \"3\" : \"成都\"\n}\n\"\"\"\n\nimport json, xlwt\n\nfrom collections import OrderedDict\n\nwith open('0015/city.txt', 'r') as f:\n    data = json.load(f, object_pairs_hook=OrderedDict)\n    # create the xls workbook object\n    wb = xlwt.Workbook()\n    # add a new sheet\n    sh = wb.add_sheet('city')\n    for index, (key, value) in enumerate(data.items()):\n        # write the data by cell position\n        sh.write(index, 0, key)\n        sh.write(index, 1, value)\n    # save the file\n    wb.save('0015/city.xls')","sub_path":"0015.py","file_name":"0015.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"249493565","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2015-2016 Alessio Colucci\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation 
the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\"\"\"This plugin is used to parse all the galleries based on the Coppermine Photo\nGallery. It follows the guidelines defined in the class Parser in\nparse_manager, defining a parse and a check functions, receiving a url and an\nalbum code and returning the generator of the single images, for the parse\nfunction, and an url to check that it uses Coppermine Photo Gallery, for the\ncheck function.\n\nIt can be used without a Parser instance, but it must be taken into account\nthat it imports functions contained in pygallery.\n\"\"\"\n\nimport re\nimport urllib.error\nimport urllib.parse\n\n# This bs4 can be either the one present in lib, because it is added to path by\n# Parser, or the standard installation\nimport bs4\n\nfrom logger import init_logger\nimport tools\n\n# This RegEx parses the album codes passed to the functions\n# This here is the old regex, for historical purposes\n# ALB_REGEX = r'''(?P<album>\\d+) # Parses the album code\n# ((:(?P<album2>\\d+) # This is the second album code, used as stop if provided\n# :(?P<album3>\\d*)) # This is used as step, default is 1 if there is stop\n# | # We check between range of albums or particular range of images\n# (\\[(?P<start>\\d*) # The starting point, default 1\n# : # Both semicolons must be present if there is at least 1 given parameter\n# (?P<stop>\\d*) # The finishing point, default to the end\n# :\n# (?P<step>\\d*)\\] # The step, default is 1\n# )?) # Neither of them is mandatory'''\nALB_REGEX = r'''(?P<album>\\d+) # Parses the main album code\n((:(?P<album2>\\d+) # This is the stop for a range of albums\n(:(?P<album3>\\d*) # The step for the range, default is 1\n?) # Step can be omitted, but depends on the presence of stop\n?) # Stop can be omitted, not giving the range of albums\n| # Or we can pass a range of images\n(\\[(?P<start>\\d*) # Start, default to 1\n(:(?P<stop>\\d*) # Stop, default to the end\n(:(?P<step>\\d*)) # Step, default 1\n?) # Step can be omitted, but depends on the presence of stop\n?\\]) # Also stop can be omitted\n?) 
# Both range of albums/images can be totally omitted\n'''\nREGEX = re.compile(ALB_REGEX, re.VERBOSE)\n\n\ndef check_404(page):\n \"\"\"This function checks that a read page is a 404 - Not Found generated by\n the Coppermine Photo Gallery.\n\n Arguments:\n - page (string or buffer or BeautifulSoup): the page to check, if not an\n instance of BeautifulSoup, is\n converted.\n\n Returned object:\n - boolean: True if the page is a 404, False otherwise.\n \"\"\"\n # Conversion\n if not isinstance(page, bs4.BeautifulSoup):\n page = bs4.BeautifulSoup(page)\n # We check the presence of this tag, which contains the error\n tag = page.select_one('div.cpg_message_warning > span.cpg_user_message')\n return tag is not None\n\n\ndef check(url):\n \"\"\"This function is defined as described in Parser in parse_manager.\n It is used to check whether an url can be parsed by this plugin.\n If it catches a connection error, it refuses the url.\n\n Arguments:\n - url (string): the url to check to control the compatibility of the page\n with this plugin.\n\n Returned object:\n - boolean: a boolean value depending on the result of the check.\n It is False if there is a connection error that is caught and\n handled by the function.\n \"\"\"\n # Check for connection errors\n try:\n page = tools.read_url(url)\n except urllib.error.URLError as exc:\n # Log in case of failing and negative returned value\n init_logger().error(('An error occurred while opening the page \"{}\", '\n 'skipping: {!r}').format(url, exc))\n return False\n else:\n # Otherwise we create the soup\n bs = bs4.BeautifulSoup(page, 'html.parser')\n # Check the final comment containing Coppermine Photo Gallery\n str_selector = lambda x: 'Coppermine Photo Gallery' in x\n taglist = bs.find_all(string=str_selector)\n return len(taglist) > 0 and isinstance(taglist[-1], bs4.Comment)\n # The old way, checking presence of tag in the page with a certain text\n # tag = bs.find('div', {'class': 'footer'})\n # return tag.text == 'Powered by Coppermine Photo Gallery'\n\n\ndef parse(url, album):\n \"\"\"This function parses an album code with a base url to return a generator\n of the images present in the range generated by the album code. 
It follows\n the guidelines given in parse_manager.\n\n Arguments:\n - url (string): the base url of the gallery to be parsed;\n - album (string): the album code containing info on the albums to be\n parsed.\n\n Returned object:\n - generator: a generator of the images, following the guidelines in\n parse_manager, each element is a tuple containing a tuple as\n first element for the relative path of the image and an url\n as second, which is the url of the image itself.\n \"\"\"\n # We obtain our generator for the albums\n albs = parse_album_code(album)\n for album_, start, stop, step in albs:\n # The url of the thumbnails of the album\n curr_url = urllib.parse.urljoin(url,\n 'thumbnails.php?album={}'.format(album_))\n # We try to read and parse the url\n try:\n page = tools.read_url(curr_url)\n except urllib.error.URLError as exc:\n init_logger().error(('An error occurred while opening the page '\n '\"{}\", skipping: {!r}'.format(curr_url, exc)))\n continue\n page = bs4.BeautifulSoup(page, 'html.parser')\n if check_404(page):\n init_logger().error(('404 - Page \"{}\" not found, '\n 'skipping'.format(curr_url)))\n continue\n # We get the title of the album to be included in the path and all the\n # images\n title = page.find('a', {'href': curr_url.split('/')[-1]}).text\n imgs = page.find_all('img', {'class': 'image thumbnail'})\n n_imgs = len(imgs)\n # We set our parameters for the range by checking their values\n if start is None or start == '':\n start = 1\n else:\n start = int(start)\n if stop is None or stop == '':\n stop = len(imgs)\n else:\n stop = int(stop)\n if step is None or step == '':\n step = 1\n else:\n step = int(step)\n init_logger().info(('Processing album \"{}\" (\"{}\") with '\n '{} images'.format(title, album_, n_imgs)))\n init_logger().info(('Processing images from {} to {} '\n 'by {}'.format(start, stop, step)))\n # Check of the validity of the range\n try:\n # We use start - 1 since the indexes start from 0 while our range\n # from 1\n n_imgs = len(imgs[start - 1:stop:step])\n except (TypeError, ValueError) as exc:\n init_logger().warning(('Indexes are invalid, skipping: '\n '{!r}'.format(exc)))\n # else:\n # # We skip directly in case of null list\n # if not n_imgs:\n # init_logger().warning(('Indexes could be invalid, '\n # 'resulting list is empty, skipping'))\n # continue\n for pos, thumb in enumerate(imgs[start - 1:stop:step], 1):\n # The name will be used as filename of the image\n name = thumb['alt']\n init_logger().debug('Processing image \"{}\" '\n '({} of {})'.format(name, pos, n_imgs))\n # We obtain the number referring to the image and we create our url\n parsed_url = urllib.parse.urlparse(thumb.parent['href'])\n pid = parsed_url.query.split('=')[-1]\n pid_url = urllib.parse.urljoin(url, 'displayimage.php?pid={}&'\n 'fullsize=1'.format(pid))\n # We read and check over the page\n try:\n pid_page = tools.read_url(pid_url)\n except urllib.error.URLError as exc:\n init_logger().error('An error occurred while opening the '\n 'page \"{}\", skipping: '\n '{!r}'.format(pid_url, exc))\n continue\n pid_page = bs4.BeautifulSoup(pid_page, 'html.parser')\n if check_404(pid_page):\n init_logger().error(('404 - Page \"{}\" not found, '\n 'skipping'.format(pid_url)))\n continue\n # We obtain the url of the image\n image_url = urllib.parse.urljoin(url,\n pid_page.find('img',\n {'id': 'fullsize_image'})['src'])\n init_logger().info('Correctly processed image \"{}\" (url: \"{}\") '\n '({} of {})'.format(name,\n image_url, pos, n_imgs))\n # We format the path and we yield 
our tuple\n yield (('{} - {}'.format(album_, tools.escape_path(title)), name),\n image_url)\n\n\ndef parse_album_code(album):\n \"\"\"This function is used to parse the album codes passed to the function\n parse. It returns a range of albums through a generator, where each album\n is represented by a tuple, containing the name, start, stop and step.\n If the album code is invalid, it is skipped.\n\n Arguments:\n - album (string): the album code to be parsed.\n\n Returned object:\n - generator: generator containing the range of albums inserted in the\n album code. Each element is a tuple formed by the name of the\n album, start, stop and step. An invalid album code is\n skipped.\n \"\"\"\n # Match with the regex\n res = REGEX.match(album)\n init_logger().debug('RegEx matched \"{}\" in \"{}\"'.format(res.string, album))\n # We check that is valid - the regex must match all the string\n if (res.end() - res.start()) != len(album):\n init_logger().warning(('Invalid argument because match is different '\n 'from argument itself, skipping'))\n # album2 and album3 are None if we match just an album with an eventual\n # range of images\n elif res.group('album2') is None and res.group('album3') is None:\n init_logger().debug('Correctly parsed album code containing '\n 'album \"{}\" going from \"{}\" to \"{}\" by '\n '\"{}\"'.format(res.group('album'),\n res.group('start'),\n res.group('stop'),\n res.group('step')))\n yield (res.group('album'), res.group('start'), res.group('stop'),\n res.group('step'), )\n # If instead start, stop and step are None, then we must build our range of\n # albums\n elif (res.group('start') is None and res.group('stop') is None and\n res.group('step') is None):\n # Conversion so that they can be used in range\n start = int(res.group('album'))\n stop = int(res.group('album2'))\n # Check for omission of step\n step = int(res.group('album3')) if res.group('album3') else 1\n init_logger().debug('Adding to albums to parse range of albums going '\n 'from \"{}\" to \"{}\" by '\n '\"{}\"'.format(start, stop, step))\n for album in range(start, stop, step):\n yield (str(album), None, None, None, )\n","sub_path":"plugins/cpg.py","file_name":"cpg.py","file_ext":"py","file_size_in_byte":12987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"470757624","text":"import random\nfrom typing import Type\n\nfrom .base import LoultObject\nfrom .objects import (ScrollOfQurk,\n Crown, Scolopamine, AlcoholBottle, Microphone, C4, Detonator, SuicideJacket, Flower,\n Costume, WealthDetector, RectalExam, Cigarettes, Lighter,\n MollyChute, CaptainHaddockPipe, Cacapulte, LaxativeBox,\n PandorasBox, EffectsStealer, Transmutator, SantasSack,\n Crouton, PoetryBook, EffectsDemultiplicator, XMagazine,\n Fridge)\nfrom .unused_objects import SimpleInstrument, PolynectarPotion, Cocaine, Revolver, RevolverCartridges, SniperRifle, \\\n SniperBullets, RPG, RPGRocket, Grenade\nfrom .weapons import Quiver\n\n# objects which can be given to users and are not specifically linked to any events\nAVAILABLE_OBJECTS = [Crown, Scolopamine,\n AlcoholBottle, C4,\n Detonator, SuicideJacket, Flower, Quiver,\n RectalExam, Costume, Cigarettes, Lighter, MollyChute,\n CaptainHaddockPipe, ScrollOfQurk, EffectsStealer,\n PandorasBox, LaxativeBox, Cacapulte, Transmutator,\n SantasSack, Crouton, PoetryBook, EffectsDemultiplicator,\n XMagazine, Fridge\n ]\n\n\ndef get_random_object() -> LoultObject:\n obj_class: Type[LoultObject] = random.choice(AVAILABLE_OBJECTS)\n return 
obj_class()\n","sub_path":"loult_serv/objects/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"497960438","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 13 13:21:12 2017\n\n@author: koohyh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef app():\n\n    msg = \"\"\"\n    Welcome to illumina RNA tools.\n    \"\"\"\n    print(msg)\n    \n#==============================================================================\n#     thp1_and_cd4_de_fname = '/Volumes/Data/ASS1_ATF4_Data/DE_genes.txt'\n#     df = get_illumina_data(thp1_and_cd4_de_fname)\n#     #ass1 = df[df['symbol']=='ASS1']\n#     #print(ass1)\n#     low_logFC_thp1 = df[df['logFC_thp1'] < -1.5]\n#     low_logFC_thp1 = low_logFC_thp1[low_logFC_thp1['adj.P.Val_thp1'] < 0.05]\n#     print(low_logFC_thp1.shape)\n#     # print(low_logFC_thp1)\n#     for id in low_logFC_thp1['symbol']:\n#         print(id)\n#     get_volcano_plots(df)\n#     compare_logFCs(df)\n#     \n#     \n#==============================================================================\n#==============================================================================\n#     print(df[df['logFC_thp1'] < -3])\n#==============================================================================\n    \n#=============================================================================\n    #sns.factorplot(x='Cell', y='Expression', hue='Condition', data=df)\n    \n    \n    \n#==============================================================================\n#     thp1_fname = '/Volumes/Data/ASS1_ATF4_Data/annotatedSAMPTHP1_FULL-SAMPTHP1_LAGD.xls'\n#     cd4_fname = '/Volumes/Data/ASS1_ATF4_Data/annotatedSAMPCD4T_FULL-SAMPCD4T_LAGD.xls'\n#     thp1_de_df = get_illumina_data(thp1_fname, definition=False)\n#     #print(thp1_de_df.shape)\n#     print(list(thp1_de_df))\n#     get_volcano_plots(thp1_de_df)\n#     \n#     #print(thp1_de_df[thp1_de_df['Padj'] < 0.01].shape)\n#     \n#     #cd4_de_df = get_illumina_data(cd4_fname, definition=False)\n#     #print(cd4_de_df.shape)\n#     \n#     \n#==============================================================================\n\ndef get_status_and_its_statistics(initial_df):\n    status = np.unique(initial_df['status'])\n    n_genes = []\n    dif_status = []\n    for s in status:\n        sub_df = initial_df[initial_df['status']==s]\n        num_genes = sub_df.shape[0]\n        if not s == 'notDeg':\n            n_genes.append(num_genes)\n            dif_status.append(s)\n        print(' state %s has %d genes' %(s, num_genes))\n    df = pd.DataFrame({'n_genes':n_genes, 'status':dif_status})\n    sns.set_style(\"whitegrid\")\n#    fig = plt.figure(figsize=(8, 8))\n    g = sns.factorplot(data=df, x='status', y='n_genes', kind='bar', size=6, color='blue')\n    g.set_xticklabels(rotation=90)\n    plt.xlabel('gene status')\n    plt.ylabel('number of genes')\n    sns.set(font_scale=2)\n    \n    return(status)\n    \n\n\ndef get_volcano_plots(deg_df, pval_thr=0.05, logFC_thr=2, cell_type='THP', fig_fname=None):\n    if not cell_type in ['THP', 'CD4']:\n        raise ValueError('Only THP and CD4 are accepted cell_type')\n    \n    helper_df = deg_df.copy()\n    helper_df['tph1_pval_log_based'] = -np.log10(helper_df['adj.P.Val_thp1'])\n    helper_df['cd4t_pval_log_based'] = -np.log10(helper_df['adj.P.Val_cd4t'])\n    y_min = -np.log10(pval_thr)\n    \n    fig = plt.figure()\n    if cell_type == 'THP':\n        ax = helper_df.plot(x='logFC_thp1', y='tph1_pval_log_based', kind='scatter', \n                      s=50, alpha=0.5, color='blue', figsize=(8,8), fontsize=20)\n        plt.title('Expression in 
THP1 Cells', fontsize=30)\n else:\n ax = helper_df.plot(x='logFC_cd4t', y='cd4t_pval_log_based', kind='scatter', \n s=50, alpha=0.5, color='blue', figsize=(8,8), fontsize=20)\n plt.title('Expression in CD4T Cells', fontsize=30)\n \n plt.hlines(y=y_min, xmax=6, xmin=2, linestyles='dashed', colors='red', linewidth=5)\n plt.hlines(y=y_min, xmax=-2, xmin=-6, linestyles='dashed', colors='red', linewidth=5)\n plt.vlines(x=logFC_thr, ymax=6, ymin=y_min, linestyles='dashed', colors='red', linewidth=5)\n plt.vlines(x=-logFC_thr, ymax=6, ymin=y_min, linestyles='dashed', colors='red', linewidth=5)\n plt.ylim((-1, 6))\n plt.xlim((-6,6))\n plt.xlabel('log2FC', fontsize=30)\n plt.ylabel('-log10(pval)', fontsize=30)\n ax.set_facecolor(\"lightgray\")\n #ax.set_axis_bgcolor(\"lightgray\")\n if fig_fname:\n fig = ax.get_figure()\n fig.savefig(fig_fname)\n plt.close(fig)\n \n \n\ndef compare_logFCs(df, logfc_thr=2, pval_thr=0.05, fig_fname=None):\n \n ax1 = df.plot(x='logFC_thp1', y='logFC_cd4t', kind='scatter', s=5,\n color='black', figsize=(8,8), fontsize=20, alpha=0.5)\n plt.ylim((-8, 8))\n plt.xlim((-8, 8))\n plt.hlines(y=logfc_thr, xmin=-8, xmax=8, linestyles='dashed', colors='red')\n plt.hlines(y=-logfc_thr, xmin=-8, xmax=8, linestyles='dashed', colors='red')\n plt.vlines(x=logfc_thr, ymin=-8, ymax=8, linestyles='dashed', colors='red')\n plt.vlines(x=-logfc_thr, ymin=-8, ymax=8, linestyles='dashed', colors='red')\n \n \n # up genes in THP1\n up_in_thp1_df = df[df['logFC_thp1'] > logfc_thr ]\n sig_up_in_thp1_df = up_in_thp1_df[up_in_thp1_df['adj.P.Val_thp1'] < pval_thr]\n #print(sig_up_in_thp1_df.shape)\n sig_up_in_thp1_df.plot(x='logFC_thp1', y='logFC_cd4t', kind='scatter', s=50,\n color='red', figsize=(8,8), fontsize=20, ax=ax1, alpha=0.5, label='Up in THP')\n #down in THP1\n down_in_thp1_df = df[df['logFC_thp1'] < -logfc_thr ]\n sig_up_in_thp1_df = down_in_thp1_df[down_in_thp1_df['adj.P.Val_thp1'] < pval_thr]\n sig_up_in_thp1_df.plot(x='logFC_thp1', y='logFC_cd4t', kind='scatter', s=50,\n color='yellow', figsize=(8,8), fontsize=20, ax=ax1, alpha=0.5, label='Down in THP')\n \n # up genes in CD4D\n up_in_cd4t_df = df[df['logFC_cd4t'] > logfc_thr ]\n sig_up_in_cd4t_df = up_in_cd4t_df[up_in_cd4t_df['adj.P.Val_cd4t'] < pval_thr]\n sig_up_in_cd4t_df.plot(x='logFC_thp1', y='logFC_cd4t', kind='scatter', s=50,\n color='green', figsize=(8,8), fontsize=20, ax=ax1, alpha=0.5, label='Up in CD4T')\n \n # down in CD4D\n down_in_cd4t_df = df[df['logFC_cd4t'] < -logfc_thr ]\n sig_down_in_cd4t_df = down_in_cd4t_df[down_in_cd4t_df['adj.P.Val_cd4t'] < pval_thr]\n sig_down_in_cd4t_df.plot(x='logFC_thp1', y='logFC_cd4t', kind='scatter', s=50,\n color='navy', figsize=(8,8), fontsize=20, ax=ax1, alpha=0.5, label='Down in CD4T')\n plt.xlabel('log2FC(THP1)', fontsize=30)\n plt.ylabel('log2FC(CD4T)', fontsize=30)\n plt.title('THP1 vs CD4D', fontsize=30)\n \n \n ax1.set_facecolor(\"lightgray\")\n if fig_fname:\n fig = ax1.get_figure()\n fig.savefig(fig_fname)\n plt.close(fig)\n \n \n\n\ndef get_illumina_data(illumina_fname, definition=False):\n df = pd.read_csv(illumina_fname, sep='\\t')\n return(df)\n \n ########\n # ilmn_id\tsymbol\tlogFC_cd4t\tAveExpr_cd4t\tadj.P.Val_cd4t\tlogFC_thp1\tAveExpr_thp1\tadj.P.Val_thp1\tstatus\n #########\n\ndef get_kdm4c_df(fname):\n df = pd.read_excel(fname)\n return(df)\n \nif __name__ == '__main__':\n app()\n \n \n 
","sub_path":"pyArginine/illumina_RNA_tools.py","file_name":"illumina_RNA_tools.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"557895133","text":"from os.path import join, exists, abspath\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfrom my_utils.python_utils.general import make_dir_if_not_exist\nfrom utils.visualization import plot_comp_dist\n\nfrom global_settings import RESULTS_DIR\n\n\ndef plot_info_bar(run_id, save_dir,\n informativeness_metrics_dir,\n num_bins, bin_limits, data_proportion):\n\n z_data_file = join(informativeness_metrics_dir, \"z_data[data={}].npz\".format(data_proportion))\n\n with np.load(z_data_file, \"r\") as f:\n all_z_mean = f['all_z_mean']\n all_z_stddev = f['all_z_stddev']\n\n # Plotting\n # =========================================== #\n save_dir = make_dir_if_not_exist(save_dir)\n plot_comp_dist(join(save_dir, 'z_mean_{}.pdf'), all_z_mean, x_lim=(-5, 5),\n subplot_adjust={'left': 0.1, 'right': 0.98, 'bottom': 0.05, 'top': 0.95})\n plot_comp_dist(join(save_dir, 'z_stddev_{}.pdf'), all_z_stddev, x_lim=(0, 3),\n subplot_adjust={'left': 0.1, 'right': 0.98, 'bottom': 0.05, 'top': 0.95})\n # =========================================== #\n\n\ndef main():\n # run_id = \"1_tc50_multiSave\"\n run_id = \"1_tc50_zdim100\"\n # run_id = \"5_tc50_zdim200\"\n # run_id = \"6_VAE_beta50\"\n # run_id = \"8_VAE\"\n save_dir = abspath(join(RESULTS_DIR, \"celebA\", \"FactorVAE\", \"auxiliary\", \"plot_z_stat\",\n \"FactorVAE_{}\".format(run_id)))\n\n informativeness_metrics_dir = abspath(join(RESULTS_DIR, \"celebA\", \"FactorVAE\",\n \"auxiliary\", \"informativeness_metrics_v3\", \"FactorVAE_{}\".format(run_id)))\n\n num_bins = 100\n bin_limits = \"(-4.0, 4.0)\"\n data_proportion = 1.0\n\n plot_info_bar(run_id, save_dir, informativeness_metrics_dir,\n num_bins, bin_limits, data_proportion)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"working/disentanglement/celebA/FactorVAE/exp_4_paper/plot_z_stat.py","file_name":"plot_z_stat.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469810237","text":"import sys\nimport os\nimport sqlite3\nimport datetime\nimport numpy as np\n\n# does not support adding new key-value pairs in the dictionary, the\n# number of columns must be manually set with \"{}\" in ALTER DATA, before\n# logging the data/running!\n\n# connecting to database:\n\ndbname = \"test\"\ntablename = \"measured_data\"\ncolnamelist = [\"Voltage\", \"Current\", \"CurrentTime\"]\n\n\n# test dict:\ntestdict = {\n \"Voltage\": \"10\",\n \"Current\": \"20\",\n \"Temperature\": \"0\",\n \"Testcol1\": 10,\n \"Testcol2\": 5,\n}\n\n# it was the only way i could implement date and time and still select them\ntimedict = {\"CurrentTime\": datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")}\n# merging the old dict with a new key value pair for time\ntestdict = {**timedict, **testdict}\n\n\ndef connectdb(dbname):\n try:\n global conn\n conn = sqlite3.connect(dbname)\n conn.commit()\n except sqlite3.connect.Error as err:\n print(\"Couldn't establish connection {}\".format(err))\n\n\n# cursor setup:\n\nconnectdb(dbname)\nmycursor = conn.cursor()\n\n# Optional command to delete a table, must be commented out\n# mycursor.execute(\"DROP TABLE measured_data\")\n\n# initializing a table with primary key as first 
column:\n\n\ndef createtable(tablename, dictname):\n\n mycursor.execute(\n \"CREATE TABLE IF NOT EXISTS {} (id INTEGER PRIMARY KEY,{} INTEGER,{} REAL,{} REAL,{} TEXT,{} REAL,{} REAL)\".format(\n tablename, *list(dictname.keys())\n )\n )\n conn.commit()\n\n\ncreatetable(tablename, testdict)\n\n# inserting in the measured values:\n\n\ndef updatetable(tablename, dictname):\n\n sql = \"INSERT INTO {} ({},{},{},{},{},{}) VALUES ({},{},{},{},{},{})\".format(\n tablename, *list(dictname.keys()), *list(dictname.values())\n )\n mycursor.execute(sql)\n conn.commit()\n\n\nupdatetable(\"measured_data\", testdict)\n\n\ndef printtable(tablename, dictname, date1, date2):\n\n print(\"{},{},{},{},{},{},{})\".format(\"id\", *list(dictname.keys())))\n sql = \"SELECT * from {} WHERE CurrentTime BETWEEN {} AND {}\".format(\n tablename, date1, date2\n )\n mycursor.execute(sql)\n\n data = mycursor.fetchall()\n for row in data:\n print(row)\n\n\nprinttable(tablename, testdict, 20180915155243, 20190915155242)\n\n\ndef exportdatatoarr(tablename, colnamelist):\n\n array = []\n\n sql = \"SELECT {},{},{} from {} \".format(*colnamelist, tablename)\n mycursor.execute(sql)\n data = mycursor.fetchall()\n\n for row in data:\n array.append(list(row))\n print(row)\n\n nparray = np.asarray(array)\n print(\"the numpy array:\")\n print(nparray)\n\n\nexportdatatoarr(tablename, colnamelist)\n","sub_path":"pysqlite_database.py","file_name":"pysqlite_database.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"323731070","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Account import Account\n\nParentSubscription = SuperClass(\"Subscription\",\"SalesTransaction\",__file__)\nclass Subscription(ParentSubscription):\n #ContractType\n PREPAID = 0\n CLOSEDPERIOD = 1\n #Frequency\n WEEKLY = 1\n MONTHLY = 2\n ANNUAL = 3\n #Actions\n INVOICE = 0\n EMAILANDINVOICE = 1\n\n frequencies = {1:\"Weekly\",2:\"Monthly\",3:\"Yearly\"}\n\n\n def defaults(self):\n ParentSubscription.defaults(self)\n self.Frequency = self.MONTHLY\n self.StartDate = today()\n self.EndDate = addMonths(today(),1)\n self.ContractType = self.PREPAID\n self.Actions = self.INVOICE\n self.FreqQty = 1\n\n def afterCopy(self):\n ParentSubscription.afterCopy(self)\n self.Invoices.clear()\n\n def checkRows(self):\n #for srow in self.SubscriptionRows:\n # if (not srow.VATCode):\n # return srow.FieldErrorResponse(\"NONBLANKERR\",\"VATCode\")\n return True\n\n def check(self):\n res = ParentSubscription.check(self)\n if not res: return res\n for fieldname in (\"TransDate\",\"CustCode\",\"PayTerm\",\"LastInvDate\",\"Frequency\",\"FreqQty\",\"StartDate\",\"EndDate\"):\n if not self.fields(fieldname).getValue():\n return self.FieldErrorResponse(\"NONBLANKERR\",fieldname)\n if self.Actions == None :\n return self.FieldErrorResponse(\"NONBLANKERR\",\"Actions\")\n res = self.checkRows()\n if (not res): return res\n return True\n\n def pastePriceDeal(self):\n pass\n\n def sumUp(self):\n #Calculate Net\n neto = 0\n for row in self.SubscriptionRows:\n neto += row.RowNet\n self.Net = neto\n #Calculate VATPrice\n from VATCode import VATCode\n monto = 0\n for row in self.SubscriptionRows:\n VAT = VATCode()\n VAT.Code = row.VATCode\n if (VAT.load()):\n monto += row.RowNet * (VAT.Percent/100)\n self.VATPrice = monto\n self.Total = self.Net + self.VATPrice\n\n def genDistribution(self):\n from Distribution import Distribution\n dist = Distribution()\n dist.defaults()\n dist.CustCode = 
self.CustCode\n dist.CustName = self.CustName\n dist.User = currentUser()\n return dist\n\n def genPartialInvoice(self,InvoiceDate,fromDate,toDate,rate,Comment=\"\"):\n #Se usa el rate para realizar facturas parciales por abonos no completos, ej: abono mensual donde hay que facturar 15 dias\n # TODO: Agregar bloqueo\n # Checkeo de limites de credito\n from Invoice import Invoice,InvoiceItemRow,InvoicePayModeRow\n inv = Invoice()\n\n inv.defaults()\n inv.UpdStockFlag = False\n inv.TransDate = InvoiceDate\n inv.InvoiceDate = InvoiceDate\n inv.CustCode = self.CustCode\n inv.pasteCustCode()\n inv.Office = self.Office\n inv.PayTerm = self.PayTerm\n inv.pastePayTerm()\n inv.SalesMan = self.SalesMan\n inv.User = currentUser()\n inv.PriceDeal = self.PriceDeal\n\n inv.Currency = self.Currency\n inv.AuthorizedBy = \"\"\n inv.OriginNr = self.SerNr\n inv.OriginType = self.Origin[self.name()]\n inv.Comment = \"Desde %s\" %(fromDate.strftime(\"%d-%m-%Y\"))\n inv.Comment += \" hasta %s\" %(toDate.strftime(\"%d-%m-%Y\"))\n if Comment: inv.Comment += \" , %s\" %(Comment)\n total = 0\n #Agrega la fecha facturada y el abono\n invRow = InvoiceItemRow()\n invRow.Name = \"Abono nro: %s\" % self.SerNr\n inv.Items.append(invRow)\n invRow = InvoiceItemRow()\n invRow.Name = Comment\n inv.Items.append(invRow)\n for item in self.SubscriptionRows:\n if item.Qty:\n invRow = InvoiceItemRow()\n invRow.ArtCode = item.ArtCode\n invRow.pasteArtCode(inv)\n invRow.Name = item.Name\n invRow.Qty = item.Qty\n invRow.Discount = item.Discount\n invRow.Price = item.Price * rate\n invRow.VATCode = item.VATCode\n invRow.pastePrice(inv)\n invRow.OriginType = self.Origin[self.name()]\n invRow.OriginSerNr = self.SerNr\n invRow.OriginRowNr = item.rowNr\n invRow.sumUp(inv)\n total += invRow.RowTotal #Para evitar hacer un sumUp\n inv.Items.append(invRow)\n if (self.PayMode):\n ipmr = InvoicePayModeRow()\n ipmr.PayMode = self.PayMode\n ipmr.pastePayMode()\n ipmr.CreditCard = self.CreditCard\n ipmr.Paid = total\n inv.Payments.append(ipmr)\n inv.sumUp()\n return inv\n\n def genInvoice(self,InvoiceDate,fromDate,Comment=\"\"):\n #Se usa el rate para realizar facturas parciales por abonos no completos, ej: abono mensual donde hay que facturar 15 dias\n # TODO: Agregar bloqueo\n # Checkeo de limites de credito\n from Invoice import Invoice,InvoiceItemRow,InvoicePayModeRow\n inv = Invoice()\n\n inv.defaults()\n inv.UpdStockFlag = False\n inv.TransDate = InvoiceDate\n inv.InvoiceDate = InvoiceDate\n inv.CustCode = self.CustCode\n inv.pasteCustCode()\n inv.Office = self.Office\n inv.PayTerm = self.PayTerm\n inv.pastePayTerm()\n inv.SalesMan = self.SalesMan\n inv.User = currentUser()\n inv.PriceDeal = self.PriceDeal\n\n inv.Currency = self.Currency\n inv.AuthorizedBy = \"\"\n inv.OriginNr = self.SerNr\n inv.OriginType = self.Origin[self.name()]\n if self.ContractType == self.PREPAID:\n if self.Frequency == self.WEEKLY:\n inv.Comment = \"%s %s \" %(tr(\"From\"),fromDate.strftime(\"%d-%m-%Y\"))\n days = 7 * self.FreqQty - 1\n toDate = addDays(fromDate, days)\n inv.Comment += \" %s %s \" %(tr(\"To\"),toDate.strftime(\"%d-%m-%Y\"))\n elif self.Frequency == self.MONTHLY:\n inv.Comment = \"%s %s \" %(tr(\"From\"),fromDate.strftime(\"%d-%m-%Y\"))\n toDate = addMonths(fromDate, self.FreqQty)\n toDate = addDays(toDate , -1)\n inv.Comment += \" %s %s \" %(tr(\"To\"),toDate.strftime(\"%d-%m-%Y\"))\n elif self.Frequency == self.ANNUAL:\n inv.Comment = \"%s %s \"%(tr(\"From\"),fromDate.strftime(\"%d-%m-%Y\"))\n toDate = addMonths(fromDate, self.FreqQty)\n toDate 
= addDays(toDate , -1)\n if self.ContractType == self.CLOSEDPERIOD:\n if self.Frequency == self.WEEKLY:\n days = -7 * self.FreqQty\n frDate = addDays(fromDate , days)\n inv.Comment = \"%s %s \" %(tr(\"From\"),frDate.strftime(\"%d-%m-%Y\"))\n inv.Comment += \" %s %s \" %(tr(\"To\"),addDays(fromDate, -1).strftime(\"%d-%m-%Y\"))\n elif self.Frequency == self.MONTHLY:\n frDate = addMonths(fromDate, -1 * self.FreqQty)\n inv.Comment = \"%s %s \" %(tr(\"From\"),frDate.strftime(\"%d-%m-%Y\"))\n inv.Comment += \" %s %s \" %(tr(\"To\"),addDays(fromDate, -1).strftime(\"%d-%m-%Y\"))\n elif self.Frequency == self.ANNUAL:\n frDate = addMonths(fromDate, -12 * self.FreqQty)\n inv.Comment = \"%s %s \" %(tr(\"From\"),frDate.strftime(\"%d-%m-%Y\"))\n inv.Comment += \" %s %s \" %(tr(\"To\"),addDays(fromDate, -1).strftime(\"%d-%m-%Y\"))\n\n #Agrega la fecha facturada y el abono\n from SalesSettings import SalesSettings\n sset = SalesSettings.bring()\n if (sset.SubscAddInvInfoRows):\n invRow = InvoiceItemRow()\n invRow.Name = \"%s: %s\" % (tr(\"Subscription\"),self.SerNr)\n inv.Items.append(invRow)\n invRow = InvoiceItemRow()\n invRow.Name = inv.Comment\n inv.Items.append(invRow)\n\n if Comment: inv.Comment += \" , %s\" %(Comment)\n\n total = 0\n for item in self.SubscriptionRows:\n if item.Qty:\n invRow = InvoiceItemRow()\n invRow.ArtCode = item.ArtCode\n invRow.pasteArtCode(inv)\n invRow.Name = item.Name\n invRow.Labels = item.Labels\n invRow.Qty = item.Qty\n invRow.Price = item.Price\n invRow.Discount = item.Discount\n invRow.VATCode = item.VATCode\n invRow.pastePrice(inv)\n invRow.OriginType = self.Origin[self.name()]\n invRow.OriginSerNr = self.SerNr\n invRow.OriginRowNr = item.rowNr\n invRow.sumUp(inv)\n total += invRow.RowTotal #Para evitar hacer un sumUp\n inv.Items.append(invRow)\n if (self.PayMode):\n ipmr = InvoicePayModeRow()\n ipmr.PayMode = self.PayMode\n ipmr.pastePayMode()\n ipmr.CreditCard = self.CreditCard\n ipmr.ChequeNr = 1\n ipmr.Paid = total\n inv.Payments.append(ipmr)\n inv.sumUp()\n return inv\n\n def addInvoice(self, fromdate, todate, invoice): \n found = False\n for inv in self.Invoices:\n if inv.InvoiceSerNr == invoice:\n found = True\n break\n if not found:\n invrow = SubscriptionInvoiceRow()\n invrow.FromDate = fromdate\n invrow.ToDate = todate\n invrow.InvoiceSerNr = invoice\n self.Invoices.append(invrow)\n return True\n\n def canRemoveInvoice(self, invoice):\n if (self.Invoices.count() > 0):\n if (self.Invoices[-1].InvoiceSerNr != invoice):\n return ErrorResponse(\"There Is Another Invoice of Subscription Later\")\n return True\n\n def removeInvoice(self, invoice):\n if (self.Invoices.count() > 0):\n if (self.Invoices[-1].InvoiceSerNr == invoice):\n self.Invoices.remove(self.Invoices[-1].rowNr)\n if (self.Invoices.count() > 0):\n self.LastInvDate = self.Invoices[-1].ToDate\n else:\n self.LastInvDate = \"0000-00-00\"\n return True\n \n def getServiceDates (self, serviceDate):\n if self.ContractType == self.PREPAID:\n fromDate = serviceDate \n if self.Frequency == self.WEEKLY:\n days = 7 * self.FreqQty - 1\n toDate = addDays(fromDate, days)\n elif self.Frequency == self.MONTHLY:\n toDate = addMonths(fromDate, self.FreqQty)\n toDate = addDays(toDate , -1)\n elif self.Frequency == self.ANNUAL:\n toDate = addMonths(fromDate, self.FreqQty)\n toDate = addDays(toDate , -1)\n if self.ContractType == self.CLOSEDPERIOD:\n toDate = serviceDate \n if self.Frequency == self.WEEKLY:\n days = -7 * self.FreqQty\n fromDate = addDays(toDate , days)\n elif self.Frequency == self.MONTHLY:\n fromDate = 
addMonths(toDate, -1 * self.FreqQty)\n elif self.Frequency == self.ANNUAL:\n fromDate = addMonths(toDate, -12 * self.FreqQty)\n return fromDate, toDate\n\nParentSubscriptionRow = SuperClass(\"SubscriptionRow\",\"Record\",__file__)\nclass SubscriptionRow(ParentSubscriptionRow):\n\n def pasteArtCode(self, subscription):\n from VATCode import VATCode\n from Item import Item\n item = Item.bring(self.ArtCode)\n if not item: return False\n self.Name = item.Name\n self.Labels = item.Labels\n from ItemGroup import ItemGroup\n ig = ItemGroup.bring(item.ItemGroup)\n if ig:\n if (ig.Labels):\n if (self.Labels):\n self.Labels = \"%s,%s\" %(self.Labels,ig.Labels)\n else:\n self.Labels = ig.Labels\n from PriceDeal import PriceDeal\n if PriceDeal.getInclVAT(subscription.PriceDeal, subscription.TransDate):\n self.VATPrice = item.getPrice(subscription.TransDate, subscription.PriceDeal, subscription.Currency, self.Qty)\n self.pasteVATPrice()\n else:\n self.Price = item.getPrice(subscription.TransDate,subscription.PriceDeal, subscription.Currency, self.Qty)\n self.pastePrice(subscription)\n self.Price = subscription.roundValue(self.Price)\n self.pastePrice(subscription)\n self.RowNet = self.Qty * item.Price\n self.VATCode = item.VATCode\n vatCode= VATCode.bring(self.VATCode)\n if (vatCode):\n self.VATCode = item.VATCode\n self.RowTotal = self.RowNet * (1+vatCode.Percent/100)\n self.RowTotal = subscription.roundValue(self.RowTotal)\n\n def pasteVATPrice(self):\n from VATCode import VATCode\n vatCode = VATCode.bring(self.VATCode)\n if vatCode:\n self.Price = self.VATPrice / (1+vatCode.Percent/100)\n else:\n self.Price = self.VATPrice\n\n def pastePrice(self, subscription):\n from VATCode import VATCode\n vatCode = VATCode.bring(self.VATCode)\n if vatCode:\n self.VATPrice = self.Price * (1+vatCode.Percent/100)\n else:\n self.VATPrice = self.Price\n\n def pasteQty(self,subscription):\n from Item import Item\n item = Item.bring(self.ArtCode)\n if item:\n from PriceDeal import PriceDeal\n if PriceDeal.getInclVAT(subscription.PriceDeal, subscription.TransDate):\n self.VATPrice = item.getPrice(subscription.TransDate,subscription.PriceDeal, subscription.Currency, self.Qty)\n self.pasteVATPrice()\n else:\n self.Price = item.getPrice(subscription.TransDate,subscription.PriceDeal, subscription.Currency, self.Qty)\n self.pastePrice(subscription)\n if (item.LuxGoodTaxPercent):\n neto = (self.Price * self.Qty) * (1 - self.Discount/100)\n\n def pasteVATCode(self, subscription):\n self.pastePrice(subscription)\n\n def sumUp(self,subscription):\n from VATCode import VATCode\n from Item import Item\n from PriceDeal import PriceDeal\n vatincl = PriceDeal.getInclVAT(subscription.PriceDeal, subscription.TransDate)\n vatCode = VATCode.trybring(self.VATCode)\n vatpercent = 0.0\n if vatCode: vatpercent = vatCode.Percent\n if not vatincl:\n #cuando los precios vienen sin iva incluido\n #se utiliza Price para calcular RowNet y luego con RowNet se calcula RowTotal\n #esto permite evitar errores de redondeo muy importantes: proporcionales a Qty\n self.RowNet = (self.Price * self.Qty) * (1 - self.Discount/100)\n self.RowNet = subscription.roundValue(self.RowNet, \"SubscriptionRows\", \"RowNet\")\n self.RowTotal = self.RowNet * (1+vatpercent/100)\n else:\n #cuando los precios vienen con iva incluido\n #se utiliza VATPrice para calcular RowTotal y luego con RowTotal se calcula RowNet\n #esto permite evitar errores de redondeo muy importantes: proporcionales a Qty\n self.RowTotal = (self.VATPrice * self.Qty) * (1 - self.Discount/100)\n 
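# round the VAT-inclusive total first and derive the net from it, so the rounding error does not grow in proportion to Qty\n            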
self.RowTotal = subscription.roundValue(self.RowTotal, \"SubscriptionRows\", \"RowTotal\")\n self.RowNet = self.RowTotal / (1+vatpercent/100)\n self.RowNet = subscription.roundValue(self.RowNet, \"SubscriptionRows\", \"RowNet\")\n\nParentSubscriptionInvoiceRow = SuperClass(\"SubscriptionInvoiceRow\",\"Record\",__file__)\nclass SubscriptionInvoiceRow(ParentSubscriptionInvoiceRow):\n pass","sub_path":"standard/records/Subscription.py","file_name":"Subscription.py","file_ext":"py","file_size_in_byte":15703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"485284143","text":"#!/usr/bin/env python\n\nimport gtk\n\nclass Statusbar(gtk.Window):\n def __init__(self):\n self.count = 0\n\n gtk.Window.__init__(self)\n self.set_title(\"Statusbar\")\n self.connect(\"destroy\", gtk.main_quit)\n\n vbox = gtk.VBox(False, 5)\n self.add(vbox)\n\n buttonbox = gtk.HButtonBox()\n vbox.pack_start(buttonbox, False, False, 0)\n\n buttonPush = gtk.Button(\"Push Message\")\n buttonPush.connect(\"clicked\", self.on_push_clicked)\n buttonbox.add(buttonPush)\n\n buttonPop = gtk.Button(\"Pop Message\")\n buttonPop.connect(\"clicked\", self.on_pop_clicked)\n buttonbox.add(buttonPop)\n\n buttonRemoveAll = gtk.Button(\"Remove All\")\n buttonRemoveAll.connect(\"clicked\", self.on_remove_all_clicked)\n buttonbox.add(buttonRemoveAll)\n\n self.statusbar = gtk.Statusbar()\n self.context = self.statusbar.get_context_id(\"example\")\n vbox.pack_start(self.statusbar, False, False, 0)\n\n def on_push_clicked(self, button):\n self.count += 1\n self.statusbar.push(self.context, \"Message number %i\" % (self.count))\n\n def on_pop_clicked(self, button):\n self.statusbar.pop(self.context)\n\n def on_remove_all_clicked(self, button):\n self.statusbar.remove_all(self.context)\n\nwindow = Statusbar()\nwindow.show_all()\n\ngtk.main()\n","sub_path":"examples/statusbar.py","file_name":"statusbar.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"157085979","text":"def test_all_1d_norm_preserving(self):\n x = random(30)\n x_norm = np.linalg.norm(x)\n n = (x.size * 2)\n func_pairs = [(np.fft.fft, np.fft.ifft), (np.fft.rfft, np.fft.irfft), (np.fft.ihfft, np.fft.hfft)]\n for (forw, back) in func_pairs:\n for n in [x.size, (2 * x.size)]:\n for norm in [None, 'ortho']:\n tmp = forw(x, n=n, norm=norm)\n tmp = back(tmp, n=n, norm=norm)\n assert_array_almost_equal(x_norm, np.linalg.norm(tmp))","sub_path":"Data Set/bug-fixing-5/9f9fa567cfb0536fdae402c50005c22febb163cf--bug.py","file_name":"9f9fa567cfb0536fdae402c50005c22febb163cf--bug.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44882961","text":"import json\nimport logging\nimport os\nfrom collections import defaultdict\nfrom queue import Queue\nfrom socketserver import ThreadingTCPServer, StreamRequestHandler\nfrom threading import Condition, RLock\nfrom uuid import uuid4\n\nfrom jsonschema import validate, ValidationError, SchemaError\nfrom redis import Redis, ConnectionError as RedisConnectionError\n\nfrom app.core.messaging import read_message, serialize_message, Message\nfrom app.core.messaging import QueueAlreadyExists\nfrom app.core.messaging import SchemaValidationFailed, BadOperation\nfrom app.core.messaging import RequiredFieldsMissing, InternalMessagingError\nfrom app.core.messaging import MessagingException, QueueNotFound\nfrom 
app.core.services import BaseService, BackgroundProcessServiceStart\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FakeRedis(object):\n \"\"\" Fake Redis. Use for testing only. Uses queue.Queue.\"\"\"\n def __init__(self):\n self.queue_map = defaultdict(Queue)\n\n def lpush(self, queue, obj):\n self.queue_map[queue].put(obj)\n\n def brpop(self, queue, timeout=0):\n timeout = timeout if timeout else None\n return queue, self.queue_map[queue].get(timeout=timeout)\n\n\nclass BaseQueue(object):\n def __init__(self, queue_info):\n self.queue_info = queue_info\n\n def enqueue(self, task, headers):\n pass\n\n def dequeue(self, requestor_id):\n pass\n\n def connect(self):\n return True\n\n def disconnect(self):\n return True\n\n def validate_schema(self, msg):\n validate(msg, self.queue_info[\"request_schema\"])\n\n def __repr__(self):\n return (self.__class__.__name__ +\n \"({})\".format(self.queue_info[\"queue_name\"]))\n\n\nclass RedisQueue(BaseQueue):\n REDIS_PORT = 6379\n\n def __init__(self, queue_info, queue_name, redis_config):\n super().__init__(queue_info)\n self.queue_name = queue_name\n self.redis_queue = \"queue-\" + queue_name\n self.redis_config = {\n \"host\": redis_config.get(\"REDIS_HOST\") or \"localhost\",\n \"port\": int(redis_config.get(\"REDIS_PORT\") or self.REDIS_PORT),\n \"db\": int(redis_config.get(\"REDIS_DB\") or 0),\n \"password\": redis_config.get(\"REDIS_PASSWD\")\n }\n self.use_fake = redis_config.get(\"USE_FAKE_REDIS\", None)\n self.redis = None\n\n def enqueue(self, task, headers):\n self.validate_schema(task)\n self.get_connection().lpush(self.redis_queue, json.dumps(task))\n return True\n\n def dequeue(self, requestor_id, timeout=0):\n data = self.get_connection().brpop(self.redis_queue, timeout=timeout)\n if data:\n task = json.loads(data[1])\n return task\n logger.warning(\"Redis dequeue returned nothing: %s\", data)\n return None\n\n def connect(self):\n if self.use_fake:\n self.redis = FakeRedis()\n return True\n\n try:\n self.redis = Redis(**self.redis_config)\n self.redis.info()\n except RedisConnectionError:\n logger.exception(\"Unable to connect to real Redis.\")\n return False\n return True\n\n def get_connection(self):\n if self.redis is None:\n raise RedisConnectionError()\n return self.redis\n\n\nclass StickyQueue(BaseQueue):\n def __init__(self, queue_info, *args, **kwargs):\n super().__init__(queue_info)\n self.sticky_message = None\n self.requestors = set()\n self.requestor_lock = RLock()\n self.condition = Condition(self.requestor_lock)\n\n def enqueue(self, task, headers):\n self.validate_schema(task)\n with self.condition:\n self.sticky_message = task\n self.requestors = set()\n self.condition.notify_all()\n\n def dequeue(self, requestor_id):\n def can_dequeue():\n has_msg = self.sticky_message is not None\n new_requestor = requestor_id not in self.requestors\n return has_msg and new_requestor\n\n with self.condition:\n self.condition.wait_for(can_dequeue)\n self.requestors.add(requestor_id)\n return self.sticky_message\n\n def connect(self):\n return True\n\n\nclass KeyedStickyQueue(BaseQueue):\n def __init__(self, queue_info, *args, **kwargs):\n super().__init__(queue_info)\n self.sticky_map = {}\n self.sticky_map_version = 1\n self.requestor_map = defaultdict(int)\n self.condition = Condition()\n\n def enqueue(self, task, headers):\n try:\n key = headers[\"KEY\"]\n except KeyError:\n raise RequiredFieldsMissing(\"Field 'KEY' is required.\")\n\n self.validate_schema(task)\n with self.condition:\n self.sticky_map[key] = task\n 
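# bumping the version under the lock marks every requestor's snapshot stale and wakes the dequeue() waiters below\n            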
self.sticky_map_version += 1\n self.condition.notify_all()\n\n def dequeue(self, requestor_id):\n def can_dequeue():\n return self.sticky_map_version != self.requestor_map[requestor_id]\n\n with self.condition:\n self.condition.wait_for(can_dequeue)\n self.requestor_map[requestor_id] = self.sticky_map_version\n return self.sticky_map\n\n\nclass MessageHandler(StreamRequestHandler):\n def handle(self):\n sess = str(uuid4())\n while True:\n try:\n msg = read_message(self.rfile)\n msg.headers[\"SESS\"] = sess\n self.reply(self.server.handle_message(msg))\n except MessagingException as e:\n self.reply(serialize_message(e.to_msg()))\n continue\n except IOError:\n break\n\n def reply(self, msg):\n self.wfile.write((msg + \"\\n\").encode())\n self.wfile.flush()\n\n\nclass MessageServer(ThreadingTCPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n def __init__(self, service, port, redis_config, queue_config):\n super().__init__((\"\", port), MessageHandler)\n self.service = service\n self.sent_start_notification = False\n self.queue_map = {}\n self.queue_map_lock = RLock()\n self.listener_map = {}\n self.sticky_messages = {}\n self.clients = {}\n self.redis_config = redis_config\n\n for queue_info in queue_config.get(\"custom_queues\", []):\n queue = self.create_queue(queue_info)\n self.queue_map[queue_info[\"queue_name\"]] = queue\n\n def create_queue(self, queue_info):\n try:\n schema = queue_info[\"request_schema\"]\n if not isinstance(schema, dict):\n raise SchemaValidationFailed(json.dumps(schema))\n validate({}, schema)\n except KeyError:\n raise SchemaValidationFailed(\"'request_schema' required.\")\n except SchemaError:\n raise SchemaValidationFailed(json.dumps(schema))\n except ValidationError:\n pass\n\n queue_types = {\n \"redis\": RedisQueue,\n \"sticky\": StickyQueue,\n \"keyedsticky\": KeyedStickyQueue,\n }\n queue_name = queue_info[\"queue_name\"]\n\n cls = queue_types[queue_info.get(\"queue_type\", \"redis\")]\n queue = cls(queue_info, queue_name, self.redis_config)\n self.queue_map[queue_name] = queue\n logger.info(\"Connecting to %s\", queue)\n return queue\n\n def handle_message(self, msg):\n if msg.operation == \"dequeue\":\n item = self.handle_dequeue(msg)\n return serialize_message(Message(\"inform\", item))\n elif msg.operation == \"enqueue\":\n self.handle_enqueue(msg)\n msg = Message(\"result\")\n msg.headers[\"RES\"] = \"OK\"\n return serialize_message(msg)\n elif msg.operation == \"create\":\n self.handle_create(msg)\n msg = Message(\"result\")\n msg.headers[\"RES\"] = \"OK\"\n return serialize_message(msg)\n else:\n raise BadOperation(msg.operation)\n\n def handle_enqueue(self, msg):\n if msg.task is None:\n raise RequiredFieldsMissing(\"Task is required for enqueue.\")\n try:\n queue_name = msg.headers[\"Q\"]\n except KeyError:\n raise RequiredFieldsMissing(\"Field 'Q' is required for enqueue.\")\n\n try:\n queue = self.queue_map[queue_name]\n queue.enqueue(msg.task, msg.headers)\n except KeyError:\n raise QueueNotFound(queue_name)\n except ValidationError:\n raise SchemaValidationFailed()\n except RedisConnectionError:\n logger.exception(\"Failed to talk to Redis.\")\n raise InternalMessagingError()\n\n def handle_dequeue(self, msg):\n try:\n queue_name = msg.headers[\"Q\"]\n except KeyError:\n raise RequiredFieldsMissing(\"Field 'Q' is required for dequeue.\")\n\n requestor_id = msg.headers[\"SESS\"]\n try:\n queue = self.queue_map[queue_name]\n return queue.dequeue(requestor_id)\n except KeyError:\n raise QueueNotFound(queue_name)\n except 
RedisConnectionError:\n logger.exception(\"failed to talk to Redis.\")\n raise InternalMessagingError\n\n def handle_create(self, msg):\n if msg.task is None:\n raise RequiredFieldsMissing(\"QueueInfo is required for create.\")\n\n queue_name = os.path.join(\"/\", msg.task[\"queue_name\"].lstrip(\"/\"))\n msg.task[\"queue_name\"] = queue_name\n with self.queue_map_lock:\n if queue_name in self.queue_map:\n raise QueueAlreadyExists(queue_name)\n queue = self.create_queue(msg.task)\n\n if not queue.connect():\n raise InternalMessagingError(\"Cant connect: \" + queue_name)\n\n self.queue_map[msg.task[\"queue_name\"]] = queue\n\n logger.info(\"Connected: %s\", queue)\n\n def run(self):\n for queue in self.queue_map.values():\n if not queue.connect():\n logger.error(\"Unable to connect to: %s\", queue)\n return\n self.serve_forever()\n\n def service_actions(self):\n if not self.sent_start_notification:\n self.service.notify_start()\n self.sent_start_notification = True\n\n def shutdown(self):\n for _, queue in self.queue_map.items():\n queue.disconnect()\n super().shutdown()\n super().server_close()\n\n\nclass MessageService(BackgroundProcessServiceStart, BaseService):\n PORT = 11023\n\n def __init__(self, config):\n self.redis_config = config[\"redis_config\"]\n self.queues = config[\"queues\"]\n super().__init__()\n\n def get_component_name(self):\n return \"messaging\"\n\n def on_service_start(self, *args, **kwargs):\n self.message_server = MessageServer(self, self.PORT, self.redis_config,\n self.queues)\n self.message_server.run()\n\n def on_service_stop(self):\n self.message_server.shutdown()\n","sub_path":"app/services/messaging/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597346743","text":"import numpy as np\n\nclass DeepNet:\n def __init__(self, p):\n self.nLayer = len(p['nNode'])\n\n self.b = []\n self.W = []\n for i in range(1, self.nLayer):\n self.b += [p['bSTD'] * np.random.normal(0, 1, p['nNode'][i])]\n self.W += [p['wSTD'] * np.random.normal(0, 1, (p['nNode'][i], p['nNode'][i - 1]))]\n self.func = p['nonlinFunc']\n self.fprim = p['nonlinPrimFunc']\n\n # Linear part of the feedforward step\n def lin(self, x, W, b):\n return x.dot(W.T) + b # Flipped order to do vector broadcasting\n\n # Dimensions of x are [nTrial, nFeature]\n def predict(self, x):\n hThis = x\n for b, W, func in zip(self.b, self.W, self.func):\n lin = self.lin(hThis, W, b)\n hThis = func(lin)\n return hThis\n\n # Make single gradient descent step, given a bunch of data and associated labels\n def step(self, x, y, eta):\n # Forwards pass\n f = [x] # Function values for different layers\n g = [1] # Derivative function values for different layers\n for b, W, func, fprim in zip(self.b, self.W, self.func, self.fprim):\n lin = self.lin(f[-1], W, b)\n f += [func(lin)]\n g += [fprim(lin)]\n\n # Backwards pass\n err = [1] * (self.nLayer - 2) + [y - f[-1]]\n for iLayer in range(self.nLayer - 2, 0, -1):\n # print(iLayer, err[iLayer].shape, f[iLayer].shape, g[iLayer].shape)\n errg = err[iLayer] * g[iLayer + 1]\n err[iLayer - 1] = errg.dot(self.W[iLayer])\n self.b[iLayer] += eta * np.sum(errg, axis=0)\n self.W[iLayer] += eta * errg.T.dot(f[iLayer])\n\n return np.mean(np.linalg.norm(err[-1], axis=1))","sub_path":"lib/fnn/deepNet.py","file_name":"deepNet.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"489726410","text":"# #homework3\r\n\r\n# Global Ai HUB homework3 19/02/2021\r\n\r\n\r\ndef prime_first(number):\r\n for i in range(2,number):\r\n \r\n if (number % i) == 0:\r\n break\r\n else: \r\n print(number)\r\n \r\ndef prime_second(number):\r\n for i in range(2,number):\r\n \r\n if (number % i) == 0:\r\n break\r\n else: \r\n print(number)\r\n \r\n \r\nfor number in range(0,1000):\r\n \tif 0<=number<500:\r\n prime_first(number)\r\n \telse:\r\n prime_second(number)","sub_path":"HOMERWORK/HW3.py","file_name":"HW3.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441292527","text":"import math\nimport numpy as np\nimport socket\nimport time\n\nfrom pprint import pprint\n\n\nclass Leg:\n def __init__(self, x: float, y: float, z: float, name: str = \"leg\"):\n self.name: str = name\n self.x: float = x\n self.y: float = y\n self.z: float = z\n\n def move(self, x: float, y: float, z: float) -> None:\n self.x: float = x\n self.y: float = y\n self.z: float = z\n return None\n\n def show(self) -> str:\n return \"X:\" + str(self.x) + \" \\tY:\" + str(self.y) + \"\\tZ:\" + str(self.z) + \"\\t\" + str(self.name)\n\n\nclass RobotModel:\n def __init__(self, host: int, port: int = 80, activate: bool = False, mode: int = 1):\n\n # Connecting variables.\n self.host: str = host\n self.port: int = port\n self.mode: int = mode\n # Leg variable setup.\n self.leg_1: Leg = Leg(x=20, y=60, z=40, name=\"leg-1\")\n self.leg_2: Leg = Leg(x=20, y=60, z=40, name=\"leg-2\")\n self.leg_3: Leg = Leg(x=20, y=60, z=40, name=\"leg-3\")\n self.leg_4: Leg = Leg(x=20, y=60, z=40, name=\"leg-4\")\n\n self.showOffMode: bool = False\n self.increment: int = 15\n self.activate: bool = activate\n\n self.previousPayload = []\n self.newPayload = []\n\n def _map(self, x, in_min, in_max, out_min, out_max) -> int:\n return int((x-in_min) * (out_max - out_min) / (in_max - in_min) + out_min)\n\n def _getTrajectory(self, pt1: tuple, pt2: tuple, fine: int) -> list:\n return list(zip(np.linspace(pt1[0], pt2[0], fine+1),\n np.linspace(pt1[1], pt2[1], fine+1)))\n\n def _smooth(self, value: float, smooth: int) -> int:\n return sum([x for x in range(0, smooth)]) / smooth\n\n def sendLoad(self, load: list) -> None:\n if self.activate:\n print(load)\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((self.host, self.port))\n s.sendall(load)\n \n return None\n\n def restart(self):\n print(\"Restarting Robot\")\n self.sendLoad(load=bytearray([9]))\n\n def reload(self):\n self.sendLoad(bytearray(\n [\n self.mode,\n int(self.leg_1.x), int(self.leg_2.x), int(\n self.leg_3.x), int(self.leg_4.x),\n int(self.leg_1.y), int(self.leg_2.y), int(\n self.leg_3.y), int(self.leg_4.y),\n int(self.leg_1.z), int(self.leg_2.z), int(\n self.leg_3.z), int(self.leg_4.z)\n ]\n ))\n\n def resolve(self, loops: dict, stepInterval: int = 0.2):\n for key, value in loops.items():\n for key2, value2 in value.items():\n for key3, value3 in value2.items():\n try:\n value3(int(key2))\n except TypeError:\n pass\n time.sleep(stepInterval)\n self.reload()\n\n\n def trotTraverse(self, direction):\n stepInterval: float = 0.3 \n self.increment = 5\n\n loops: dict = {\n \"cycle-2\" : {\n \"1\" : {\n \"fn\" : self.up\n },\n \"4\" : {\n \"fn\" : self.up\n },\n \"3\": {\n \"fn\" : self.shortDownFront\n },\n \"2\" : {\n \"fn\" : self.shortDownBack\n }\n },\n \"cycle-3\": {\n \"1\" : {\n \"fn\": self.shortDownBack\n },\n \"4\" : {\n \"fn\": 
self.shortDownFront\n }\n },\n \"cycle-4\": {\n \"1\" : {\n \"fn\": self.shortDownFront\n },\n \"4\" : {\n \"fn\" : self.shortDownBack\n },\n \"3\" : {\n \"fn\" : self.up\n },\n \"2\" : {\n \"fn\" : self.up\n }\n }\n }\n\n self.resolve( loops , stepInterval)\n\n def trotRotate2(self, direction):\n stepInterval: float = 0.3\n self.increment = 5\n loops: dict = {\n \"cycle-1\": {\n \"1\": {\n \"fn\": self.down\n },\n \"2\": {\n \"fn\": self.down\n },\n \"3\": {\n \"fn\": self.down\n },\n \"4\": {\n \"fn\": self.down\n }\n },\n \"cycle-2\": {\n \"1\": {\n \"fn\": self.up\n },\n \"4\": {\n \"fn\": self.up\n }\n },\n \"cycle-3\": {\n \"1\" : {\n \"fn\" : self.upFront if direction else self.upBack\n },\n \"4\" : {\n \"fn\" : self.upFront if direction else self.upBack\n }\n },\n \"cycle-4\": {\n \"1\" : {\n \"fn\" : self.downFront if direction else self.downBack\n },\n \"4\" : {\n \"fn\" : self.downFront if direction else self.downBack\n }\n },\n \"cycle-5\": {\n \"3\" : {\n \"fn\" : self.up\n },\n \"2\" : {\n \"fn\" : self.up\n }\n },\n \"cycle-6\" : {\n \"3\" : {\n \"fn\" : self.upFront if direction else self.upBack\n },\n \"2\" : {\n \"fn\" : self.upFront if direction else self.upBack\n }\n },\n \"cycle-7\" : {\n \"3\" : {\n \"fn\" :self.downFront if direction else self.downBack\n },\n \"2\" : {\n \"fn\" : self.downFront if direction else self.downBack\n }\n },\n \"cycle-8\": {\n \"1\" : {\n \"fn\": self.down\n },\n \"2\" : {\n \"fn\" : self.down\n },\n \"3\" : {\n \"fn\" : self.down\n },\n \"4\" : {\n \"fn\" : self.down\n }\n\n }\n }\n\n for key, value in loops.items():\n for key2, value2 in value.items():\n for key3, value3 in value2.items():\n try:\n value3(int(key2))\n except TypeError:\n pass\n time.sleep(stepInterval)\n self.reload()\n\n def trotRotate(self, direction):\n print(\"doing this\")\n stepInterval: float = 0.5\n increment: int = 15\n\n loops: dict = {\n \"cycle-1\": {\n \"1\": {\n \"fn\": self.down,\n \"args\": increment\n },\n \"2\": {\n \"fn\": self.down,\n \"args\": increment\n },\n \"3\": {\n \"fn\": self.down,\n \"args\": increment\n },\n \"4\": {\n \"fn\": self.down,\n \"args\": increment\n }\n },\n \"cycle-2\": {\n \"1\": {\n \"fn\": self.upFront if direction else self.upBack\n },\n \"2\": {\n \"fn\": self.down\n },\n \"3\": {\n \"fn\": self.upFront if direction else self.upBack\n },\n \"4\": {\n \"fn\": self.down\n },\n },\n \"cycle-3\": {\n \"1\": {\n \"fn\": self.downFront if direction else self.downBack\n },\n \"2\": {\n \"fn\": self.down\n },\n \"3\": {\n \"fn\": self.downFront if direction else self.downBack\n },\n \"4\": {\n \"fn\": self.down\n }\n },\n \"cycle-4\": {\n \"1\": {\n \"fn\": self.downFront if direction else self.downBack,\n },\n \"2\": {\n \"fn\": self.upFront if direction else self.upBack,\n },\n \"3\": {\n \"fn\": self.downFront if direction else self.downBack\n },\n \"4\": {\n \"fn\": self.upFront if direction else self.upBack\n }\n },\n \"cycle-5\": {\n \"1\": {\n \"fn\": self.downFront if direction else self.downBack,\n },\n \"2\": {\n \"fn\": self.downFront if direction else self.downBack,\n },\n \"3\": {\n \"fn\": self.downFront if direction else self.downBack\n },\n \"4\": {\n \"fn\": self.downFront if direction else self.downBack\n }\n },\n \"cycle-6\": {\n \"1\": {\n \"fn\": self.down,\n },\n \"2\": {\n \"fn\": self.down,\n },\n \"3\": {\n \"fn\": self.down\n },\n \"4\": {\n \"fn\": self.down\n }\n }\n }\n\n for key, value in loops.items():\n for key2, value2 in value.items():\n for key3, value3 in value2.items():\n try:\n 
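# each \"fn\" entry is a bound gait primitive (self.up, self.down, ...) keyed by leg number; non-callable placeholders fall through to the TypeError guard below\n                        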
value3(int(key2))\n except TypeError:\n pass\n time.sleep(stepInterval)\n self.reload()\n\n def handleRotate(self,\n theta_1: int, theta_2: int,\n theta_3: int, x: int,\n y: int, z: int):\n\n theta_1 = math.radians(theta_1)\n theta_2 = math.radians(theta_2)\n theta_3 = math.radians(theta_3)\n\n rotationMatrix = np.array([\n y *\n (\n math.cos(theta_1) * math.sin(theta_3) +\n math.cos(theta_3) * math.sin(theta_1) *\n math.sin(theta_2)\n ) +\n z *\n (\n math.sin(theta_1) * math.sin(theta_3) -\n math.cos(theta_1) * math.cos(theta_3) *\n math.sin(theta_2)\n ) +\n x *\n math.cos(theta_2) *\n math.cos(theta_3),\n # First set of equations.\n y *\n (\n math.cos(theta_1) *\n math.cos(theta_3) -\n math.sin(theta_1) *\n math.sin(theta_2) *\n math.sin(theta_3)\n ) +\n z *\n (\n math.cos(theta_3) *\n math.sin(theta_1) +\n math.cos(theta_1) *\n math.sin(theta_2) *\n math.sin(theta_3)\n ) -\n x *\n math.cos(theta_2) *\n math.sin(theta_3),\n # Second set of equations.\n x *\n math.sin(theta_2) +\n z * math.cos(theta_1) * math.cos(theta_2) -\n y * math.cos(theta_2) * math.sin(theta_1),\n ])\n return rotationMatrix\n\n def rotate(self, axis_1, axis_2, axis_3, axis) -> None:\n\n precision: int = 1000\n\n data_points = self._getTrajectory((10, 65), (150, 100), precision)\n rotation_precision: int = 40\n\n point_z = self._map(axis_1, - 1, 1, 0, precision)\n point_x = self._map(axis_2, -1, 1, 0, rotation_precision)\n point_3 = self._map(axis_3, -1, 1, 0, precision)\n\n if axis == \"x\":\n self.leg_1.y = data_points[precision - point_z][0]\n self.leg_1.z = data_points[precision - point_z][1]\n\n self.leg_3.y = data_points[point_3][0]\n self.leg_3.z = data_points[point_3][1]\n\n elif axis == \"y\":\n self.leg_1.y = data_points[point_3][0]\n self.leg_1.z = data_points[point_3][1]\n\n self.leg_3.y = data_points[precision - point_z][0]\n self.leg_3.z = data_points[precision - point_z][1]\n\n self.leg_1.x = point_x\n\n self.leg_2.x = point_x\n self.leg_2.y = data_points[point_3][0]\n self.leg_2.z = data_points[point_3][1]\n\n self.leg_3.x = point_x\n\n self.leg_4.x = point_x\n self.leg_4.y = data_points[precision - point_z][0]\n self.leg_4.z = data_points[precision - point_z][1]\n return None\n\n def translate(self, axis_1, axis_2, axis_3, axis) -> None:\n precision: int = 1000\n rotation_precision: int = 40\n data_points = self._getTrajectory((10, 65), (155, 95), precision)\n point_1 = self._map(axis_3, -1, 1, 0, precision)\n point_2 = self._map(\n self._map(axis_1, -1, 1, 0, precision), 0, precision, 0, rotation_precision)\n\n print(\"Point 1: \" + str(point_1))\n print(\"data_points: \" + str(data_points[point_1]))\n\n if axis == \"x\":\n self.leg_1.x = rotation_precision - point_2\n self.leg_3.x = point_2\n\n elif axis == \"y\":\n self.leg_1.x = point_2\n self.leg_3.x = rotation_precision - point_2\n\n self.leg_1.y = data_points[point_1][0]\n self.leg_1.z = data_points[point_1][1]\n\n self.leg_2.x = point_2\n self.leg_2.y = data_points[point_1][0]\n self.leg_2.z = data_points[point_1][1]\n\n self.leg_3.y = data_points[point_1][0]\n self.leg_3.z = data_points[point_1][1]\n\n self.leg_4.x = rotation_precision - point_2\n self.leg_4.y = data_points[point_1][0]\n self.leg_4.z = data_points[point_1][1]\n return None\n\n def step(self, leg):\n\n self.up(leg)\n self.reload()\n time.sleep(0.2)\n self.shortDownFront(leg)\n self.reload()\n time.sleep(0.2)\n self.shortDownBack(leg)\n self.reload()\n time.sleep(0.2)\n self.up(leg)\n self.reload()\n\n def up(self, leg: int) -> None:\n if leg == 1:\n self.leg_1.move(20, 60, 
45)\n        elif leg == 2:\n            self.leg_2.move(20, 60, 45)\n        elif leg == 3:\n            self.leg_3.move(20, 60, 45)\n        elif leg == 4:\n            self.leg_4.move(20, 60, 45)\n\n        return None\n\n    def down(self, leg: int, increment=0) -> None:\n\n        if leg == 1:\n            self.leg_1.move(20, 45 + self.increment, 60 + self.increment)\n        elif leg == 2:\n            self.leg_2.move(20, 45 + self.increment, 60 + self.increment)\n        elif leg == 3:\n            self.leg_3.move(20, 45 + self.increment, 60 + self.increment)\n        elif leg == 4:\n            self.leg_4.move(20, 45 + self.increment, 60 + self.increment)\n\n        return None\n\n    def upFront(self, leg: int) -> None:\n        turn: int = 40\n        if leg == 1:\n            self.leg_1.move(turn, 20, 40)\n        elif leg == 2:\n            self.leg_2.move(turn, 20, 40)\n        elif leg == 3:\n            self.leg_3.move(turn, 20, 40)\n        elif leg == 4:\n            self.leg_4.move(turn, 20, 40)\n        return None\n\n    def downFront(self, leg: int) -> None:\n        turn: int = 40\n        if leg == 1:\n            self.leg_1.move(turn, 20, 60 + self.increment)\n        elif leg == 2:\n            self.leg_2.move(turn, 20, 60 + self.increment)\n        elif leg == 3:\n            self.leg_3.move(turn, 20, 60 + self.increment)\n        elif leg == 4:\n            self.leg_4.move(turn, 20, 60 + self.increment)\n        return None\n\n    def upBack(self, leg):\n        turn: int = 0\n        if leg == 1:\n            self.leg_1.move(turn, 20, 40)\n        elif leg == 2:\n            self.leg_2.move(turn, 20, 40)\n        elif leg == 3:\n            self.leg_3.move(turn, 20, 40)\n        elif leg == 4:\n            self.leg_4.move(turn, 20, 40)\n        return None\n\n    def downBack(self, leg):\n        turn: int = 0\n        if leg == 1:\n            self.leg_1.move(turn, 20, 60 + self.increment)\n        elif leg == 2:\n            self.leg_2.move(turn, 20, 60 + self.increment)\n        elif leg == 3:\n            self.leg_3.move(turn, 20, 60 + self.increment)\n        elif leg == 4:\n            self.leg_4.move(turn, 20, 60 + self.increment)\n        return None\n\n    def shortDownFront(self, leg):\n        turn = 30\n        if leg == 1:\n            self.leg_1.move(turn, 45 + self.increment, 60 + self.increment)\n        elif leg == 2:\n            self.leg_2.move(turn, 45 + self.increment, 60 + self.increment)\n        elif leg == 3:\n            self.leg_3.move(turn, 45 + self.increment, 60 + self.increment)\n        elif leg == 4:\n            self.leg_4.move(turn, 45 + self.increment, 60 + self.increment)\n\n    def shortDownBack(self, leg):\n        turn = 10\n        if leg == 1:\n            self.leg_1.move(turn, 45 + self.increment, 60 + self.increment)\n        elif leg == 2:\n            self.leg_2.move(turn, 45 + self.increment, 60 + self.increment)\n        elif leg == 3:\n            self.leg_3.move(turn, 45 + self.increment, 60 + self.increment)\n        elif leg == 4:\n            self.leg_4.move(turn, 45 + self.increment, 60 + self.increment)\n\n    def smoother(self, y, box_pts):\n        box = np.ones(box_pts)/box_pts\n        y_smooth = np.convolve(y, box, mode='same')\n        return y_smooth\n\n\nif __name__ == \"__main__\":\n    robo: RobotModel = RobotModel(\"192.168.0.248\", 80)\n    robo.up(1)\n    print(robo.leg_1.show())\n    robo.down(1, 30)\n    print(robo.leg_1.show())\n    robo.downBack(1)\n    print(robo.leg_1.show())\n\n    robo.translate(0, 200, 300, \"x\")  # the required axis argument was missing; \"x\" keeps the demo runnable\n    print(robo.leg_1.show())\n    print(robo.leg_2.show())\n    print(robo.leg_3.show())\n    print(robo.leg_4.show())\n","sub_path":"control_software/robot_model.py","file_name":"robot_model.py","file_ext":"py","file_size_in_byte":18033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"174654648","text":"'''\nParse input and run appropriate code.\nDon't use this file for the actual work; only minimal code should be here.\nWe just parse input and call methods from other modules.\n'''\n\n#do NOT import ways. 
This should be done from other files\n#simply import your modules and call the appropriate functions\n\nfrom algorithms import base_with_information, better_waze_with_information\n\n\ndef base(source, target):\n    'call function to find path using uniform cost, and return list of indices'\n\n    # Using base Uniform Cost Search algorithm:\n    # Returns lowest cost (distance) path between source and target.\n    # If not found returns None\n    return base_with_information(source, target)[0]\n\n\ndef betterWaze(source, target, abstractMap=None):\n    'call function to find path using the betterWaze algorithm, and return list of indices'\n    if not abstractMap:\n        raise NotImplementedError # You should load the map you were asked to pickle\n        # Note: pickle might give you an error for the namedtuples, even if they\n        # are imported indirectly from ways.graph. You might need to declare, for\n        # example: Link = ways.graph.Link\n\n    # Using our betterWaze algorithm:\n    # Return lowest cost (distance) path between source and target.\n    # If not found returns None\n    return better_waze_with_information(source, target, abstractMap)[0]\n\n\ndef dispatch(argv):\n    source, target = int(argv[2]), int(argv[3])\n    if argv[1] == 'base':\n        path = base(source, target)\n    elif argv[1] == 'bw':\n        abstractMap = None\n        if len(argv)>4:\n            import pickle as pkl\n            abstractMap = pkl.load(open(argv[4],'rb'))\n        path = betterWaze(source, target, abstractMap)\n    print(' '.join(str(j) for j in path))\n\n\nif __name__ == '__main__':\n    from sys import argv\n    dispatch(argv)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"38211420","text":"# -*- coding: UTF-8 -*-\n\nimport socket  # import the socket package\n\ns=socket.socket()  # create the socket\nhost=socket.gethostname()  # get the host name\nport=12345  # pick a port\ns.bind((\"localhost\",port))  # bind to the port\n\ns.listen(5)  # wait for client connections\n\nwhile True:\n    c,addr=s.accept()  # accept a client connection\n    print (\"Connection address:\",addr)\n    c.send('Welcome!')\n    c.close()","sub_path":"learn_python_socket/test1_server.py","file_name":"test1_server.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10779012","text":"# coding=utf-8\nimport os  # needed by plot_confusion_matrix's save branch\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import pylab\n\nimport dataset\nimport datadraw\n\nclass DataDraw():\n    ax=''\n    colors = {\n        'aliceblue': '#F0F8FF',\n        'antiquewhite': '#FAEBD7',\n        'aqua': '#00FFFF',\n        'aquamarine': '#7FFFD4',\n        'azure': '#F0FFFF',\n        'beige': '#F5F5DC',\n        'bisque': '#FFE4C4',\n        'black': '#000000',\n        'blanchedalmond': '#FFEBCD',\n        'blue': '#0000FF',\n        'blueviolet': '#8A2BE2',\n        'brown': '#A52A2A',\n        'burlywood': '#DEB887',\n        'cadetblue': '#5F9EA0',\n        'chartreuse': '#7FFF00',\n        'chocolate': '#D2691E',\n        'coral': '#FF7F50',\n        'cornflowerblue': '#6495ED',\n        'cornsilk': '#FFF8DC',\n        'crimson': '#DC143C',\n        'cyan': '#00FFFF',\n        'darkblue': '#00008B',\n        'darkcyan': '#008B8B',\n        'darkgoldenrod': '#B8860B',\n        'darkgray': '#A9A9A9',\n        'darkgreen': '#006400',\n        'darkkhaki': '#BDB76B',\n        'darkmagenta': '#8B008B',\n        'darkolivegreen': '#556B2F',\n        'darkorange': '#FF8C00',\n        'darkorchid': '#9932CC',\n        'darkred': '#8B0000',\n        'darksalmon': '#E9967A',\n        'darkseagreen': '#8FBC8F',\n        'darkslateblue': '#483D8B',\n        'darkslategray': '#2F4F4F',\n        'darkturquoise': '#00CED1',\n        'darkviolet': '#9400D3',\n        'deeppink': '#FF1493',\n        'deepskyblue': '#00BFFF',\n        'dimgray': 
'#696969',\n 'dodgerblue': '#1E90FF',\n 'firebrick': '#B22222',\n 'floralwhite': '#FFFAF0',\n 'forestgreen': '#228B22',\n 'fuchsia': '#FF00FF',\n 'gainsboro': '#DCDCDC',\n 'ghostwhite': '#F8F8FF',\n 'gold': '#FFD700',\n 'goldenrod': '#DAA520',\n 'gray': '#808080',\n 'green': '#008000',\n 'greenyellow': '#ADFF2F',\n 'honeydew': '#F0FFF0',\n 'hotpink': '#FF69B4',\n 'indianred': '#CD5C5C',\n 'indigo': '#4B0082',\n 'ivory': '#FFFFF0',\n 'khaki': '#F0E68C',\n 'lavender': '#E6E6FA',\n 'lavenderblush': '#FFF0F5',\n 'lawngreen': '#7CFC00',\n 'lemonchiffon': '#FFFACD',\n 'lightblue': '#ADD8E6',\n 'lightcoral': '#F08080',\n 'lightcyan': '#E0FFFF',\n 'lightgoldenrodyellow': '#FAFAD2',\n 'lightgreen': '#90EE90',\n 'lightgray': '#D3D3D3',\n 'lightpink': '#FFB6C1',\n 'lightsalmon': '#FFA07A',\n 'lightseagreen': '#20B2AA',\n 'lightskyblue': '#87CEFA',\n 'lightslategray': '#778899',\n 'lightsteelblue': '#B0C4DE',\n 'lightyellow': '#FFFFE0',\n 'lime': '#00FF00',\n 'limegreen': '#32CD32',\n 'linen': '#FAF0E6',\n 'magenta': '#FF00FF',\n 'maroon': '#800000',\n 'mediumaquamarine': '#66CDAA',\n 'mediumblue': '#0000CD',\n 'mediumorchid': '#BA55D3',\n 'mediumpurple': '#9370DB',\n 'mediumseagreen': '#3CB371',\n 'mediumslateblue': '#7B68EE',\n 'mediumspringgreen': '#00FA9A',\n 'mediumturquoise': '#48D1CC',\n 'mediumvioletred': '#C71585',\n 'midnightblue': '#191970',\n 'mintcream': '#F5FFFA',\n 'mistyrose': '#FFE4E1',\n 'moccasin': '#FFE4B5',\n 'navajowhite': '#FFDEAD',\n 'navy': '#000080',\n 'oldlace': '#FDF5E6',\n 'olive': '#808000',\n 'olivedrab': '#6B8E23',\n 'orange': '#FFA500',\n 'orangered': '#FF4500',\n 'orchid': '#DA70D6',\n 'palegoldenrod': '#EEE8AA',\n 'palegreen': '#98FB98',\n 'paleturquoise': '#AFEEEE',\n 'palevioletred': '#DB7093',\n 'papayawhip': '#FFEFD5',\n 'peachpuff': '#FFDAB9',\n 'peru': '#CD853F',\n 'pink': '#FFC0CB',\n 'plum': '#DDA0DD',\n 'powderblue': '#B0E0E6',\n 'purple': '#800080',\n 'red': '#FF0000',\n 'rosybrown': '#BC8F8F',\n 'royalblue': '#4169E1',\n 'saddlebrown': '#8B4513',\n 'salmon': '#FA8072',\n 'sandybrown': '#FAA460',\n 'seagreen': '#2E8B57',\n 'seashell': '#FFF5EE',\n 'sienna': '#A0522D',\n 'silver': '#C0C0C0',\n 'skyblue': '#87CEEB',\n 'slateblue': '#6A5ACD',\n 'slategray': '#708090',\n 'snow': '#FFFAFA',\n 'springgreen': '#00FF7F',\n 'steelblue': '#4682B4',\n 'tan': '#D2B48C',\n 'teal': '#008080',\n 'thistle': '#D8BFD8',\n 'tomato': '#FF6347',\n 'turquoise': '#40E0D0',\n 'violet': '#EE82EE',\n 'wheat': '#F5DEB3',\n 'white': '#FFFFFF',\n 'whitesmoke': '#F5F5F5',\n 'yellow': '#FFFF00',\n 'yellowgreen': '#9ACD32'\n }\n\n def __init__(self,typex='3d'):\n if typex==\"3d\":\n fig = plt.figure() \n self.ax = fig.add_subplot(111, projection='3d') \n\n def getColorsValue(self):\n colors=self.colors\n keys=colors.keys()\n colors_tmp=[colors[key] for key in keys]\n return colors_tmp\n\n def drawline(self,data,c='r'):\n x=data[0]\n y=data[1]\n plt.plot(x,y,c=c)\n\n def drawgoal(self,data,c='r'):\n plt.scatter(data[0],data[1],c=c)\n\n def drawbatchgoal(self,data,c='r'):\n plt.scatter(data[:,0],data[:,1],c=c)\n\n def draw(self,data,fname='./data/a.png',save=False):\n x=data[0]\n y=data[1]\n plt.plot(x,y)\n if save:\n plt.savefig(fname)\n plt.clf()\n plt.close()\n else:\n plt.show()\n\n def draw3dline(self,data,ax='',c='r'):\n if ax=='':\n ax=self.ax\n X=data[0]\n Y=data[1]\n Z=data[2]\n ax.plot(X,Y,Z,c=c)\n\n def draw3dgoal(self,data,ax='',c='r'):\n if ax=='':\n ax=self.ax\n ax.scatter(data[0],data[1],data[2],c=c)\n\ndef draw3d():\n ds=dataset.DataSet()\n ds.getTrainData()\n 
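# \"mouses\" holds the recorded pointer trajectories, \"goals\" the matching target coordinates\n    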
mouses=ds.train[\"mouses\"]\n    goals=ds.train[\"goals\"]\n    dw=datadraw.DataDraw()\n\n    START=2700\n    PAIRS=2\n    colors=['b','r','g','y','c','k','m']\n    for i in range(PAIRS):\n        dw.draw3dline(mouses[i])\n        dw.draw3dline(mouses[START+i])\n        dw.draw3dgoal([goals[i][0],goals[i][1],i],c=colors[i%7])\n        dw.draw3dgoal([goals[START+i][0],goals[START+i][1],START+i],c=colors[(i+3)%7])\n    plt.show()\n\ndef draw2d():\n    ds=dataset.DataSet()\n    ds.getTrainData()\n    mouses=ds.train[\"mouses\"]\n    goals=ds.train[\"goals\"]\n    dw=datadraw.DataDraw(\"2d\")\n\n    START=2700\n    PAIRS=2\n    colors=['b','r','g','y','c','k','m']\n    for i in range(PAIRS):\n        dw.drawline(mouses[i])\n        dw.drawline(mouses[START+i])\n        # dw.drawgoal([goals[i][0],goals[i][1],i],c=colors[i%7])\n        # dw.drawgoal([goals[START+i][0],goals[START+i][1]],c=colors[(i+3)%7])\n    plt.show()\n\ndef drawScatter():\n    ds=dataset.DataSet()\n    ds.getTrainData()\n    mouses=ds.train[\"mouses\"]\n    goals=ds.train[\"goals\"]\n    dw=datadraw.DataDraw(\"2d\")\n    mouses_start=ds.getPosOfMouse(1)\n    dw.drawbatchgoal(mouses_start[:2600],'y')\n    dw.drawbatchgoal(mouses_start[2600:],'b')\n\n    dw.drawbatchgoal(goals[:2600],'y')\n    dw.drawbatchgoal(goals[2600:],'b')\n\n    plt.show()\n\ndef plot_confusion_matrix(cm, genre_list, name, title, max, save=False):\n    pylab.clf()\n    pylab.matshow(cm, fignum=False, cmap='Greens', vmin=0, vmax=max)\n    ax = pylab.axes()\n    ax.set_xticks(range(len(genre_list)))\n    ax.set_xticklabels(genre_list)\n    ax.xaxis.set_ticks_position(\"bottom\")\n    ax.set_yticks(range(len(genre_list)))\n    ax.set_yticklabels(genre_list)\n    cm=cm.T\n    for i in range(len(cm)):\n        # t=len(cm[0])-i\n        for j in range(len(cm[0])):\n            if cm[i,j]<1e-2:\n                continue\n            pylab.text(i, j, '%.2f'%cm[i,j])\n\n    # labels and the optional save must happen before show(); the original set\n    # them after show(), so they never appeared on the rendered figure\n    pylab.title(title)\n    pylab.colorbar()\n    pylab.xlabel('Predicted class')\n    pylab.ylabel('True class')\n    pylab.grid(True)\n    if save==True:\n        # CHART_DIR is expected to be defined by the surrounding project\n        pylab.savefig(os.path.join(CHART_DIR, \"confusion_matrix_%s.png\"%name), bbox_inches=\"tight\")\n    pylab.show()\n\n\nif __name__==\"__main__\":\n    draw3d()\n    # draw2d()\n    # drawScatter()","sub_path":"ml/20170531mr/datadraw.py","file_name":"datadraw.py","file_ext":"py","file_size_in_byte":9619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611926529","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 Christoph Heuel \n# Distributed under terms of the MIT license.\n#\n\"\"\"\nUsage:\n    hm-prowee -u <user> -s <server> -p <port> list\n    hm-prowee -u <user> -s <server> -p <port> print-config <id>\n    hm-prowee -u <user> -s <server> -p <port> print-temp <id>\n    hm-prowee -u <user> -s <server> -p <port> set-temp <id> <template_file>\n\"\"\"\nimport xmlrpc.client\nimport ssl\nimport json\nfrom docopt import docopt\nimport getpass\n\nxmlc = None\n\nMAX_POINTS = 13\nMAX_ENDTIME = 1440\n\n# Homegear constants\nHG_FILTER_BY_TYPE_ID = 3\nHG_HEATERS_TYPE_ID = \"0x95\"\n\ndef pp(jsontext):\n    \"\"\"Pretty print json text\"\"\"\n    print(json.dumps(jsontext, sort_keys=True, indent=4, separators=(',', ': ')))\n\ndef list_heaters():\n    \"\"\"List heater devices from server\"\"\"\n    try:\n        heaters = xmlc.getPeerId(HG_FILTER_BY_TYPE_ID, HG_HEATERS_TYPE_ID)\n    except:\n        print(\"Can't load list of devices!\")\n        exit(1)\n    print(\"{0:4} {1}\".format(\"ID\", \"Name\"))\n    for i in heaters:\n        print(\"{0:4} {1}\".format(i, xmlc.getName(i)))\n\ndef print_paramsets(id):\n    \"\"\"Print parameterset for specific device id\n\n    :param id: device ID to receive Parameterset for\"\"\"\n    pp(xmlc.getParamset(int(id), 0, \"MASTER\"))\n\ndef print_temp_config(id):\n    \"\"\"Print temp config file for specific device id\n\n    :param id: device ID to receive temp config for\"\"\"\n    params = xmlc.getParamset(int(id), 0, \"MASTER\")\n    weekdays = [\"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\", \"sunday\"]\n\n    for weekday in weekdays:\n        elements = []\n\n        for i in range(1, MAX_POINTS+1):\n            temperature_key = \"TEMPERATURE_{0}_{1}\".format(weekday.upper(), i)\n            endtime_key = \"ENDTIME_{0}_{1}\".format(weekday.upper(), i)\n\n            temperature_value = params[temperature_key]\n            endtime_value = params[endtime_key]\n\n            elements.append(\"{0:.1f} > {1};\".format(temperature_value, calculate_timedef_from_minutes(endtime_value)))\n\n            if endtime_value == MAX_ENDTIME:\n                break\n\n        print(\"{0} = {1}\".format(weekday.upper(), \" \".join(elements)))\n\ndef calculate_minutes_from_midnight(timedef):\n    \"\"\"Calculate the minutes from midnight for a given time\n\n    :param timedef: Time to calculate minutes from midnight. Eg. '07:00'\n    :returns: Minutes from midnight\"\"\"\n    l = timedef.split(\":\")\n    if not len(l) == 2:\n        raise TypeError(\"{0} is not in format HH:MM!\".format(timedef))\n    hours = int(l[0])\n    minutes = hours*60 + int(l[1])\n    if minutes > MAX_ENDTIME:\n        minutes = MAX_ENDTIME\n    return minutes\n\ndef calculate_timedef_from_minutes(minutes):\n    \"\"\"Calculate time from minutes from midnight\n\n    :param minutes: Minutes from midnight for calculation\"\"\"\n    return \"{0:02}:{1:02}\".format(int(minutes / 60), minutes % 60)\n\ndef parse_temperature_item(item):\n    \"\"\"Parse item for time and temperature\n\n    :param item: Definition, eg. 
'17.0 > 07:00'\n    :returns: dict with temperature and minutes\"\"\"\n    temp_time_tupel = item.split(\">\")\n    temperature = float(temp_time_tupel[0].strip())\n    minutes_from_midnight = calculate_minutes_from_midnight(temp_time_tupel[1].strip())\n    return { 'minutes_from_midnight' : minutes_from_midnight, 'temperature' : temperature}\n\ndef parse_temperature_definition(temp_def_raw):\n    \"\"\"Parse list of temperature definitions\n\n    :param temp_def_raw: List separated by ';' of temperature/time definitions\n    :returns: list of hashes with temperature/time definitions\"\"\"\n    temp_def_list = filter(None, temp_def_raw.split(\";\"))\n    l = []\n    for i in temp_def_list:\n        l.append(parse_temperature_item(i))\n    return l\n\ndef read_from_file(filename):\n    \"\"\"Read config file\n\n    :param filename: Filename to read from\n    :returns: Parsed definition of each line\"\"\"\n    lines = []\n    with open(filename, \"r\") as config:\n        lines = config.read().splitlines()\n\n    deflist = {}\n    for i in lines:\n        l = i.split(\"=\")\n        weekday = l[0].strip()\n        temp_def = parse_temperature_definition(l[1])\n        deflist[weekday] = temp_def\n\n    return deflist\n\ndef set_temp_to_homegear(id, definition_list):\n    \"\"\"Send list of definitions to ID\n\n    :param id: ID to receive definition\n    :param definition_list: List of temperature/time definitions\"\"\"\n    send_dict = {}\n    last_temperature = 17.0\n    for weekday, templist in definition_list.items():\n        for i in range(1, MAX_POINTS+1):\n            temperature_key = \"TEMPERATURE_{0}_{1}\".format(weekday.upper(), i)\n            endtime_key = \"ENDTIME_{0}_{1}\".format(weekday.upper(), i)\n            if i > len(templist):\n                temperature_value = last_temperature\n                endtime_value = MAX_ENDTIME\n            else:\n                temperature_value = templist[i-1][\"temperature\"]\n                endtime_value = templist[i-1][\"minutes_from_midnight\"]\n                last_temperature = temperature_value\n\n            send_dict[temperature_key] = float(temperature_value)\n            send_dict[endtime_key] = endtime_value\n            if endtime_value == MAX_ENDTIME:\n                break\n    print(send_dict)\n    xmlc.putParamset(int(id), 0, \"MASTER\", send_dict)\n\ndef set_temp_config(id, template_file):\n    \"\"\"Read file and send to server\n\n    :param id: ID to receive values\n    :param template_file: File to read from\"\"\"\n    config_from_file = read_from_file(template_file)\n    set_temp_to_homegear(id, config_from_file)\n\nif __name__ == \"__main__\":\n    arguments = docopt(__doc__)\n    ctx = ssl._create_unverified_context()\n\n    passwd = getpass.getpass()\n\n    xmlc = xmlrpc.client.ServerProxy(\n        \"https://{0}:{1}@{2}:{3}/\".format(\n            arguments['<user>'], passwd, arguments['<server>'], arguments['<port>']\n        ),\n        context=ctx)\n\n    try:\n        version = xmlc.getVersion()\n        print(\"Successfully connected to\", version)\n    except:\n        print(\"Connection not successful, please check your parameters.\")\n        exit(1)\n\n    if arguments['list']:\n        list_heaters()\n    elif arguments['print-config']:\n        print_paramsets(arguments[\"<id>\"])\n    elif arguments['print-temp']:\n        print_temp_config(arguments[\"<id>\"])\n    elif arguments['set-temp']:\n        set_temp_config(arguments[\"<id>\"], arguments[\"<template_file>\"])\n","sub_path":"hm-prowee.py","file_name":"hm-prowee.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"381859718","text":"# coding: utf-8\nimport logging\n\nfrom core.config import config\nfrom core.utils import exception_handler, api_exception_handler\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DockerContainer:\n    def __init__(self, origin):\n        \"\"\"\n\n        :type origin: Container\n        \"\"\"\n        
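# wrap the docker-py Container object; the decorated properties below delegate to it and swallow lookup errors\n        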
self.origin = origin\n\n @property\n @exception_handler(return_on_exc=\"\")\n def id(self):\n return self.origin.id\n\n @property\n def short_id(self):\n return self.origin.short_id\n\n @property\n @exception_handler(return_on_exc=\"\")\n def name(self):\n return self.origin.name\n\n @property\n @exception_handler(return_on_exc=\"\")\n def status(self):\n return self.origin.status\n\n @property\n def ip(self):\n if config.BIND_LOCALHOST_PORTS:\n return config.PUBLIC_IP\n else:\n networks = self.origin.attrs[\"NetworkSettings\"][\"Networks\"]\n return networks.get(\"vmmaster\", {}).get(\"IPAddress\", \"\")\n\n @property\n def ports(self):\n _ports = {}\n try:\n for original_port, bind_port in self.origin.attrs[\"NetworkSettings\"][\"Ports\"].items():\n original_port = str(original_port.replace(\"/tcp\", \"\"))\n _ports[original_port] = str(bind_port[0][\"HostPort\"])\n except:\n log.debug(\"Network settings isn't available\")\n return _ports\n\n @exception_handler()\n def exec_run(self, cmd, *args, **kwargs):\n return self.origin.exec_run(cmd=cmd, detach=True, *args, **kwargs)\n\n @exception_handler()\n def export(self):\n raise NotImplementedError\n\n @exception_handler()\n def get_archive(self):\n raise NotImplementedError\n\n @exception_handler()\n def kill(self, signal=None):\n return self.origin.kill(signal=signal)\n\n @exception_handler()\n def logs(self, **kwargs):\n return self.origin.logs(**kwargs)\n\n @api_exception_handler()\n def remove(self, **kwargs):\n kwargs[\"force\"] = True\n return self.origin.remove(**kwargs)\n\n @exception_handler()\n def rename(self):\n raise NotImplementedError\n\n @exception_handler()\n def restart(self, **kwargs):\n return self.origin.restart(**kwargs)\n\n @exception_handler()\n def stop(self, **kwargs):\n return self.origin.stop(**kwargs)\n\n @exception_handler()\n def pause(self):\n raise NotImplementedError\n\n @exception_handler()\n def unpause(self):\n raise NotImplementedError\n\n\nclass DockerManageClient:\n def __init__(self):\n from docker import DockerClient\n self.client = DockerClient(\n base_url=config.DOCKER_BASE_URL,\n timeout=config.DOCKER_TIMEOUT,\n num_pools=config.DOCKER_NUM_POOLS\n )\n\n @api_exception_handler()\n def containers(self, all=None, before=None, filters=None, limit=-1, since=None):\n return [\n DockerContainer(container) for container in self.client.containers.list(\n all=all, before=before, filters=filters, limit=limit, since=since\n )\n ]\n\n @exception_handler()\n def get_container(self, container_id):\n return DockerContainer(\n self.client.containers.get(container_id)\n )\n\n @exception_handler()\n def create_container(self, image, command=None):\n return self.get_container(\n self.client.containers.create(image=image, command=command)\n )\n\n def run_container(self, image, ports, name=None, env_vars=None, *args, **kwargs):\n \"\"\"\n\n :type image: str\n :type ports: list\n :type name: str\n :type env_vars: dict\n :rtype: DockerContainer\n \"\"\"\n if config.BIND_LOCALHOST_PORTS:\n kwargs[\"ports\"] = {\"%s/tcp\" % port: None for port in ports}\n if name:\n kwargs[\"name\"] = name\n if env_vars:\n kwargs[\"environment\"] = env_vars\n\n kwargs.update({\n \"dns\": config.DNS_LIST,\n \"dns_search\": config.DNS_SEARCH_LIST,\n \"image\": image,\n \"privileged\": True,\n \"mem_limit\": config.DOCKER_CONTAINER_MEMORY_LIMIT,\n \"cpu_period\": config.DOCKER_CONTAINER_CPU_PERIOD,\n \"cpu_quota\": config.DOCKER_CONTAINER_CPU_QUOTA,\n \"detach\": True,\n \"publish_all_ports\": True,\n \"volumes\": 
config.DOCKER_CONTAINER_VOLUMES,\n })\n return DockerContainer(self.client.containers.run(*args, **kwargs))\n\n @exception_handler()\n def get_image(self, name):\n from vmpool.platforms import DockerImage\n return DockerImage(self.client.images.get(name=name))\n\n @exception_handler()\n def images(self, name=None, all=None, filters=None):\n from vmpool.platforms import DockerImage\n return [\n DockerImage(image) for image in self.client.images.list(\n name=name, all=all, filters=filters) if len(image.tags)\n ]\n\n @exception_handler()\n def pull_image(self, name=None, tag=None):\n self.client.images.pull(\n name=name,\n tag=tag\n )\n\n def create_network(self, network_name):\n \"\"\"\n\n :rtype: Network\n \"\"\"\n import docker\n ipam_pool = docker.types.IPAMPool(\n subnet=config.DOCKER_SUBNET,\n gateway=config.DOCKER_GATEWAY\n )\n ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n return self.client.networks.create(\n network_name,\n check_duplicate=True,\n ipam=ipam_config\n )\n\n @exception_handler()\n def delete_network(self, network_id):\n network = self.client.networks.get(network_id)\n if network:\n network.remove()\n","sub_path":"core/clients/docker_client.py","file_name":"docker_client.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549358849","text":"#!/usr/bin/env python3.6\n# coding: utf-8\n#dict_defaultdict.PY\n# Created on 2017/12/23\n# @author: zhaoyun\n\"\"\"\ndescription:\n\n\"\"\"\nfrom collections import defaultdict\nd2 = defaultdict(list) # 函数list\nprint(d2)\nfor k in 'mnopq':\n for v in range(3):\n d2[k].append(v) # 是一个列表\nprint(d2)\nprint(type(d2))\nfor k in d2.items():\n print(k)\nprint(\"+++++++++++++++++++\")\nd1={}\nfor k in \"abcde\":\n for v in range(5):\n if k not in d1.keys():\n d1[k] = []\n d1[k].append(v)\nprint(d1)","sub_path":"数据结构/dict/dict_defaultdict.py","file_name":"dict_defaultdict.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437188263","text":"from decimal import Decimal\n\n\ndef test_builtin_constants(get_contract_with_gas_estimation):\n code = \"\"\"\n@public\ndef test_zaddress(a: address) -> bool:\n return a == ZERO_ADDRESS\n\n\n@public\ndef test_int128(a: int128) -> (bool, bool):\n return a == MAX_INT128, a == MIN_INT128\n\n\n@public\ndef test_decimal(a: decimal) -> (bool, bool):\n return a == MAX_DECIMAL, a == MIN_DECIMAL\n\n\n@public\ndef test_uint256(a: uint256) -> bool:\n return a == MAX_UINT256\n\n\n@public\ndef test_arithmetic(a: int128) -> int128:\n return MAX_INT128 - a\n \"\"\"\n\n c = get_contract_with_gas_estimation(code)\n\n assert c.test_zaddress(\"0x0000000000000000000000000000000000000000\") is True\n assert c.test_zaddress(\"0x0000000000000000000000000000000000000012\") is False\n\n assert c.test_int128(2**127 - 1) == [True, False]\n assert c.test_int128(-2**127) == [False, True]\n assert c.test_int128(0) == [False, False]\n\n assert c.test_decimal(Decimal(2**127 - 1)) == [True, False]\n assert c.test_decimal(Decimal('-170141183460469231731687303715884105728')) == [False, True]\n assert c.test_decimal(Decimal('0.1')) == [False, False]\n\n assert c.test_uint256(2**256 - 1) is True\n\n assert c.test_arithmetic(5000) == 2**127 - 1 - 5000\n\n\ndef test_reserved_keyword(get_contract, assert_compile_failed):\n code = \"\"\"\n@public\ndef test():\n ZERO_ADDRESS: address\n \"\"\"\n assert_compile_failed(lambda: 
get_contract(code))\n","sub_path":"tests/parser/types/numbers/test_constants.py","file_name":"test_constants.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247235741","text":"# -*- coding:utf-8 -*-\nimport pygame\nimport pygame.mixer\nfrom pygame.locals import *\nimport time\nfrom datetime import datetime, timedelta, timezone\nimport sys\n\n\"\"\" parameter \"\"\"\nWIDTH = 640 # screen width\nHIGHT = 480 # screen height\ndt_target1 = datetime(2020, 11, 1, 9, 0, 0, 0) # core-time target for the countdown\nYAMABUKI = (248, 169, 0)\nYELLOW = (247,214,0)\nRED = (247, 16, 0)\n\n\npygame.init()\nSCREEN = pygame.display.set_mode((WIDTH, HIGHT))\n# SCREEN = pygame.display.set_mode((WIDTH, HIGHT),FULLSCREEN) # for the Raspberry Pi\nCLOCK = pygame.time.Clock()\n\n\ndef main():\n    #=== initial setup ===#\n    pygame.display.set_caption(\"Countdown ver EVA\") # text shown in the title bar\n    font_7segL = pygame.font.Font('font/DSEG7ModernMini-BoldItalic.ttf', 80) # load the 7-segment font\n    font_7segM = pygame.font.Font('font/DSEG7ModernMini-BoldItalic.ttf', 55) # load the 7-segment font\n    font_7segS = pygame.font.Font('font/DSEG7ModernMini-BoldItalic.ttf', 30) # load the 7-segment font\n    font_minchoL = pygame.font.Font('font/AozoraMincho-bold.ttf', 60) # load the Mincho serif font\n    font_minchoM = pygame.font.Font('font/AozoraMincho-bold.ttf', 40) # load the Mincho serif font\n    font_minchoS = pygame.font.Font('font/AozoraMincho-bold.ttf', 20) # load the Mincho serif font\n    font_L = pygame.font.SysFont('arial', 60, bold=True)\n    font_M = pygame.font.SysFont('arial', 40, bold=True)\n    font_S = pygame.font.SysFont('arial', 15, bold=True) # for the Raspberry Pi\n    cnt = 0 # initialize the counter\n\n    #=== play background music ===#\n    pygame.mixer.init(frequency = 44100)\n    # pygame.mixer.music.load(\"music/yashima.mp3\")\n    # pygame.mixer.music.load(\"music/01 3EM01_EM20_Master.mp3\")\n    pygame.mixer.music.load(\"music/evangelion_OST.mp3\")\n    pygame.mixer.music.play(-1)\n\n    #=== extract days, hours, minutes, seconds and milliseconds from a timedelta ===#\n    def get_time(sec):\n        td = timedelta(seconds=sec)\n        m, s = divmod(td.seconds, 60)\n        h, m = divmod(m, 60)\n        h += td.days*24\n        ms = td.microseconds / 1000\n        if h <= -100 or h >= 100:\n            h, m, s, ms = 88, 88, 88, 88\n        return h, m, s\n\n    while (1):\n        CLOCK.tick(30) # frame rate (fps)\n        SCREEN.fill((0,0,0)) # clear the screen to black\n        dt_now = datetime.now() # current local time\n        utc_now = datetime.now(timezone.utc) # current time in UTC\n        dt_delta1 = (dt_target1 - dt_now).total_seconds() # seconds remaining until the target\n\n        # cnt += 1 # advance the counter\n        # if cnt == 3:cnt = 0 # reset the counter every 3 ticks\n\n        #=== draw the Japan Standard Time readout ===#\n        title1 = font_minchoL.render(\"日本標準時\", True, YAMABUKI)\n        title2 = font_S.render(\"Japan Standard Time\", True, YAMABUKI)\n        title3 = font_M.render(\"Live\", True, YAMABUKI)\n        date = font_7segL.render(dt_now.strftime(\"%H %M %S\"), True, YELLOW)\n        unit1 = font_M.render(\" h m s\", True, YAMABUKI)\n        unit2 = font_minchoM.render(\"時 分 秒\", True, YAMABUKI)\n        # alert1 = font_L.render(\"DANGER\", True, (RED))\n        # alert1 = font_L.render(\"WARNING\", True, (RED))\n\n        pygame.draw.rect(SCREEN, (0, 0, 0), Rect(540, 0, 100, 155), 30)\n        SCREEN.blit(title1, [20, 20])\n        SCREEN.blit(title2, [90, 80])\n        SCREEN.blit(title3, [70, 107])\n        SCREEN.blit(date, [105, 140])\n        SCREEN.blit(unit1, [225, 135])\n        SCREEN.blit(unit2, [230, 180])\n        pygame.draw.line(SCREEN, YAMABUKI, (15, 20), (15, 95), 6)\n        pygame.draw.line(SCREEN, YAMABUKI, (330, 20), (330, 95), 6)\n        # pygame.draw.rect(SCREEN, YAMABUKI, Rect(60, 110, 560, 100), 3)\n\n        pygame.draw.line(SCREEN, YAMABUKI, (60, 110), (60, 140), 10)\n        pygame.draw.line(SCREEN, YAMABUKI, (60, 140), (70, 150), 10)\n        
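# the segments below trace the angled bezel around the JST readout; coordinates are hand-tuned for the 640x480 window\n        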
pygame.draw.line(SCREEN, YAMABUKI, (70, 150), (70, 200), 10)\n        pygame.draw.line(SCREEN, YAMABUKI, (70, 200), (100, 230), 10)\n        pygame.draw.line(SCREEN, YAMABUKI, (100, 229), (620, 229), 3)\n        pygame.draw.line(SCREEN, YAMABUKI, (620, 230), (620, 130), 10)\n        pygame.draw.line(SCREEN, YAMABUKI, (620, 131), (200, 131), 3)\n        pygame.draw.line(SCREEN, YAMABUKI, (202, 132), (180, 110), 10)\n        pygame.draw.line(SCREEN, YAMABUKI, (60, 111), (180, 111), 3)\n\n        # if(cnt == 1):\n        #     SCREEN.blit(alert1, [370, 20])\n\n        #=== draw the mission start countdown ===#\n        mst_jp = font_minchoM.render(\"作戦開始時間\", True, YAMABUKI)\n        mst_en = font_S.render(\"H - hour\", True, YAMABUKI)\n        mst_date = font_7segM.render(\"{0[0]:02d}:{0[1]:02d}:{0[2]:02d}\".format(get_time(dt_delta1)), True, YELLOW)\n\n        SCREEN.blit(mst_jp, [20, 270])\n        SCREEN.blit(mst_en, [105, 310])\n        SCREEN.blit(mst_date, [310, 270])\n        pygame.draw.line(SCREEN, YAMABUKI, (15, 270), (15, 325), 6)\n        pygame.draw.line(SCREEN, YAMABUKI, (265, 270), (265, 325), 6)\n\n        #=== draw the Greenwich Mean Time readout ===#\n        gmt_jp = font_minchoM.render(\"世界標準時\", True, YAMABUKI)\n        gmt_en = font_S.render(\"Greenwich Mean Time\", True, YAMABUKI)\n        gmt_date = font_7segM.render(utc_now.strftime(\"%H:%M:%S\"), True, YELLOW)\n\n        SCREEN.blit(gmt_jp, [60, 380])\n        SCREEN.blit(gmt_en, [75, 420])\n        SCREEN.blit(gmt_date, [310, 380])\n        pygame.draw.line(SCREEN, YAMABUKI, (55, 380), (55, 435), 6)\n        pygame.draw.line(SCREEN, YAMABUKI, (265, 380), (265, 435), 6)\n\n        pygame.display.update() # refresh the display\n\n        #=== event handling ===#\n        for event in pygame.event.get():\n            if event.type == QUIT: # quit when the close button is pressed\n                pygame.quit() # shut down Pygame (closes the window)\n                sys.exit()\n            if event.type == KEYDOWN:\n                if event.key == K_ESCAPE:\n                    pygame.quit()\n                    sys.exit()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"clock_and_countdown.py","file_name":"clock_and_countdown.py","file_ext":"py","file_size_in_byte":6076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"134195733","text":"'''\nLeetcode - 1bit,2bit problem.\nValid Bits:\n1 bit is 0\n2 Bits are 10, 11\n- check that the last element of the list is a 1-bit (a lone 0) and nothing else\n- Make sure that if the last character is a 1-bit, the rest of the string is valid and can be constructed with 10, 11 or 0s\n'''\nfrom itertools import permutations\nclass Solution(object):\n    def isOneBitCharacter(self, bits):\n        \"\"\"\n        :type bits: List[int]\n        :rtype: bool\n        \"\"\"\n        if not bits:return False\n        n = len(bits)\n\n        index=0\n        while index 
.3))\r\n\r\n # Kart Frontal Area (m^2)\r\n self._frontal = float(vars.get('frontal', .56))\r\n\r\n # Air Density (constant)\r\n self._airDensity = float(vars.get('airDensity', 1.225))\r\n\r\n # Total of forces acting on kart\r\n self._forceTotal = float(vars.get('forceTotal', 10))\r\n\r\n # Clutch slip as percentage\r\n self._clutchSlip = float(vars.get('clutchSlip', 1.0))\r\n \r\n # Driven Wheel Circumfrance\r\n self._drivenWheelCir = float(vars.get('drivenWheelCir', math.pi * self._wheelDia))\r\n\r\n # Engine State\r\n self._state = vars.get('state', EngineState.ON) \r\n\r\n def simulate_distance_run(self, until:int, step:float, throttle:int)->bool:\r\n \"\"\"\r\n Method to simulate a Go-Kart run until a distance at a static throttle\r\n\r\n until: measure of meters to go\r\n step: time step (increment)\r\n throttle: throttle percentage divisible by 10\r\n \"\"\"\r\n \r\n try:\r\n fname = \"distance_run_\" + str('%.3f' % step).split('.')[1] + \"ms.txt\"\r\n\r\n #Starting Values\r\n drag = 0\r\n output = []\r\n dist = 0\r\n velSprint = 0\r\n distSprint = 0\t\r\n clutchSprint = 0\r\n rpm = 2000\r\n timeSum = 0\r\n lockup = False\r\n bsfc = 0\r\n torque = self._outputTorque\r\n\r\n # Main Loop\r\n while (dist < until):\r\n\r\n # Calculated\r\n kAccel = (((torque * self._spRatio * 2) / self._wheelDia) - self._forceTotal - drag) / self._kMass # mph\r\n velSpeed = velSprint + kAccel * step # meters / second\r\n dist += velSpeed * step # meters\r\n drag = (velSpeed ** 2) * self._airDensity * self._dragCoefficent * self._frontal / 2 # Drag Coefficient\r\n clutchSpeed = velSpeed * 60 * self._spRatio / self._drivenWheelCir \r\n slip = (rpm - clutchSprint) / rpm\r\n deltaBSFC = calcBSFC(int(rpm), int(throttle)) * calcPower(int(rpm), int(throttle)) * step\r\n bsfc += deltaBSFC\r\n\r\n # for slip < 0 we need to look up engine speeed using the clutchSpeed. 
Look up outputTorque == engine torque.\r\n # if lockup == true or slip below 0 look up the table.\r\n if (lockup == True or slip <= 0):\r\n lockup = True\r\n\r\n rpm = clutchSpeed\r\n \r\n # Lookup torque value\r\n torque = calcTorque(rpm, throttle)\r\n \r\n \r\n # Output\r\n output.append([round(timeSum, self._dLim), round(kAccel, self._dLim), round(velSpeed, self._dLim), round(dist, self._dLim), round(slip, self._dLim), round(bsfc, self._dLim), round(rpm, self._dLim), round(self._outputTorque, self._dLim)])\r\n\r\n # Iterate Variables\r\n velSprint = velSpeed\r\n distSprint = dist\r\n\r\n clutchSprint = clutchSpeed\r\n timeSum += step\r\n\r\n # Finally\r\n with open('runs/' + fname, 'w') as csvfile:\r\n filewriter = csv.writer(csvfile, delimiter=',',\r\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n filewriter.writerow([\"Time Step\", \"Kart Accel\", \"Vehicle Speed\", \"Total Distance\", \"Clutch Slip\", \"BSFC\", \"RPM\", \"Torque\"])\r\n for iteration in output:\r\n filewriter.writerow(iteration)\r\n return True\r\n except Exception as e:\r\n raise e\r\n print(str(e))\r\n return False\r\n\r\n def simulate_fuel_run(self, startSpeed:float, topSpeed:float, until:float, step:float, throt:int)->bool:\r\n \"\"\"\r\n Method to simulate a Go-Kart run up to full speed and coasting to a certain speed\r\n\r\n startSpeed: beginning speed\r\n topSpeed: speed to kill the engine at\r\n until: speed to stop run at\r\n step: time step (increment)\r\n throt: throttle percentage divisible by 10\r\n \"\"\"\r\n \r\n try:\r\n fname = \"fuel_run_\" + str('%.3f' % step).split('.')[1] + \"ms.txt\"\r\n\r\n #Starting Values\r\n drag = 0\r\n output = []\r\n dist = 0\r\n velSprint = startSpeed\r\n distSprint = 0\t\r\n clutchSprint = 0\r\n rpm = 2000\r\n timeSum = 0\r\n bsfc = 0\r\n torque = self._outputTorque\r\n goalReached = False\r\n state = EngineState.ON\r\n throttle = throt\r\n\r\n # Main Loop\r\n while (goalReached == False):\r\n\r\n # Calculated\r\n kAccel = (((torque * self._spRatio * 2) / self._wheelDia) - self._forceTotal - drag) / self._kMass # mph\r\n # kAccel = kAccel if (state == EngineState.ON) else -1 * kAccel\r\n velSpeed = velSprint + kAccel * step # meters / second\r\n dist += velSpeed * step # meters\r\n drag = (velSpeed ** 2) * self._airDensity * self._dragCoefficent * self._frontal / 2 # Drag Coefficient\r\n clutchSpeed = velSpeed * 60 * self._spRatio / self._drivenWheelCir \r\n slip = (rpm - clutchSprint) / rpm\r\n if (state == EngineState.ON):\r\n deltaBSFC = calcBSFC(int(rpm), int(throttle)) * calcPower(int(rpm), int(throttle)) * step\r\n bsfc += deltaBSFC\r\n\r\n # for slip < 0 we need to look up engine speeed using the clutchSpeed. 
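As above: after lockup the engine is assumed to turn at the clutch (wheel-side) speed. 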
Look up outputTorque == engine torque.\r\n                # if slip below 0 look up the table.\r\n                if (slip <= 0):\r\n                    rpm = int(clutchSpeed)\r\n\r\n                    # Lookup torque value\r\n                    torque = calcTorque(rpm, throttle)\r\n\r\n                # Output\r\n                output.append([round(timeSum, self._dLim), round(velSpeed, self._dLim), round(dist, self._dLim), round(bsfc*100, self._dLim), round(deltaBSFC, self._dLim)])\r\n\r\n                # Iterate Variables\r\n                velSprint = velSpeed\r\n                distSprint = dist\r\n\r\n                clutchSprint = clutchSpeed\r\n                timeSum += step\r\n\r\n\r\n                # Check Vehicle Speed\r\n                if (velSpeed >= topSpeed):\r\n                    state = EngineState.OFF\r\n                    throttle = 0\r\n\r\n                if (velSpeed <= until and state == EngineState.OFF):\r\n                    goalReached = True\r\n\r\n\r\n            # Finally\r\n            with open('runs/' + fname, 'w') as csvfile:\r\n                filewriter = csv.writer(csvfile, delimiter=',',\r\n                                        quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n                filewriter.writerow([\"Time Step\", \"Vehicle Speed\", \"Total Distance\", \"Total BSFC\", \"BSFC\"])\r\n                for iteration in output:\r\n                    filewriter.writerow(iteration)\r\n            return True\r\n        except Exception as e:\r\n            raise e\r\n            print(str(e))\r\n            return False\r\n\r\n\r\n# Simulate Go-Kart Runs\r\n# Generate CSV Files in runs folder\r\ns = SimulateRun()\r\nprint(s.simulate_fuel_run(0, 10, 3, .05, 100))\r\n#print(s.simulate_distance_run(100, .5, 100))\r\n#print(s.simulate_distance_run(100, .25, 100))\r\n#print(s.simulate_distance_run(100, .05, 100))","sub_path":"TechClub/fuelConsumption/versions/test_v10.py","file_name":"test_v10.py","file_ext":"py","file_size_in_byte":9565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306306898","text":"#!/usr/bin/env python \n# -*- coding:utf-8 _*- \n# Author: Wengs\n# Time : 2/22/2019 8:30 PM \n# File : test_survey.py \n# IDE : PyCharm\n\nimport unittest\nfrom survey import AnonymousSurvey\n\n\nclass TestAnonymousSurvey(unittest.TestCase):\n    \"\"\"Tests for the AnonymousSurvey class\"\"\"\n\n    def setUp(self):\n        \"\"\"\n        Create a survey object and a set of answers for the test methods to share\n        \"\"\"\n        question = \"What language did you first learn to speak?\"\n        self.my_survey = AnonymousSurvey(question)\n        self.responses = ['English', 'Chinese', 'Spanish']\n\n    def test_store_single_response(self):\n        \"\"\"Test that a single answer is stored properly\"\"\"\n        self.my_survey.store_response(self.responses[0])\n        self.assertIn(self.responses[0], self.my_survey.responses)\n\n    def test_store_three_response(self):\n        \"\"\"Test that three answers are stored properly\"\"\"\n        for response in self.responses:\n            self.my_survey.store_response(response)\n\n        for response in self.responses:\n            self.assertIn(response, self.my_survey.responses)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"chapter_11/test_survey.py","file_name":"test_survey.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248116347","text":"\"\"\"\r\n\r\nCreating a Data Repository of courses, students and instructors (HW09)\r\nAdding Features to update signing up for next classes for next Semester (HW10)\r\n\r\n@eltonaloysius\r\n\r\n\"\"\"\r\n\r\nfrom collections import defaultdict\r\nimport os\r\nfrom prettytable import PrettyTable\r\nimport sqlite3\r\nconn = sqlite3.connect(\"810_data.db\")\r\n\r\nclass Student:\r\n    \"\"\" Student class holding a student's name, major and course lists \"\"\"\r\n    def __init__(self, student_name, student_major):\r\n        \"\"\" Initialize student_name, student_major and the three course lists \"\"\"\r\n        self.student_name = 
student_name \r\n self.student_major = student_major \r\n self.student_completed_courses = list()\r\n self.student_remaining_required_courses = list()\r\n self.student_remaining_elective_courses = list()\r\n\r\n def __str__(self):\r\n return f\"Name: {self.student_name}, Major: {self.student_major}, Completed Courses: {self.student_completed_courses}, Remaining required courses: {self.student_remaining_required_courses}, Remaining elective courses: {self.student_remaining_elective_courses}\"\r\n\r\nclass Instructor:\r\n \"\"\" Initializing a Instructor class with variable instructor_name, instructor_dept, instructor_courses and instructor_students \"\"\"\r\n def __init__(self, instructor_name, instructor_dept):\r\n \"\"\" Function to initialize variable instructor_name, instructor_dept, instructor_courses and instructor_students \"\"\"\r\n self.instructor_name = instructor_name\r\n self.instructor_dept = instructor_dept\r\n self.instructor_courses_with_students = defaultdict(int) \r\n\r\n def __str__(self):\r\n return f\"Name: {self.instructor_name}, Dept: {self.instructor_dept}, Courses: {[key for key, value in self.instructor_courses_with_students.items()]}, Students: {[value for key, value in self.instructor_courses_with_students.items()]}\"\r\n\r\n\r\nclass Repository:\r\n \"\"\" Repository for Data Structures \"\"\"\r\n majors_summary = defaultdict(str)\r\n students_summary = defaultdict(str)\r\n instructors_summary = defaultdict(str)\r\n \r\n def __init__(self, directory):\r\n \"\"\" Function to initialize variable directory \"\"\"\r\n self.directory = directory\r\n \r\n def read_files(self):\r\n \"\"\" Function to read all the files and create summary table \"\"\"\r\n try:\r\n majors_file = open(os.path.join(self.directory, \"majors.txt\"), \"r\")\r\n students_file = open(os.path.join(self.directory, \"students.txt\"), \"r\")\r\n grades_file = open(os.path.join(self.directory, \"grades.txt\"), \"r\")\r\n instructors_file = open(os.path.join(self.directory, \"instructors.txt\"), \"r\")\r\n except FileNotFoundError:\r\n raise FileNotFoundError(\"File not Found / Cannot open File\")\r\n else:\r\n \"\"\" Reading majors.txt file and populating majors_summary dictionary \"\"\"\r\n for major, required_elective, course in self.file_reader(majors_file, 3, \"\\t\", False):\r\n if len(self.majors_summary[major]) == 0:\r\n self.majors_summary[major] = {}\r\n\r\n if required_elective.upper() == \"R\": \r\n if required_elective not in self.majors_summary[major]:\r\n self.majors_summary[major][required_elective] = [course]\r\n else:\r\n self.majors_summary[major][required_elective].append(course)\r\n self.majors_summary[major][required_elective].sort()\r\n elif required_elective.upper() == \"E\":\r\n if required_elective not in self.majors_summary[major]:\r\n self.majors_summary[major][required_elective] = [course]\r\n else:\r\n self.majors_summary[major][required_elective].append(course)\r\n self.majors_summary[major][required_elective].sort()\r\n else:\r\n raise ValueError(\"Invalid elective Field \")\r\n\r\n \"\"\"Reading students.txt file and adding student_summary in the dictionary\"\"\" \r\n for cwid, name, major in self.file_reader(students_file, 3, \"\\t\", False):\r\n self.students_summary[cwid] = Student(name, major)\r\n\r\n \"\"\"Reading instructors.txt file and adding instructors_summary in the dictionary\"\"\"\r\n for cwid, name, dept in self.file_reader(instructors_file, 3, \"\\t\", False):\r\n self.instructors_summary[cwid] = Instructor(name, dept)\r\n\r\n student_completed_courses 
= {}\r\n            instructor_courses_with_students = {}\r\n            \"\"\" Reading grades.txt file and populating student_completed_courses and instructor_courses_with_students dictionaries \"\"\"\r\n            for st_cwid, course, grade, in_cwid in self.file_reader(grades_file, 4, \"\\t\", False):\r\n                if grade.upper() == \"F\":\r\n                    continue\r\n\r\n                if st_cwid not in student_completed_courses:\r\n                    student_completed_courses[st_cwid] = [course]\r\n                else:\r\n                    student_completed_courses[st_cwid].append(course)\r\n\r\n                if in_cwid not in instructor_courses_with_students:\r\n                    instructor_courses_with_students.update({in_cwid: {course: 1}})\r\n                elif course not in instructor_courses_with_students[in_cwid]:\r\n                    instructor_courses_with_students[in_cwid].update({course: 1})\r\n                else:\r\n                    instructor_courses_with_students[in_cwid][course] += 1\r\n\r\n            \"\"\" Adding courses along with student count to Instructor object \"\"\"\r\n            for key, value in self.instructors_summary.items():\r\n                try:\r\n                    value.instructor_courses_with_students = instructor_courses_with_students[key]\r\n                except KeyError:\r\n                    continue\r\n\r\n            \"\"\" Adding completed courses to students \"\"\"\r\n            for key, value in student_completed_courses.items():\r\n                try:\r\n                    self.students_summary[key].student_completed_courses = sorted(value)\r\n                except KeyError:\r\n                    continue\r\n\r\n            for key, value in self.students_summary.items():\r\n                value.student_remaining_required_courses = sorted(list(set(self.majors_summary[value.student_major][\"R\"]) - set(value.student_completed_courses)))\r\n                value.student_remaining_elective_courses = sorted(list(set(self.majors_summary[value.student_major][\"E\"]) - set(value.student_completed_courses)))\r\n                if len(value.student_remaining_elective_courses) < len(self.majors_summary[value.student_major][\"E\"]):\r\n                    value.student_remaining_elective_courses = None\r\n\r\n    def file_reader(self, fp, fields, sep = \"\\t\", header = False):\r\n        \"\"\" Generator that reads all the fields in the file \"\"\"\r\n        for offset, line in enumerate(fp, start = 1):\r\n            if not header:\r\n                header = True\r\n                continue\r\n\r\n            line = line.strip(\"\\n\").split(sep)\r\n\r\n            if(len(line) != fields):\r\n                raise ValueError(f\"The file has missing fields, {len(line)} fields present, needed {fields} on line {offset} in file {fp.name}\")\r\n            else:\r\n                yield line\r\n\r\n    def print_table(self):\r\n        \"\"\" Function to print Summary Tables \"\"\"\r\n        self.read_files()\r\n\r\n        \"\"\" Adding rows for major summary table \"\"\"\r\n        print(\"Major summary:\")\r\n        table = PrettyTable(field_names = [\"Dept\",\"Required\",\"Electives\"])\r\n\r\n        for key,value in self.majors_summary.items():\r\n            if key == \"majors\":\r\n                continue\r\n            table.add_row([key, value[\"R\"], value[\"E\"]])\r\n        print(table)\r\n\r\n        \"\"\" Adding rows to student summary table \"\"\"\r\n        table = PrettyTable(field_names = [\"CWID\", \"Name\", \"Major\", \"Completed Courses\", \"Remaining Required\", \"Remaining Elective\"])\r\n\r\n        for key, value in self.students_summary.items():\r\n            table.add_row([key, value.student_name, value.student_major, value.student_completed_courses, value.student_remaining_required_courses, value.student_remaining_elective_courses])\r\n        print(table)\r\n\r\n        \"\"\" Adding rows to instructor summary table \"\"\"\r\n        table = PrettyTable(field_names = [\"CWID\", \"Name\", \"Dept\", \"Course\", \"Students\"])\r\n\r\n        for key, value in self.instructors_summary.items():\r\n            for k, v in value.instructor_courses_with_students.items():\r\n                table.add_row([key, value.instructor_name, 
value.instructor_dept, k, v])\r\n\r\n print(table)\r\n\r\ndef main():\r\n \"\"\" Function that creates an object of class Repository and prints both the tables \"\"\"\r\n repository = Repository(\"D:\\PythonDev\\.vscode\\HW11\").print_table()\r\n\r\nmain()","sub_path":"SSW_810_pycodes/HW10_Elton_Aloys.py","file_name":"HW10_Elton_Aloys.py","file_ext":"py","file_size_in_byte":8915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"260043463","text":"# 167. Two Sum II - Input array is sorted\nclass Solution:\n def twoSum(self, numbers, target):\n \"\"\"\n :type numbers: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n def bSearch(low, high, target):\n while low<=high:\n mid = low+(high-low)//2\n if numbers[mid] == target:\n return mid\n if numbers[mid] > target:\n high = mid-1\n else:\n low = mid+1\n return -1\n length = len(numbers)\n for i in range(length-1):\n j = bSearch(i+1, length-1, target-numbers[i])\n if j != -1:\n return [i+1,j+1]\n","sub_path":"167/lc167-solution1.py","file_name":"lc167-solution1.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631841943","text":"import socket\nfrom requests import get,post\nfrom Car.Constants import *\nfrom Car.MovementControl import move\nimport json\nfrom time import sleep\n\ndef resolveIP():\n response = post(get_addr_url, data=credentials)\n response = response.content.decode().replace(\"'\",'\"')\n print(response)\n connection_details = json.loads(response)\n return connection_details\n\n\ndef connect_to_control(connection_details):\n HOST = connection_details['local_ip']\n PORT = int(connection_details['port'])\n while True:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n try:\n s.connect((HOST, PORT))\n print(\">>> Connection established\")\n except Exception as e:\n print(\">>> Connection failed, 10 sec timeout...\")\n sleep(10)\n continue\n while True:\n raw_data = s.recv(1024).decode()\n data = json.loads(raw_data)\n print('Received', repr(raw_data))\n if '\"terminate\"' == raw_data or '' == raw_data:\n s.close()\n break\n else:\n move(data[\"speed\"],data[\"acc\"],data[\"action\"])\n\ndef main():\n connection_details = resolveIP()\n connect_to_control(connection_details)\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"Car/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575763621","text":"#Kyle Verdeyen\n#kverdey1@jhu.edu\n#Computer Vision EN.601.461\n#Assignment 1\n#Programming section 2, p7.py\n#Finds strong lines in an image. 
Paints detected lines on original image.\n#Hough threshold used to distinguish strong lines from short segments.\n\nimport cv2\nimport copy\nimport numpy as np\ndef p7(image, hough_image, hough_thresh): #return line_image\n\t#fancy variable assignments, bring your dress shoes\n\timage_dimensions, hough_dimensions = np.shape(image), np.shape(hough_image)\n\timage_rows, image_columns = image_dimensions[0], image_dimensions[1]\n\though_rows, hough_columns = hough_dimensions[0], hough_dimensions[1] #rho, theta\n\thypo = int(np.sqrt(image_rows**2 + image_columns**2))\n\tdiag = hypo / (hough_columns/2)\n\tline_image = copy.copy(image)\n\tfor a in range(hough_rows):\n\t\tfor b in range(hough_columns):\n\t\t\tif hough_image[a][b] > hough_thresh: #only care if edge is over threshold\n\t\t\t\t#recover theta from hough, undo what was done in p6\n\t\t\t\ttemp_theta = (np.pi/hough_columns) * b\n\t\t\t\ttemp_rho = int(round(a*np.cos(temp_theta) + b*np.sin(temp_theta)) + diag)\n\t\t\t\t#send to the salt mines\n\t\t\t\tvectors = linedraw(temp_theta, temp_rho)\n\t\t\t\t#draw on the original image\n\t\t\t\tcv2.line(line_image, vectors[0], vectors[1], (255, 255, 255))\n\treturn line_image\n\n\n#similar to getting 2 points in p3.getvectors()\ndef linedraw(theta, rho):\n\tx = np.sin(theta) * rho\n\ty = np.cos(theta) * rho\n\tvector1 = (np.absolute(int(x - 100 )), np.absolute(int(y - 100)))\n\tvector2 = (np.absolute(int(x + 100 )), np.absolute(int(y + 100)))\n\treturn [vector1, vector2]","sub_path":"HW1/p7.py","file_name":"p7.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"463600179","text":"import glob\nimport os\nimport shutil\n\nimport pytest\nimport selenium as se\nfrom selenium import webdriver\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\nfrom webdriver_manager.utils import os_type as get_os_type\n\nPATH = '.'\n\n\ndef delete_old_install(path=None):\n if path is not None:\n path = os.path.abspath(path)\n os_type = get_os_type()\n try:\n os.remove(os.path.join(path, 'edgedriver_{}.zip'.format(os_type)))\n shutil.rmtree(os.path.join(path, 'edgedriver_{}'.format(os_type)))\n except OSError:\n pass\n except Exception:\n pass\n\n\ndef test_edge_chromium_manager_with_correct_version():\n driver_path = EdgeChromiumDriverManager(\"80.0.320.0\").install()\n assert os.path.exists(driver_path)\n\n\ndef test_edge_chromium_manager_with_selenium():\n driver_path = EdgeChromiumDriverManager().install()\n options = webdriver.EdgeOptions(is_legacy=False)\n if get_os_type() in (\"win64\", \"win32\"):\n path = \"C:\\\\Users\\\\{0}\\\\AppData\\\\Local\\\\Microsoft\\\\Edge SxS\\\\Application\\\\msedge.exe\".format(os.getlogin())\n\n if os.path.isfile(path) and path.endswith(\"msedge.exe\"):\n options.binary_location = path\n elif get_os_type() == \"mac64\" and os.path.exists('/usr/bin/msedge'):\n path = \"/usr/bin/msedge\"\n options.binary_location = path\n if se.__version__.startswith('4'):\n edge = webdriver.ChromiumEdge(executable_path=driver_path, options=options, port=9516)\n edge.get(\"http://automation-remarks.com\")\n edge.quit()\n else:\n assert False\n\n\ndef test_edge_chromium_manager_with_wrong_version():\n with pytest.raises(ValueError) as ex:\n delete_old_install()\n driver_path = EdgeChromiumDriverManager(\"0.2\").install()\n assert \"There is no such driver by url https://msedgedriver.azureedge.net/0.2/edgedriver_win64.zip\" in \\\n ex.value.args[0]\n\n\ndef test_can_download_ff_x64():\n driver_path = 
EdgeChromiumDriverManager(os_type=\"win64\").install()\n assert os.path.exists(driver_path)\n\n\n@pytest.mark.parametrize('os_type', ['win32',\n 'win64',\n 'mac64'])\ndef test_can_get_driver_from_cache(os_type):\n EdgeChromiumDriverManager(os_type=os_type).install()\n driver_path = EdgeChromiumDriverManager(os_type=os_type).install()\n assert os.path.exists(driver_path)\n","sub_path":"tests/test_edge_chromium_manager.py","file_name":"test_edge_chromium_manager.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614670562","text":"\"\"\"\nfrom \"Delving Deep into Rectifiers: Surpassing Human-Level Performance on\nImageNet Classification\"\nhttp://arxiv.org/abs/1502.01852\n\"\"\"\nimport toolz\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport treeano\nimport treeano.nodes as tn\n\n\n@treeano.register_node(\"prelu\")\nclass PReLUNode(treeano.NodeImpl):\n\n hyperparameter_names = (\n \"initial_alpha\",\n # which axes should have their own independent parameters\n # only one of parameter_axes and non_parameter_axes should be set\n \"parameter_axes\",\n # which axes should not have their own independent parameters\n # only one of parameter_axes and non_parameter_axes should be set\n \"non_parameter_axes\",)\n\n def compute_output(self, network, in_vw):\n # gather hyperparameters\n initial_alpha = network.find_hyperparameter(\n [\"initial_alpha\"],\n 0.25)\n inits = list(toolz.concat(network.find_hyperparameters(\n [\"inits\"],\n [treeano.inits.ConstantInit(initial_alpha)])))\n\n # calculate_shape\n ndim = in_vw.ndim\n parameter_axes = treeano.utils.find_axes(\n network,\n ndim,\n positive_keys=[\"parameter_axes\"],\n negative_keys=[\"non_parameter_axes\"],\n positive_default=[treeano.utils.nth_non_batch_axis(network, 0)])\n broadcastable = tuple([i not in parameter_axes\n for i in range(ndim)])\n shape = tuple([1 if b else s\n for b, s in zip(broadcastable, in_vw.shape)])\n\n # create state\n alpha_vw = network.create_variable(\n \"alpha\",\n is_shared=True,\n shape=shape,\n tags={\"parameter\", \"bias\"},\n inits=inits,\n )\n alpha = T.patternbroadcast(alpha_vw.variable, broadcastable)\n\n # return output\n network.create_variable(\n \"default\",\n variable=treeano.utils.rectify(in_vw.variable,\n negative_coefficient=alpha),\n shape=in_vw.shape,\n tags={\"output\"},\n )\n","sub_path":"treeano/sandbox/nodes/prelu.py","file_name":"prelu.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566075189","text":"import unittest\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QDialog\n\nfrom ui import DisclaimerDialog\n\napp = QApplication(sys.argv)\ndisclaimer_dialog = QDialog()\ndisclaimer_dialog_ui = DisclaimerDialog.Ui_dialog()\ndisclaimer_dialog_ui.setupUi(disclaimer_dialog)\n\nclass DisclaimerDialogTests(unittest.TestCase):\n def test_defaults(self):\n '''Test the defaults'''\n self.assertEqual(disclaimer_dialog_ui.label.text(),\"Only reports supported by selected vendor will be retrieved!\")\n\n def test_button(self):\n okWidget = disclaimer_dialog_ui.buttonBox.Ok\n self.assertIsNotNone(okWidget)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_DisclaimerDialog.py","file_name":"test_DisclaimerDialog.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"516700341","text":"import os\nimport copy\nimport queue\nimport xmltodict\nimport pickle\nimport json\nimport time\nimport jinja2\nfrom collections import defaultdict\nfrom botocore.exceptions import ClientError\n\nfrom .client_tasks import MturkClient\nfrom .client_tasks import CreateHits\nfrom .client_tasks import GetAssignments\nfrom .client_tasks import ApproveAssignments\nfrom .client_tasks import UpdateHITsReviewStatus\nfrom .client_tasks import ExpireHits\nfrom .client_tasks import DeleteHits\n\n\nclass Vizier:\n\n def __init__(self, **kwargs):\n \"\"\"\n initializes a vizier instance with AWS credentials and a host\n :param aws_access_key_id the access key id.\n :param aws_secret_access_key the secret access key.\n :param host the mturk host to connect to\n \"\"\"\n self.kwargs = kwargs\n self.amt = MturkClient(**self.kwargs)\n self.n_threads = kwargs['n_threads']\n self.in_production = kwargs['in_production']\n self.s3_base_path = kwargs['s3_base_path']\n self.turk_data_schemas = {\n 'html': 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd'\n }\n self.qualifications = {\n 'high_accept_rate': 95,\n 'english_speaking': ['US', 'CA', 'AU', 'NZ', 'GB'],\n 'us_only': ['US'],\n 'master': 'False'\n }\n self.print_balance()\n\n def get_num_balance(self):\n try:\n balance_response = self.amt.client.get_account_balance()\n return float(balance_response['AvailableBalance'])\n except ClientError as e:\n raise\n\n def print_balance(self):\n balance = self.get_num_balance()\n print(f'Account balance is: ${balance:.{2}f}')\n\n @classmethod\n def pickle_this(cls, this, filename='temp', protocol=pickle.HIGHEST_PROTOCOL, timestamp=False):\n \"\"\"\n Util function to pickle objects, with the option of appending a timestamp\n :param this: object to pickle\n :param filename: filename of pickle object\n :param protocol: pickle protocol to use\n :param timestamp: option to append timestamp to filename\n :return:\n \"\"\"\n if timestamp:\n timestamp = '_'.join(\n time.asctime().lower().replace(':', '_').split())\n filename = '_'.join([filename, timestamp])\n if not filename.endswith('.pkl'):\n filename += '.pkl'\n with open(filename, 'wb') as f:\n pickle.dump(this, f, protocol=protocol)\n\n @classmethod\n def unpickle_this(cls, filename):\n \"\"\"\n Util function to unpickle objects\n :param filename: pickle file path\n :return: unpickled object\n \"\"\"\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\n @classmethod\n def _render_hit_html(cls, template_params, **kwargs):\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(\n template_params['template_dir']))\n template = env.get_template(template_params['template_file'])\n return template.render(**kwargs)\n\n def preview_hit_interface(self, template_params, html_dir='./interface_preview', page_name='task.html', **kwargs):\n hit_html = self._render_hit_html(template_params, **kwargs)\n html_out_file = os.path.join(html_dir, page_name)\n if not os.path.exists(html_dir):\n os.makedirs(html_dir)\n with open(html_out_file, 'w') as f:\n f.write(hit_html)\n\n def expected_cost(self, data, **kwargs):\n \"\"\"\n Computes the expected cost of a hit batch\n To adjust for subtleties of the amt fees, see:\n www.mturk.com/pricing\n :param data: task data\n :param kwargs:\n :return: cost if sufficient funds, false if not\n \"\"\"\n hit_params = kwargs['basic_hit_params']\n base_cost = float(hit_params['Reward'])\n n_assignments_per_hit = hit_params['MaxAssignments']\n min_fee_per_assignment = 
0.01\n fee_percentage = 0.2 if n_assignments_per_hit < 10 else 0.4\n fee_per_assignment = max(fee_percentage * base_cost, min_fee_per_assignment) + base_cost\n cost_plus_fee = round(n_assignments_per_hit * fee_per_assignment * len(data), 2)\n current_balance = self.get_num_balance()\n if cost_plus_fee > current_balance:\n print(\n f'Insufficient funds: will cost ${cost_plus_fee:.{2}f} but only ${current_balance:.{2}f} available.')\n return False\n else:\n print(f'Batch will cost ${cost_plus_fee:.{2}f}')\n return cost_plus_fee\n\n def _build_qualifications(self, locales=None):\n \"\"\"\n builds qualifications for task\n :param locales: AMT country codes allowed to perform task\n :return: list of qualification dicts\n \"\"\"\n if locales:\n locales = [{'Country': loc} for loc in locales]\n masters_id = '2F1QJWKUDD8XADTFD2Q0G6UTO95ALH' if self.in_production else '2ARFPLSP75KLA8M8DH1HTEQVJT3SY6'\n master = {\n 'QualificationTypeId': masters_id,\n 'Comparator': 'EqualTo',\n 'RequiredToPreview': True,\n }\n high_accept_rate = {\n 'QualificationTypeId': '000000000000000000L0',\n 'Comparator': 'GreaterThanOrEqualTo',\n 'IntegerValues': [self.qualifications['high_accept_rate']],\n 'RequiredToPreview': True,\n }\n location_based = {\n 'QualificationTypeId': '00000000000000000071',\n 'Comparator': 'In',\n 'LocaleValues': locales,\n 'RequiredToPreview': True,\n }\n iconary = {\n 'QualificationTypeId': '3Z1HL5WC7LSXUFW49BUXR7ZP1IDH6M',\n 'Comparator': 'EqualTo',\n 'ActionsGuarded': 'DiscoverPreviewAndAccept',\n 'IntegerValues': [1]\n }\n return [high_accept_rate, location_based, iconary]\n\n def _create_question_xml(self, html_question, frame_height, turk_schema='html'):\n \"\"\"\n Embeds question HTML in AMT HTMLQuestion XML\n :param html_question: task html\n :param frame_height: height of mturk iframe\n :param turk_schema: schema type\n :return:\n \"\"\"\n hit_xml = f\"\"\"\\\n <HTMLQuestion xmlns=\"{self.turk_data_schemas[turk_schema]}\">\n <HTMLContent><![CDATA[\n {html_question}\n ]]>\n </HTMLContent>\n <FrameHeight>{frame_height}</FrameHeight>\n </HTMLQuestion>\"\"\"\n try:\n xmltodict.parse(hit_xml)\n return hit_xml\n except xmltodict.expat.ExpatError as e:\n print(e)\n raise\n\n def _create_html_hit_params(self, basic_hit_params, template_params, **kwargs):\n \"\"\"\n creates a HIT for a question with the specified HTML\n :param basic_hit_params: a dict of the HIT parameters, must contain an \"html\" parameter\n :return: the created HIT parameter dict\n \"\"\"\n hit_params = copy.deepcopy(basic_hit_params)\n frame_height = hit_params.pop('frame_height')\n question_html = self._render_hit_html(template_params, **kwargs)\n hit_params['Question'] = self._create_question_xml(\n question_html, frame_height)\n hit_params['QualificationRequirements'] = self._build_qualifications(\n self.qualifications['english_speaking'])\n return hit_params\n\n def _exec_task(self, hits, task, **kwargs):\n \"\"\"\n Executes task on _batch over multiple threads\n :param hits: _batch to perform task on\n :param task: vizier task function\n :param kwargs:\n :return: AMT client responses\n \"\"\"\n hit_batches = [hits[i::self.n_threads] for i in range(self.n_threads)]\n threads = []\n res_queue = queue.Queue()\n combined_args = {**kwargs, **self.kwargs}\n for batch in hit_batches:\n t = task(batch, res_queue, **combined_args)\n threads.append(t)\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n result_list = []\n while not res_queue.empty():\n result_list.append(res_queue.get())\n return [item for sl in result_list for item in sl]\n\n def create_hit_group(self, data, task_param_generator, **kwargs):\n \"\"\"\n Creates a group of HITs from 
data and supplied generator and pickles resultant _batch\n :param data: task data\n :param task_param_generator: user-defined function to generate task parameters\n :param kwargs:\n :return: hit objects created\n \"\"\"\n if not self.expected_cost(data, **kwargs):\n return None\n hit_params = [self._create_html_hit_params(\n **kwargs, **task_param_generator(point)) for point in data]\n hits_created = self._exec_task(hit_params, CreateHits)\n submission_type = 'production_' if self.in_production else 'sandbox_'\n self.pickle_this(\n hits_created, f'submitted_batch_{submission_type + str(len(hits_created))}', timestamp=True)\n return hits_created\n\n @classmethod\n def _get_answers(cls, assignments):\n \"\"\"\n Extracts turker answers from assignments\n :param assignments: list of amt assignment objects\n :return: turker responses\n \"\"\"\n answers = []\n for hit in assignments:\n for asg in hit['Assignments']:\n answer_raw = xmltodict.parse(asg['Answer'])\n answers.append(json.loads(\n answer_raw['QuestionFormAnswers']['Answer']['FreeText']))\n return answers\n\n @classmethod\n def _extract_responses(cls, answers):\n \"\"\"\n Extracts responses from answers\n :param answers: answers extracted from AMT assignments\n :return: dict of task results keyed on their globalID\n \"\"\"\n results = defaultdict(list)\n for ans in answers:\n results[ans['globalID']].append(ans['results'])\n return dict(results)\n\n def get_assignments(self, hits=()):\n \"\"\"\n Retrieves assignments associated with _batch\n :param hits: list of AMT _batch\n :return: dict of AMT assignments\n \"\"\"\n return self._exec_task(hits, GetAssignments)\n\n def get_and_extract_results(self, hits=()):\n \"\"\"\n Retrieves AMT assignments and extracts turker responses\n :param hits: list of AMT _batch\n :return: dict of task results keyed on their globalID\n \"\"\"\n assignments = self.get_assignments(hits)\n answers = self._get_answers(assignments)\n return self._extract_responses(answers)\n\n def approve_assignments(self, assignments):\n \"\"\"\n Approves assignments\n :param assignments: list of assignments to approve\n :return: AMT client responses\n \"\"\"\n return self._exec_task(assignments, ApproveAssignments)\n\n def approve_hits(self, hits):\n \"\"\"\n Approves all assignments associated with _batch\n :param hits : list of _batch to approve\n :return: AMT client responses\n \"\"\"\n assignments = self.get_assignments(hits)\n return self._exec_task(assignments, ApproveAssignments)\n\n def get_all_hits(self):\n \"\"\"\n Retrieves all of the current user's HITs.\n This can be slow if a user has accumulated many thousands of HITs\n :return: all user HITs\n \"\"\"\n paginator = self.amt.client.get_paginator('list_hits')\n response_iterator = paginator.paginate(\n PaginationConfig={\n 'PageSize': 100,\n }\n )\n response = []\n for r in response_iterator:\n response.extend(r['HITs'])\n return response\n\n def expire_hits(self, hits):\n \"\"\"\n Sets hit expiration to a date in the past\n :param hits: _batch to expire\n :return: AMT client responses\n \"\"\"\n return self._exec_task(hits, ExpireHits)\n\n def delete_hits(self, hits):\n \"\"\"\n Deletes (permanently removes) _batch\n :param hits: _batch to delete\n :return: AMT client responses\n \"\"\"\n return self._exec_task(hits, DeleteHits)\n\n def force_delete_hits(self, hits, force=False):\n \"\"\"\n Deletes (permanently removes) _batch by first expiring them\n :param hits: _batch to delete\n :param force: flag to overcome production warning\n :return: AMT client 
responses\n \"\"\"\n if not force and self.in_production:\n print('Careful with this in production. Override with force=True')\n return\n response = self.expire_hits(hits)\n response += self.delete_hits(hits)\n return response\n\n def set_hits_reviewing(self, hits):\n \"\"\"\n Sets hit status to reviewing\n :param hits: _batch to set status of\n :return: AMT client responses\n \"\"\"\n return self._exec_task(hits, UpdateHITsReviewStatus, revert=False)\n\n def revert_hits_reviewable(self, hits):\n \"\"\"\n Reverts hit reviewing status\n :param hits: _batch to revert\n :return: AMT client responses\n \"\"\"\n return self._exec_task(hits, UpdateHITsReviewStatus, revert=True)\n\n def create_qualification(self, **kwargs):\n \"\"\"\n Creates a new task qualification ID\n :param kwargs: name, keywords, description, status, etc.\n :return: qualification ID string\n \"\"\"\n return self.amt.client.create_qualification_type(**kwargs)\n\n def grant_qualification_to_workers(self, qualification_id, worker_ids, notify=True):\n \"\"\"\n Grants qualification to workers\n :param qualification_id: qualification ID\n :param worker_ids: list of worker IDs\n :param notify: send notification email to workers\n :return:\n \"\"\"\n responses = []\n for w_id in worker_ids:\n responses.append(self.amt.client.associate_qualification_with_worker(\n QualificationTypeId=qualification_id,\n WorkerId=w_id,\n IntegerValue=1,\n SendNotification=notify\n ))\n return responses\n\n def remove_qualification_from_workers(self, qualification_id, worker_ids, reason=''):\n \"\"\"\n Revokes a worker's qualification\n :param qualification_id: qualification ID\n :param worker_ids: list of worker IDs\n :param reason: reason for disqualification to give workers\n :return:\n \"\"\"\n responses = []\n for w_id in worker_ids:\n responses.append(self.amt.client.disassociate_qualification_with_worker(\n QualificationTypeId=qualification_id,\n WorkerId=w_id,\n Reason=reason\n ))\n return responses\n\n def message_workers(self, worker_ids, subject, message):\n \"\"\"\n Messages a list of workers with a supplied message.\n :param worker_ids: list of worker IDs to message\n :param subject: subject to display in message\n :param message:\n :return: AMT client responses\n \"\"\"\n batch_length = 100 # this is the maximum number of workers AMT allows in one notification\n n_batches = len(worker_ids) // batch_length + \\\n bool(len(worker_ids) % batch_length)\n worker_batches = [worker_ids[i::n_batches] for i in range(n_batches)]\n response = []\n for workers in worker_batches:\n response.append(self.amt.client.notify_workers(\n Subject=subject,\n MessageText=message,\n WorkerIds=workers))\n return response\n\n def send_bonuses(self, worker_bonus_assignments, amount, reason):\n responses = []\n for worker_id, assignments in worker_bonus_assignments.items():\n for a_id in assignments:\n responses.append(self.amt.client.send_bonus(\n WorkerId=worker_id,\n BonusAmount=str(amount),\n AssignmentId=a_id,\n Reason=reason,\n # UniqueRequestToken='string'\n ))\n return responses\n\n","sub_path":"vizier/vizier.py","file_name":"vizier.py","file_ext":"py","file_size_in_byte":16369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248572443","text":"import pytest\n\nfrom football_data_dot_org_api_client.translators.raw_response_to_team_object import RawResponseToTeamObject\n\n\ndef get_minimum_valid_response():\n \"\"\"\n Should return only the required fields.\n :return:\n \"\"\"\n return {\n \"_links\": 
{\n \"self\": {\n \"href\": \"http://api.football-data.org/v1/teams/340\"\n },\n \"fixtures\": {\n \"href\": \"http://api.football-data.org/v1/teams/340/fixtures\"\n },\n \"players\": {\n \"href\": \"http://api.football-data.org/v1/teams/340/players\"\n }\n },\n \"name\": \"Southampton FC\",\n \"code\": \"SFC\",\n \"shortName\": \"Southampton\",\n \"squadMarketValue\": \"199,000,000 €\",\n \"crestUrl\": \"http://upload.wikimedia.org/wikipedia/de/c/c9/FC_Southampton.svg\"\n }\n\n\ndef test_get_object_with_no_data():\n \"\"\"\n Ensure we cope with no response data being passed in.\n \"\"\"\n data = None\n\n with pytest.raises(ValueError):\n RawResponseToTeamObject.get_object(data)\n\n\ndef test_get_object_with_invalid_data():\n data = get_minimum_valid_response()\n\n for key, val in data.items():\n # Remove key and value\n data.pop(key, val)\n\n # Check the validator kicks in.\n with pytest.raises(ValueError):\n RawResponseToTeamObject.get_object(data)\n\n # Put it back so we can test the next item.\n data[key] = val\n\n\ndef test_get_object_with_valid_data():\n \"\"\"\n Test using a full set of data, gleamed from an earlier live query to Football-Data.org\n \"\"\"\n data = get_minimum_valid_response()\n\n player = RawResponseToTeamObject.get_object(data)\n assert player is not None\n\n\ndef test_get_object_returns_team_with_correct_id_number():\n \"\"\"\n Test using raw data with a id supplied.\n \"\"\"\n data = get_minimum_valid_response()\n data[\"id\"] = 'Some Data'\n\n team = RawResponseToTeamObject.get_object(data)\n assert team.id == 'Some Data'\n\n\ndef test_get_object_returns_team_with_correct_name():\n \"\"\"\n Test using raw data with a name supplied.\n \"\"\"\n data = get_minimum_valid_response()\n data[\"name\"] = 'Some Name'\n\n team = RawResponseToTeamObject.get_object(data)\n assert team.name == 'Some Name'\n\n\ndef test_get_object_returns_team_with_correct_short_name():\n \"\"\"\n Test using raw data with a short name supplied.\n \"\"\"\n data = get_minimum_valid_response()\n data[\"shortName\"] = 'Some short name'\n\n team = RawResponseToTeamObject.get_object(data)\n assert team.short_name == 'Some short name'\n\n\ndef test_get_object_returns_team_with_correct_squad_market_value():\n \"\"\"\n Test using raw data with a squad_market_value supplied.\n \"\"\"\n data = get_minimum_valid_response()\n data[\"squadMarketValue\"] = 'Some value'\n\n team = RawResponseToTeamObject.get_object(data)\n assert team.squad_market_value == 'Some value'\n\n\ndef test_get_object_returns_team_with_correct_crest_url():\n \"\"\"\n Test using raw data with a crest_url supplied.\n \"\"\"\n data = get_minimum_valid_response()\n data[\"crestUrl\"] = 'Some url'\n\n team = RawResponseToTeamObject.get_object(data)\n assert team.crest_url == 'Some url'\n","sub_path":"tests/unit/football_data_dot_org_api_client/translators/test_raw_response_to_team_object.py","file_name":"test_raw_response_to_team_object.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"278914070","text":"l = {}\n\ndef create_dict():\n\tn = int(input(\"Enter the no. of elements you want to add \"))\n\tfor i in range(0, int(n)):\n\t\tx = int(input(\"Enter the index no. 
\"))\n\t\ty = str(input(\"Enter the name of the student\"))\n\t\tl[x] = y\n\treturn l\n\ndef print_dict(newdict):\n\tfor x in l:\n\t\tprint(f\"S.no = {x}\tName = {l[x]}\")\n\ndef main():\n\tnewdict = create_dict()\n\tprint_dict(newdict)\n\nif __name__ == '__main__':\n\tmain()\n\t#create_dict(l,n)\n\t#print_dict(l)","sub_path":"Data_structure_conversion.py","file_name":"Data_structure_conversion.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173207823","text":"# -*- coding: utf-8 -*-\n'''\ndasist.contrarch.urls\n'''\n\nfrom django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\n\nfrom . import views\n\nurlpatterns = (\n # 'contrarch.views',\n url(r'^$', login_required(views.ContrarchList.as_view()), name='contrarch_list'),\n url(r'^(?P\\d+)/$', login_required(views.ContrarchDetail.as_view()), name='contrarch_view'),\n url(r'^lpp/(?P\\d+)/$', views.contrarch_set_lpp, name='contrarch_set_lpp'),\n url(r'^filter/$', views.contrarch_set_filter, name='contrarch_set_filter'),\n url(r'^get_subjs/$', views.contrarch_get_subjects, name='contrarch_get_subjects'),\n)\n","sub_path":"contrarch/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595200090","text":"\"\"\"\n Non-Local Spatial Propagation Network for Depth Completion\n Jinsun Park, Kyungdon Joo, Zhe Hu, Chi-Kuei Liu and In So Kweon\n\n European Conference on Computer Vision (ECCV), Aug 2020\n\n Project Page : https://github.com/zzangjinsun/NLSPN_ECCV20\n Author : Jinsun Park (zzangjinsun@kaist.ac.kr)\n\n ======================================================================\n\n NLSPNSummary implementation\n\"\"\"\n\n\nfrom . 
import BaseSummary\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom PIL import Image\nimport cv2\n\ncm = plt.get_cmap('plasma')\n\ndef visualize(im, sizeb, size, normalize = False):\n\n cm = plt.get_cmap('plasma')\n\n Wb, Hb = sizeb\n W,H = size\n im = im.reshape(Hb, Wb)\n im = cv2.resize(im, (W,H), interpolation=cv2.INTER_LINEAR)\n\n if normalize:\n im = (im - np.min(im)) / (np.max(im) - np.min(im) + 1e-6)\n im = 255.0 * im \n im = cm(im.astype('uint8'))\n im = np.transpose(im[:, :, :3], (2, 0, 1))\n\n return im \n\ndef norm_cm_transpose(im, cm, max_):\n\n im = 255.0 * im / max_\n\n if cm is None:\n im = np.stack([im,im,im]).astype('uint8')\n else:\n im = cm(im.astype('uint8'))\n im = np.transpose(im[:, :, :3], (2, 0, 1))\n\n return im\n\nclass Summary(BaseSummary):\n def __init__(self, log_dir, mode, args, loss_name, metric_name):\n assert mode in ['train', 'val', 'test'], \\\n \"mode should be one of ['train', 'val', 'test'] \" \\\n \"but got {}\".format(mode)\n\n super(Summary, self).__init__(log_dir, mode, args)\n\n self.log_dir = log_dir\n self.mode = mode\n self.args = args\n\n self.loss = []\n self.metric = []\n\n self.loss_name = loss_name\n self.metric_name = metric_name\n\n self.path_output = None\n\n # ImageNet normalization\n self.img_mean = torch.tensor((0.485, 0.456, 0.406)).view(1, 3, 1, 1)\n self.img_std = torch.tensor((0.229, 0.224, 0.225)).view(1, 3, 1, 1)\n\n def update(self, global_step, sample, output):\n if self.loss_name is not None:\n self.loss = np.concatenate(self.loss, axis=0)\n self.loss = np.mean(self.loss, axis=0, keepdims=True)\n\n msg = [\" {:<9s}| \".format('Loss')]\n for idx, loss_type in enumerate(self.loss_name):\n val = self.loss[0, idx]\n self.add_scalar('Loss/' + loss_type, val, global_step)\n\n msg += [\"{: self.args.num_summary:\n num_summary = self.args.num_summary\n\n if self.args.dep_src in ['slam', 'sgbm']:\n dep = sample['dep'].detach().data.cpu().numpy()\n else:\n dep0 = sample['dep0'].detach().data.cpu().numpy()\n dep1 = sample['dep1'].detach().data.cpu().numpy()\n\n gt = sample['gt'].detach().data.cpu().numpy()\n pred = output['pred'].detach().data.cpu().numpy()\n\n if 'confidence' in output:\n confidence = output['confidence'].data.cpu().numpy()\n confidence = confidence[0:num_summary, :, :, :]\n else:\n confidence = None\n\n\n if 'seg' in output:\n seg = output['seg'].data.cpu().numpy()\n seg = seg[0:num_summary, :, :, :]\n else:\n seg = None\n\n if 'pred_rgb' in output:\n pred_rgb = output['pred_rgb'].data.cpu().numpy()\n pred_rgb = pred_rgb[0:num_summary, :, :, :]\n pred_rgb = np.clip(pred_rgb, a_min=0, a_max=self.args.max_depth)\n else:\n pred_rgb = None\n\n if 'confidence_rgb' in output:\n confidence_rgb = output['confidence_rgb'].data.cpu().numpy()\n confidence_rgb = confidence_rgb[0:num_summary, :, :, :]\n else:\n confidence_rgb = None\n\n if self.args.dep_src in ['slam', 'sgbm']:\n dep = dep[0:num_summary, :, :, :]\n dep = np.clip(dep, a_min=0, a_max=self.args.max_depth)\n else:\n dep0 = dep0[0:num_summary, :, :, :]\n dep1 = dep1[0:num_summary, :, :, :]\n\n dep0 = np.clip(dep0, a_min=0, a_max=self.args.max_depth)\n dep1 = np.clip(dep1, a_min=0, a_max=self.args.max_depth)\n\n rgb = rgb[0:num_summary, :, :, :]\n rgb = np.clip(rgb, a_min=0, a_max=1.0)\n\n gt = gt[0:num_summary, :, :, :]\n gt = np.clip(gt, a_min=0, a_max=self.args.max_depth)\n\n pred = pred[0:num_summary, :, :, :]\n pred = np.clip(pred, a_min=0, a_max=self.args.max_depth)\n\n abs_err = abs(gt - pred)\n abs_err[gt == 0] = 0\n\n 
list_img = []\n \n for b in range(0, num_summary):\n \n img = []\n\n img.append(rgb[b, :, :, :])\n if self.args.dep_src in ['slam', 'sgbm']:\n img.append(norm_cm_transpose(dep[b, 0, :, :], cm, self.args.max_depth))\n else:\n img.append(norm_cm_transpose(dep0[b, 0, :, :], cm, self.args.max_depth))\n img.append(norm_cm_transpose(dep1[b, 0, :, :], cm, self.args.max_depth))\n\n img.append(norm_cm_transpose(gt[b, 0, :, :], cm, self.args.max_depth))\n img.append(norm_cm_transpose(pred[b, 0, :, :], cm, self.args.max_depth))\n if confidence is not None:\n img.append(norm_cm_transpose(confidence[b, 0, :, :], cm, np.max(confidence)))\n\n if seg is not None:\n img.append(norm_cm_transpose(seg[b, 0, :, :], None, np.max(seg)))\n\n if pred_rgb is not None:\n img.append(norm_cm_transpose(pred_rgb[b, 0, :, :], cm, self.args.max_depth))\n\n if confidence_rgb is not None:\n img.append(norm_cm_transpose(confidence_rgb[b, 0, :, :], cm, np.max(confidence_rgb)))\n\n img.append(norm_cm_transpose(abs_err[b, 0, :, :], cm, np.max(abs_err)))\n\n img = np.concatenate(img, axis=1)\n list_img.append(img)\n\n img_total = np.concatenate(list_img, axis=2)\n img_total = torch.from_numpy(img_total)\n self.add_image(self.mode + '/images', img_total, global_step)\n\n self.flush()\n\n # Reset\n self.loss = []\n self.metric = []\n\n def save(self, epoch, idx, sample, output):\n with torch.no_grad():\n if self.args.save_result_only:\n self.path_output = '{}/{}/epoch{:04d}'.format(self.log_dir,\n self.mode, epoch)\n os.makedirs(self.path_output, exist_ok=True)\n\n path_save_pred = '{}/{:010d}.png'.format(self.path_output, idx)\n\n pred = output['pred'].detach()\n pred = torch.clamp(pred, min=0)\n pred = pred[0, 0, :, :].data.cpu().numpy()\n pred = (pred*256.0).astype(np.uint16)\n pred = Image.fromarray(pred)\n pred.save(path_save_pred)\n else:\n # Parse data\n if self.args.model_name.lower() == 'nlspn':\n guidance = output['guidance'].data.cpu().numpy()\n offset = output['offset'].data.cpu().numpy()\n aff = output['aff'].data.cpu().numpy()\n gamma = output['gamma'].data.cpu().numpy()\n feat_init = output['pred_init']\n list_feat = output['pred_inter']\n\n feat_init = feat_init[0, 0, :, :].data.cpu().numpy()\n feat_init = feat_init / self.args.max_depth\n feat_init = (255.0*cm(feat_init)).astype('uint8')\n feat_init = Image.fromarray(feat_init[:, :, :3], 'RGB')\n\n for k in range(0, len(list_feat)):\n feat_inter = list_feat[k]\n feat_inter = feat_inter[0, 0, :, :].data.cpu().numpy()\n feat_inter = feat_inter / self.args.max_depth\n feat_inter = (255.0*cm(feat_inter)).astype('uint8')\n feat_inter = Image.fromarray(feat_inter[:, :, :3], 'RGB')\n\n list_feat[k] = feat_inter\n\n rgb = sample['rgb'].detach()\n rgb.mul_(self.img_std.type_as(rgb)).add_(self.img_mean.type_as(rgb))\n rgb = rgb[0, :, :, :].data.cpu().numpy()\n rgb = 255.0*np.transpose(rgb, (1, 2, 0))\n rgb = np.clip(rgb, 0, 256).astype('uint8')\n rgb = Image.fromarray(rgb, 'RGB')\n \n pred = output['pred'].detach()\n pred = torch.clamp(pred, min=0)\n pred = pred[0, 0, :, :].data.cpu().numpy()\n\n gt = sample['gt'].detach()\n gt = gt[0, 0, :, :].data.cpu().numpy()\n\n #calculate abs error\n abs_err = abs(gt - pred)\n abs_err[gt == 0] = 0\n abs_err = (255.0*cm(abs_err)).astype('uint8')\n abs_err = Image.fromarray(abs_err[:, :, :3], 'RGB')\n\n pred = pred / self.args.max_depth\n pred = (255.0*cm(pred)).astype('uint8')\n pred = Image.fromarray(pred[:, :, :3], 'RGB')\n\n gt = gt / self.args.max_depth\n gt = (255.0*cm(gt)).astype('uint8')\n gt = Image.fromarray(gt[:, :, :3], 
'RGB')\n\n self.path_output = '{}/{}/epoch{:04d}/{:08d}'.format(\n self.log_dir, self.mode, epoch, idx)\n os.makedirs(self.path_output, exist_ok=True)\n\n if self.args.dep_src in ['slam', 'sgbm']:\n dep = sample['dep'].detach()\n dep = dep[0, 0, :, :].data.cpu().numpy()\n dep = dep / self.args.max_depth\n dep = (255.0*cm(dep)).astype('uint8')\n dep = Image.fromarray(dep[:, :, :3], 'RGB')\n path_save_dep = '{}/02_dep.png'.format(self.path_output)\n dep.save(path_save_dep)\n else:\n dep0 = sample['dep0'].detach()\n dep1 = sample['dep1'].detach()\n dep0 = dep0[0, 0, :, :].data.cpu().numpy()\n dep1 = dep1[0, 0, :, :].data.cpu().numpy()\n dep0 = dep0 / self.args.max_depth\n dep1 = dep1 / self.args.max_depth\n dep0 = (255.0*cm(dep0)).astype('uint8')\n dep1 = (255.0*cm(dep1)).astype('uint8')\n dep0 = Image.fromarray(dep0[:, :, :3], 'RGB')\n dep1 = Image.fromarray(dep1[:, :, :3], 'RGB')\n\n path_save_dep0 = '{}/02_dep0.png'.format(self.path_output)\n path_save_dep1 = '{}/02_dep1.png'.format(self.path_output)\n \n dep0.save(path_save_dep0)\n dep1.save(path_save_dep1)\n \n path_save_rgb = '{}/01_rgb.png'.format(self.path_output)\n path_save_init = '{}/03_pred_init.png'.format(self.path_output)\n path_save_pred = '{}/05_pred_final.png'.format(self.path_output)\n path_save_gt = '{}/06_gt.png'.format(self.path_output)\n path_save_abs_error = '{}/07_abs_error.png'.format(self.path_output)\n\n rgb.save(path_save_rgb)\n pred.save(path_save_pred)\n gt.save(path_save_gt)\n abs_err.save(path_save_abs_error)\n\n if self.args.model_name.lower() == 'nlspn':\n\n feat_init.save(path_save_init)\n\n for k in range(0, len(list_feat)):\n path_save_inter = '{}/04_pred_prop_{:02d}.png'.format(\n self.path_output, k)\n list_feat[k].save(path_save_inter)\n\n np.save('{}/guidance.npy'.format(self.path_output), guidance)\n np.save('{}/offset.npy'.format(self.path_output), offset)\n np.save('{}/aff.npy'.format(self.path_output), aff)\n np.save('{}/gamma.npy'.format(self.path_output), gamma)\n","sub_path":"src/summary/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":13339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"239054998","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'shop'\n\nurlpatterns = [\n\n path('', views.home, name='home'),\n\n path('products/', views.ProductItemDetailView.as_view(), name='product_page'),\n path('products//', views.ProductsGroupView.as_view(), name='product_group_page'),\n path('products///', views.GroupProductTypeView.as_view(), name='product_type_page'),\n path(\n 'products////',\n views.GroupSubProductTypeView.as_view(), name='sub_product_type_page'\n ),\n\n path(\n 'liked_products',\n views.LikedProductsView.as_view(),\n name='liked_products'\n ),\n path(\n 'liked_products/',\n views.LikedProductTypeView.as_view(),\n name='liked_product_type'\n ),\n path(\n 'liked_products//',\n views.LikedSubProductsTypeView.as_view(),\n name='liked_sub_product_type'\n ),\n\n path(\n 'like/',\n views.LikeProduct.as_view(),\n name='like_product'\n ),\n path(\n 'remove_like/',\n views.RemoveLike.as_view(),\n name='remove_like'\n ),\n path(\n 'update_shopping_cart/',\n views.UpdateShoppingCart.as_view(),\n name='update_shopping_cart'\n ),\n path(\n 'update_product_description/',\n views.UpdateProductDescription.as_view(),\n name='update_product_description'\n ),\n\n path('brands/', views.BrandsListView.as_view(), name='brands_page'),\n path('brand//', views.BrandProductsView.as_view(), name='brand_page'),\n path('brand//', views.BrandProductTypeView.as_view(), name='brand_product_type_page'),\n path(\n 'brand///',\n views.BrandSubProductTypeView.as_view(), name='brand_sub_product_type_page'\n ),\n\n path('authentication/signup/', views.signup, name='signup'),\n path('authentication/login/', views.signin, name='login'),\n path('authentication/logout/', views.soho_logout, name='logout'),\n path('authentication/logged_in/', views.logged_in, name='logged_in'),\n\n]\n","sub_path":"soho/shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"187924838","text":"import json\nimport requests\nimport pandas as pd\n\ndef main():\n\n resp = requests.get(\"https://services3.arcgis.com/nIl76MjbPamkQiu8/arcgis/rest/services/date_wise_corona_with_positive_percentage/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=date%20asc&outSR=102100&resultOffset=0&resultRecordCount=32000&resultType=standard&cacheHint=true\")\n data = json.loads(resp.text)\n data = data[\"features\"]\n dates = [day[\"attributes\"][\"date\"] for day in data]\n tested = [day[\"attributes\"][\"tested\"] for day in data]\n \n df = pd.DataFrame({\n \"Date\": pd.to_datetime(dates, unit=\"ms\").date,\n \"Daily change in cumulative total\": tested\n }).dropna()\n df = df[df[\"Daily change in cumulative total\"] != 0]\n \n df.loc[:, \"Daily change in cumulative total\"] = df[\"Daily change in cumulative total\"].astype(int)\n df.loc[:, \"Country\"] = \"Bangladesh\"\n df.loc[:, \"Units\"] = \"tests performed\"\n df.loc[:, \"Testing type\"] = \"PCR only\"\n df.loc[:, \"Source label\"] = \"Government of Bangladesh\"\n df.loc[:, \"Source URL\"] = \"https://covid19bd.idare.io/\"\n df.loc[:, \"Notes\"] = pd.NA\n\n # Manual fix for error in data\n df.loc[\n (df[\"Date\"] == pd.to_datetime(\"2020-03-16\")) &\n (df[\"Daily change in cumulative total\"] == 39),\n \"Date\"\n ] = \"2020-03-17\"\n\n df.to_csv(\"automated_sheets/Bangladesh.csv\", index=False)\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"scripts/scripts/testing/automations/batch/bangladesh.py","file_name":"bangladesh.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"642245988","text":"# config of AttentionRNN\ndef config_loading():\n\tword_padded_length_in_notes_dict = {'12h': 0, }\n\n\tconfig_cnn_rnn = {\n\t 'DATAPATH' : './data_unconcated',\n\t 'concat': True, # if the loaded data is splitted by sentences, concat = True, will concat all sentences into a note.\n\t 'to_sentences':True, # if you would like to keep only the sentence and patient level \n\t 'words_dim': 300, \n\t 'embed_mode': 'random', \n\t 'output_channel': 100, # number of feature maps for CNN \n\t 'dropout':0,\n\t 'target_class':2,\n\t 'note_gru_hidden': 200,\n\t 'bidirection_gru': True,\n\t 'batch_size': 16,\n\t 'learning_rate': 0.01,\n\t 'num_epochs':150,\n\t 'filter_width':8,\n\t 'cuda': True,\n\t 'attention': True,\n\t 'early_stop': 3,\n\t 'val_per_epoch': 5,\n\t 'data_portion': 1,\n\t 'optimizer': 'SGD', # Adam or SGD, the learning rate for Adam is default by 0.001\n\t 'padding_before_batch': True, \n\t 'padding_max': True, \n\t 'word_padded_length_in_notes': None, #None for default \n\t 'savepath': './model/24h_words_dim_300_output_cha_100_hidden_200_filter_width_8_batch_16_SGD_lr0.01_drop_0_attention/',\n\t 'time_name': '24h',\n\t 'split_points': [12*60, 24*60 ,48*60, 72*60, 96*60, 120*60, 240*60 ]\n\t}\n\n\tif config_cnn_rnn['word_padded_length_in_notes'] == None:\n\t config_cnn_rnn['word_padded_length_in_notes'] = word_padded_length_in_notes_dict[config['time_name']]\n\n\treturn config_cnn_rnn\n\n\n\n","sub_path":"model/testpart_correction/config_AttentionRNN.py","file_name":"config_AttentionRNN.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"271137532","text":"import logging\nimport mock\nimport pytest\n\nfrom twindb_backup import INTERVALS\nfrom twindb_backup.destination.ssh import Ssh\nfrom twindb_backup.source.mysql_source import MySQLConnectInfo\nfrom twindb_backup.source.remote_mysql_source import RemoteMySQLSource\n\n\n@mock.patch.object(RemoteMySQLSource, \"_save_cfg\")\n@mock.patch.object(RemoteMySQLSource, \"_get_root_my_cnf\")\ndef test__clone_config(mock_get_root, mock_save):\n mock_get_root.return_value = \"/etc/my.cnf\"\n dst = Ssh()\n rmt_sql = RemoteMySQLSource({\n \"run_type\": INTERVALS[0],\n \"full_backup\": INTERVALS[0],\n \"mysql_connect_info\": MySQLConnectInfo(\"/\"),\n \"ssh_connection_info\": None\n })\n rmt_sql.clone_config(dst)\n mock_get_root.assert_called_with()\n mock_save.assert_called_with(dst, \"/etc/my.cnf\")\n\n\ndef test___mem_available():\n mock_stdout = mock.Mock()\n mock_stdout.read.return_value = \"100500\"\n\n mock_client = mock.Mock()\n mock_client.execute.return_value = (None, mock_stdout, None)\n\n rmt_sql = RemoteMySQLSource({\n \"run_type\": INTERVALS[0],\n \"full_backup\": INTERVALS[0],\n \"mysql_connect_info\": MySQLConnectInfo(\"/\"),\n \"ssh_connection_info\": None\n })\n rmt_sql._ssh_client = mock_client\n assert rmt_sql._mem_available() == 100500 * 1024\n\n\ndef test__mem_available_raise_exception():\n mock_stdout = mock.Mock()\n mock_stdout.read.return_value = \"\"\n\n mock_client = mock.Mock()\n mock_client.execute.return_value = (None, mock_stdout, None)\n\n rmt_sql = RemoteMySQLSource({\n \"run_type\": INTERVALS[0],\n \"full_backup\": INTERVALS[0],\n 
\"mysql_connect_info\": MySQLConnectInfo(\"/\"),\n \"ssh_connection_info\": None\n })\n rmt_sql._ssh_client = mock_client\n with pytest.raises(OSError):\n rmt_sql._mem_available()\n\n","sub_path":"tests/unit/source/test_remote_mysql_source.py","file_name":"test_remote_mysql_source.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"620306293","text":"#!/usr/bin/env python3\n\nimport argparse\nimport itertools\nimport json\nimport logging\nimport os\nimport re\nimport shlex\nimport subprocess\nimport sys\nimport glob\nfrom pathlib import Path\nfrom math import floor, ceil, log\nfrom fractions import Fraction\n\nUPLOAD_KEY_REQUEST_ENDPOINT = 'https://api.gfycat.com/v1/gfycats?'\nFILE_UPLOAD_ENDPOINT = 'https://filedrop.gfycat.com'\nAUTHENTICATION_ENDPOINT = 'https://api.gfycat.com/v1/oauth/token'\n\n__version__ = '3.6.5'\n\nsettings = {}\n\noutPaths = []\nfileNames = []\nlinks = []\nmarkdown = ''\n\nffmpegPath = 'ffmpeg'\nffprobePath = 'ffprobe'\nffplayPath = 'ffplay'\nwebmsPath = './webms'\nlogger = None\n\nif getattr(sys, 'frozen', False):\n ffmpegPath = './bin/ffmpeg'\n ffprobePath = './bin/ffprobe'\n ffplayPath = './bin/ffplay'\n if sys.platform == 'win32':\n ffmpegPath += '.exe'\n ffprobePath += '.exe'\n ffplayPath += '.exe'\n if sys.platform == 'darwin':\n os.environ['SSL_CERT_FILE'] = \"certifi/cacert.pem\"\n\n\ndef main():\n global settings, webmsPath\n args, unknown = buildArgParser()\n if args.cropMultiple != 1:\n args.cropMultipleX = args.cropMultiple\n args.cropMultipleY = args.cropMultiple\n\n args = vars(args)\n\n args = {k: v for k, v in args.items() if v is not None}\n\n args[\"videoStabilization\"] = getVidstabPreset(\n args[\"videoStabilization\"], args[\"videoStabilizationDynamicZoom\"])\n args[\"denoise\"] = getDenoisePreset(args[\"denoise\"])\n settings = {'markerPairMergeList': '', 'rotate': 0,\n 'overlayPath': '', 'delay': 0, 'color_space': None, **args}\n\n settings[\"isDashVideo\"] = False\n settings[\"isDashAudio\"] = False\n if \"enableSpeedMaps\" not in settings:\n settings[\"enableSpeedMaps\"] = not settings[\"noSpeedMaps\"]\n\n with open(settings[\"json\"], 'r', encoding='utf-8-sig') as file:\n markersJson = file.read()\n settings = loadMarkers(markersJson, settings)\n settings[\"videoTitle\"] = re.sub('\"', '', settings[\"videoTitle\"])\n settings[\"markersDataFileStem\"] = Path(settings[\"json\"]).stem\n settings[\"titleSuffix\"] = settings[\"markersDataFileStem\"]\n webmsPath += f'/{settings[\"titleSuffix\"]}'\n\n os.makedirs(f'{webmsPath}', exist_ok=True)\n setUpLogger()\n\n logger.info(f'Version: {__version__}')\n logger.info('-' * 80)\n\n settings[\"downloadVideoNameStem\"] = f'{settings[\"titleSuffix\"]}-full'\n settings[\"downloadVideoPath\"] = f'{webmsPath}/{settings[\"downloadVideoNameStem\"]}'\n pivpat = r'^' + re.escape(settings[\"downloadVideoNameStem\"]) + r'\\.[^.]+$'\n potentialInputVideos = [\n f'{webmsPath}/{iv}' for iv in os.listdir(webmsPath) if re.search(pivpat, iv)]\n\n settings[\"automaticFetching\"] = not settings[\"inputVideo\"] and not settings[\"downloadVideo\"]\n\n if settings[\"automaticFetching\"] and not settings[\"preview\"] and not settings[\"noAutoFindInputVideo\"]:\n if len(potentialInputVideos) > 0:\n logger.info(\n f'Found potential input video at path {potentialInputVideos[0]}.')\n if len(potentialInputVideos) > 1:\n logger.warning(\n f'Also found the following other potential input videos {potentialInputVideos[1:]}.')\n 
settings[\"inputVideo\"] = potentialInputVideos[0]\n\n if settings[\"automaticFetching\"] and settings[\"preview\"]:\n logger.warning(\n \"Preview mode was enabled without providing a local input video and video downloading disabled.\")\n logger.warning(\n \"Automatic fetching of video stream chunks provides a poor preview experience.\")\n logger.warning(\n \"Automatically fetched video previews can only loop up to 32767 frames (~9 min at 60fps).\")\n logger.warning(\n \"When previewing, a local video file uses less memory and does not require re-streaming from the internet on seek with right-click.\")\n logger.warning(\n \"A local video also enables toggling of video correction filters with W.\")\n if not settings[\"noAutoFindInputVideo\"]:\n if len(potentialInputVideos) > 0:\n logger.info(\n f'Found potential input video at path {potentialInputVideos[0]}.')\n useFoundInputVideo = input(\n r'Would you like to use this input video? (y/n): ')\n if useFoundInputVideo == 'yes' or useFoundInputVideo == 'y':\n settings[\"inputVideo\"] = potentialInputVideos[0]\n\n if not settings[\"inputVideo\"]:\n try:\n logger.info(\n \"You may be able to drag and drop the input video file at the following prompt.\")\n settings[\"inputVideo\"] = input(\n f'Specify an input video path OR press ENTER to continue without doing so: ')\n if settings[\"inputVideo\"] == '':\n logger.info(\n f'The video can also be downloaded before previewing to the path: \"{settings[\"downloadVideoPath\"]}\"')\n logger.info(\n \"Note the file extension will be automatically determined.\")\n logger.info(\n \"If the file already exists it will be used as is without re-downloading.\")\n downloadVideo = input(\n f'Would you like to automatically download the video? (y/n): ')\n if downloadVideo == 'yes' or downloadVideo == 'y':\n settings[\"downloadVideo\"] = True\n except:\n pass\n\n if settings[\"inputVideo\"]:\n if not Path(settings[\"inputVideo\"]).is_file():\n logger.error(\n f'Input video file \"{settings[\"inputVideo\"]}\" does not exist or is not a file.')\n logger.error(f'Exiting...')\n sys.exit(1)\n else:\n logger.info(\n f'Automatically using found input video file \"{settings[\"inputVideo\"]}\".')\n\n settings = getVideoInfo(settings, {})\n else:\n settings = prepareGlobalSettings(settings)\n\n if not settings[\"preview\"]:\n for markerPairIndex, marker in enumerate(settings[\"markerPairs\"]):\n settings[\"markerPairs\"][markerPairIndex] = makeMarkerPairClip(\n settings, markerPairIndex)\n if settings[\"markerPairMergeList\"] != '':\n makeMergedClips(settings)\n else:\n while True:\n try:\n inputStr = input(\n f'Enter a valid marker pair number (between {1} and {len(settings[\"markerPairs\"])}) or quit(q): ')\n if inputStr == 'quit' or inputStr == 'q':\n break\n markerPairIndex = int(inputStr)\n markerPairIndex -= 1\n except ValueError:\n logger.error(f'{inputStr} is not a valid number.')\n continue\n if 0 <= markerPairIndex < len(settings[\"markerPairs\"]):\n makeMarkerPairClip(settings, markerPairIndex)\n else:\n logger.error(\n f'{markerPairIndex + 1} is not a valid marker pair number.')\n continue\n\n\ndef setUpLogger():\n global logger\n loggerHandlers = [logging.StreamHandler()]\n if not settings[\"preview\"]:\n loggerHandlers.append(logging.FileHandler(\n filename=f'{webmsPath}/{settings[\"titleSuffix\"]}.log', mode='a', encoding='utf-8'))\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt=\"%y-%m-%d %H:%M:%S\",\n handlers=loggerHandlers)\n logger = 
logging.getLogger()\n\n\ndef buildArgParser():\n parser = argparse.ArgumentParser(\n description='Generate trimmed webms from input video.')\n parser.add_argument('--input-video', '-i', dest='inputVideo', default='',\n help='Input video path.')\n parser.add_argument('--download-video', '-dv', action='store_true', dest='downloadVideo',\n help='Download video from the internet and use as input video for processing marker data.')\n parser.add_argument('--markers-json', '-j', required=True, dest='json',\n help=('Specify markers json path for generating webms from input video.' +\n 'Automatically streams required portions of input video from the internet if it is not otherwise specified.'))\n parser.add_argument('--overlay', '-o', dest='overlay',\n help='overlay image path')\n parser.add_argument('--multiply-crop', '-mc', type=float, dest='cropMultiple', default=1,\n help=('Multiply all crop dimensions by an integer. ' +\n '(Helpful if you change resolutions: eg 1920x1080 * 2 = 3840x2160(4k)).'))\n parser.add_argument('--multiply-crop-x', '-mcx', type=float, dest='cropMultipleX', default=1,\n help='Multiply all x crop dimensions by an integer.')\n parser.add_argument('--multiply-crop-y', '-mcy', type=float, dest='cropMultipleY', default=1,\n help='Multiply all y crop dimensions by an integer.')\n parser.add_argument('--gfycat', '-gc', action='store_true',\n help='upload all output webms to gfycat and print reddit markdown with all links')\n parser.add_argument('--audio', '-a', action='store_true',\n help='Enable audio in output webms.')\n parser.add_argument('--format', '-f', default='bestvideo+(bestaudio[acodec=opus]/bestaudio[acodec=vorbis]/bestaudio)',\n help='Specify format string passed to youtube-dl.')\n parser.add_argument('--extra-video-filters', '-evf', dest='extraVideoFilters', default='',\n help='Specify any extra video filters to be passed to ffmpeg.')\n parser.add_argument('--delay', '-d', type=float, dest='delay', default=0,\n help='Add a fixed delay to both the start and end time of each marker. Can be negative.')\n parser.add_argument('--gamma', '-ga', type=float, dest='gamma', default=1,\n help='Apply luminance gamma correction. Pass in a value between 0 and 1 to brighten shadows and reveal darker details.')\n parser.add_argument('--rotate', '-r', choices=['clock', 'cclock'],\n help='Rotate video 90 degrees clockwise or counter-clockwise.')\n parser.add_argument('--denoise', '-dn', type=int, default=0, choices=range(0, 6),\n help='Apply the hqdn3d denoise filter using a preset strength level from 0-5 where 0 is disabled and 5 is very strong.')\n parser.add_argument('--video-stabilization', '-vs', dest='videoStabilization', type=int, default=0, choices=range(0, 7),\n help='Apply video stabilization using a preset strength level from 0-6 where 0 is disabled and 6 is strongest.')\n parser.add_argument('--video-stabilization-dynamic-zoom', '-vsdz', dest='videoStabilizationDynamicZoom', type=bool, default=False,\n help='Enable video stabilization dynamic zoom. Unlike a static zoom the zoom in can vary with time to reduce cropping of video.')\n parser.add_argument('--deinterlace', '-di', action='store_true',\n help='Apply bwdif deinterlacing.')\n parser.add_argument('--expand-color-range', '-ecr', dest='expandColorRange', action='store_true',\n help='Expand the output video color range to full (0-255).')\n parser.add_argument('--loop', '-l', dest='loop', choices=['none', 'fwrev', 'fade'], default='none',\n help='Apply special looping effect to marker pair clips. 
For a forward-reverse or ping-pong loop use fwrev. For a cross-fading loop use fade.')\n parser.add_argument('--fade-duration', '-fd', type=float, dest='fadeDuration', default=0.5,\n help=('When cross-fading loop is enabled set the duration over which to cross-fade the end and start of the clip. '\n + 'The fade duration is clamped to a minimum of 0.1 seconds and a maximum of 40%% of the output clip duration.'))\n parser.add_argument('--encode-speed', '-s', type=int, dest='encodeSpeed', choices=range(0, 6),\n help='Set the vp9 encoding speed.')\n parser.add_argument('--crf', type=int,\n help=('Set constant rate factor (crf). Default is 30 for video file input.' +\n 'Automatically set to a factor of the detected video bitrate'))\n parser.add_argument('--two-pass', '-tp', dest='twoPass', action='store_true',\n help='Enable two-pass encoding. Improves quality at the cost of encoding speed.')\n parser.add_argument('--target-max-bitrate', '-b', dest='targetMaxBitrate', type=int,\n help=('Set target max bitrate in kilobits/s. Constrains bitrate of complex scenes.' +\n 'Automatically set based on detected video bitrate.'))\n parser.add_argument('--no-auto-scale-crop-res', '-nascr', dest='noAutoScaleCropRes', action='store_true',\n help=('Disable automatically scaling the crop resolution when a mismatch with video resolution is detected.'))\n parser.add_argument('--preview', '-p', action='store_true',\n help=('Pass in semicolon separated lists of marker pairs.'\n + 'Lists of marker pairs are comma-separated numbers or dash separated ranges. (eg 1-3,7;4-6,9)'))\n parser.add_argument('--no-auto-find-input-video', '-nafiv', dest='noAutoFindInputVideo', action='store_true',\n help='Disable automatic detection and usage of input video when not in preview mode.')\n parser.add_argument('--no-speed-maps', '-nsm', dest='noSpeedMaps', action='store_true',\n help='Disable speed maps for time-variable speed.')\n return parser.parse_known_args()\n\n\ndef loadMarkers(markersJson, settings):\n markersDict = json.loads(markersJson)\n settings = {**settings, **markersDict}\n if \"markers\" in settings and not \"markerPairs\" in settings:\n settings[\"markerPairs\"] = settings[\"markers\"]\n settings[\"videoURL\"] = 'https://www.youtube.com/watch?v=' + \\\n settings[\"videoID\"]\n\n return settings\n\n\ndef getVideoURL(settings):\n from youtube_dl import YoutubeDL\n\n ydl_opts = {'format': settings[\"format\"], 'forceurl': True,\n 'ffmpeg_location': ffmpegPath, 'merge_output_format': 'mkv',\n 'outtmpl': f'{settings[\"downloadVideoPath\"]}.%(ext)s', \"cachedir\": False}\n ydl = YoutubeDL(ydl_opts)\n if settings[\"downloadVideo\"]:\n ydl_info = ydl.extract_info(settings[\"videoURL\"], download=True)\n settings[\"downloadVideoPath\"] = f'{settings[\"downloadVideoPath\"]}.mkv'\n else:\n ydl_info = ydl.extract_info(settings[\"videoURL\"], download=False)\n\n if 'requested_formats' in ydl_info:\n rf = ydl_info[\"requested_formats\"]\n videoInfo = rf[0]\n else:\n videoInfo = ydl_info\n\n dashFormatIDs = []\n dashVideoFormatID = None\n dashAudioFormatID = None\n\n if settings[\"downloadVideo\"]:\n settings[\"inputVideo\"] = settings[\"downloadVideoPath\"]\n else:\n if videoInfo[\"protocol\"] == 'http_dash_segments':\n settings[\"isDashVideo\"] = True\n dashVideoFormatID = videoInfo[\"format_id\"]\n dashFormatIDs.append(dashVideoFormatID)\n else:\n settings[\"videoURL\"] = videoInfo[\"url\"]\n\n if 'requested_formats' in ydl_info:\n audioInfo = rf[1]\n settings[\"audiobr\"] = int(audioInfo[\"tbr\"])\n\n if 
audioInfo[\"protocol\"] == 'http_dash_segments':\n settings[\"isDashAudio\"] = True\n dashAudioFormatID = audioInfo[\"format_id\"]\n dashFormatIDs.append(dashAudioFormatID)\n else:\n settings[\"audioURL\"] = audioInfo[\"url\"]\n\n if dashFormatIDs:\n filteredDashPath = filterDash(videoInfo[\"url\"], dashFormatIDs)\n if settings[\"isDashVideo\"]:\n settings[\"videoURL\"] = filteredDashPath\n if settings[\"isDashAudio\"]:\n settings[\"audioURL\"] = filteredDashPath\n\n return getVideoInfo(settings, videoInfo)\n\n\ndef getVideoInfo(settings, videoInfo):\n if settings[\"inputVideo\"]:\n probedSettings = ffprobeVideoProperties(settings[\"inputVideo\"])\n else:\n probedSettings = ffprobeVideoProperties(settings[\"videoURL\"])\n\n if probedSettings is not None:\n settings = {**settings, **videoInfo, **probedSettings}\n else:\n if not videoInfo:\n logger.error(\n \"Could not fetch local input video info with ffprobe.\")\n settings = {**settings, **videoInfo}\n\n if settings[\"isDashVideo\"] or not \"bit_rate\" in settings:\n settings[\"bit_rate\"] = int(videoInfo[\"tbr\"])\n\n if not \"r_frame_rate\" in settings:\n settings[\"r_frame_rate\"] = videoInfo[\"fps\"]\n\n logger.info(f'Video Title: {settings[\"videoTitle\"]}')\n logger.info(f'Video Width: {settings[\"width\"]}')\n logger.info(f'Video Height: {settings[\"height\"]}')\n logger.info(f'Video fps: {settings[\"r_frame_rate\"]}')\n logger.info(f'Detected Video Bitrate: {settings[\"bit_rate\"]}kbps')\n\n settings = autoSetCropMultiples(settings)\n\n return settings\n\n\ndef prepareGlobalSettings(settings):\n logger.info(f'Video URL: {settings[\"videoURL\"]}')\n logger.info(\n f'Merge List: {settings[\"markerPairMergeList\"] if settings[\"markerPairMergeList\"] else \"None\"}')\n\n settings = getVideoURL(settings)\n encodeSettings = getDefaultEncodeSettings(settings[\"bit_rate\"])\n\n logger.info('-' * 80)\n unknownColorSpaceMsg = \"unknown (bt709 will be assumed for color range operations)\"\n logger.info((f'Automatically determined encoding settings: CRF: {encodeSettings[\"crf\"]} (0-63), ' +\n f'Auto Target Max Bitrate: {encodeSettings[\"autoTargetMaxBitrate\"]}kbps, ' +\n f'Detected Color Space: {settings[\"color_space\"] if settings[\"color_space\"] else unknownColorSpaceMsg}, ' +\n f'Two-pass Encoding Enabled: {encodeSettings[\"twoPass\"]}, ' +\n f'Encoding Speed: {encodeSettings[\"encodeSpeed\"]} (0-5)'))\n\n settings = {**encodeSettings, **settings}\n\n logger.info('-' * 80)\n logger.info((f'Global Encoding Settings: CRF: {settings[\"crf\"]} (0-63), ' +\n f'Detected Bitrate: {settings[\"bit_rate\"]}kbps, ' +\n f'Global Target Max Bitrate: {str(settings[\"targetMaxBitrate\"]) + \"kbps\" if \"targetMaxBitrate\" in settings else \"None\"}, ' +\n f'Two-pass Encoding Enabled: {settings[\"twoPass\"]}, Encoding Speed: {settings[\"encodeSpeed\"]} (0-5), ' +\n f'Audio Enabled: {settings[\"audio\"]}, Denoise: {settings[\"denoise\"][\"desc\"]}, Rotate: {settings[\"rotate\"]}, ' +\n f'Expand Color Range Enabled: {settings[\"expandColorRange\"]}, ' +\n f'Speed Maps Enabled: {settings[\"enableSpeedMaps\"]}, ' +\n f'Special Looping: {settings[\"loop\"]}, ' +\n (f'Fade Duration: {settings[\"fadeDuration\"]}, ' if settings[\"loop\"] == 'fade' else '') +\n f'Video Stabilization: {settings[\"videoStabilization\"][\"desc\"]}, ' +\n f'Video Stabilization Dynamic Zoom: {settings[\"videoStabilizationDynamicZoom\"]}'))\n return settings\n\n\ndef getMarkerPairSettings(settings, markerPairIndex):\n mp = markerPair = 
{**(settings[\"markerPairs\"][markerPairIndex])}\n\n    cropString = mp[\"crop\"]\n    crops = mp[\"cropComponents\"] = cropString.split(':')\n    crops[0] = settings[\"cropMultipleX\"] * int(crops[0])\n    if crops[2] != 'iw':\n        crops[2] = settings[\"cropMultipleX\"] * int(crops[2])\n    else:\n        crops[2] = settings[\"width\"]\n    crops[1] = settings[\"cropMultipleY\"] * int(crops[1])\n    if crops[3] != 'ih':\n        crops[3] = settings[\"cropMultipleY\"] * int(crops[3])\n    else:\n        crops[3] = settings[\"height\"]\n\n    bitrateCropFactor = (crops[2] * crops[3]) / \\\n        (settings[\"width\"] * settings[\"height\"])\n    markerPairEncodeSettings = getDefaultEncodeSettings(\n        settings[\"bit_rate\"] * bitrateCropFactor)\n    settings = {**markerPairEncodeSettings, **settings}\n\n    if \"targetMaxBitrate\" in settings:\n        settings[\"autoTargetMaxBitrate\"] = getDefaultEncodeSettings(\n            settings[\"targetMaxBitrate\"] * bitrateCropFactor)[\"autoTargetMaxBitrate\"]\n    else:\n        settings[\"autoTargetMaxBitrate\"] = markerPairEncodeSettings[\"autoTargetMaxBitrate\"]\n\n    mps = markerPairSettings = {**settings, **(mp[\"overrides\"])}\n\n    mp[\"exists\"] = False\n    if not mps[\"preview\"]:\n        if \"titlePrefix\" in mps:\n            mps[\"titlePrefix\"] = cleanFileName(mps[\"titlePrefix\"])\n        mp[\"fileNameStem\"] = f'{mps[\"titlePrefix\"] + \"-\" if \"titlePrefix\" in mps else \"\"}{mps[\"titleSuffix\"]}-{markerPairIndex + 1}'\n        mp[\"fileName\"] = f'{mp[\"fileNameStem\"]}.webm'\n        mp[\"filePath\"] = f'{webmsPath}/{mp[\"fileName\"]}'\n        if checkWebmExists(mp[\"fileName\"], mp[\"filePath\"]):\n            mp[\"exists\"] = True\n            return (markerPair, markerPairSettings)\n\n    mp[\"start\"] = mp[\"start\"] + mps[\"delay\"]\n    mp[\"end\"] = mp[\"end\"] + mps[\"delay\"]\n    mp[\"duration\"] = mp[\"end\"] - mp[\"start\"]\n\n    mp[\"isVariableSpeed\"] = False\n    if mps[\"enableSpeedMaps\"] and \"speedMap\" in mp:\n        for left, right in zip(mp[\"speedMap\"][:-1], mp[\"speedMap\"][1:]):\n            if left[\"y\"] != right[\"y\"]:\n                mp[\"isVariableSpeed\"] = True\n                break\n    else:\n        mp[\"speedMap\"] = [{\"x\": mp[\"start\"], \"y\":mp[\"speed\"]}, {\n            \"x\": mp[\"end\"], \"y\":mp[\"speed\"]}]\n\n    if mps[\"loop\"] == 'fwrev':\n        mp[\"isVariableSpeed\"] = False\n\n    mp[\"speedFilter\"], mp[\"outputDuration\"] = getSpeedFilterAndDuration(\n        mp[\"speedMap\"], mps, mps[\"r_frame_rate\"])\n\n    titlePrefixLogMsg = f'Title Prefix: {mps[\"titlePrefix\"] if \"titlePrefix\" in mps else \"\"}'\n    logger.info('-' * 80)\n    logger.info((f'Marker Pair {markerPairIndex + 1} Settings: {titlePrefixLogMsg}, ' +\n                 f'CRF: {mps[\"crf\"]} (0-63), Bitrate Crop Factor: {bitrateCropFactor}, ' +\n                 f'Crop Adjusted Target Max Bitrate: {mps[\"autoTargetMaxBitrate\"]}kbps, ' +\n                 f'Two-pass Encoding Enabled: {mps[\"twoPass\"]}, Encoding Speed: {mps[\"encodeSpeed\"]} (0-5), ' +\n                 f'Expand Color Range Enabled: {mps[\"expandColorRange\"]}, ' +\n                 f'Audio Enabled: {mps[\"audio\"]}, Denoise: {mps[\"denoise\"][\"desc\"]}, ' +\n                 f'Marker Pair {markerPairIndex + 1} is of variable speed: {mp[\"isVariableSpeed\"]}, ' +\n                 f'Speed Maps Enabled: {mps[\"enableSpeedMaps\"]}, ' +\n                 f'Special Looping: {mps[\"loop\"]}, ' +\n                 (f'Fade Duration: {mps[\"fadeDuration\"]}s, ' if mps[\"loop\"] == 'fade' else '') +\n                 f'Final Output Duration: {mp[\"outputDuration\"]}, ' +\n                 f'Video Stabilization: {mps[\"videoStabilization\"][\"desc\"]}, ' +\n                 f'Video Stabilization Dynamic Zoom: {mps[\"videoStabilizationDynamicZoom\"]}'))\n    logger.info('-' * 80)\n\n    return (markerPair, markerPairSettings)\n\n\ndef makeMarkerPairClip(settings, markerPairIndex):\n    mp, mps = 
getMarkerPairSettings(settings, markerPairIndex)\n\n if mp[\"exists\"]:\n return {**(settings[\"markerPairs\"][markerPairIndex]), **mp}\n\n inputs = ''\n audio_filter = ''\n video_filter = ''\n\n if mp[\"isVariableSpeed\"] or mps[\"loop\"] != 'none':\n mps[\"audio\"] = False\n\n reconnectFlags = r'-reconnect 1 -reconnect_at_eof 1 -reconnect_streamed 1 -reconnect_delay_max 5'\n if mps[\"audio\"]:\n # ffplay previewing does not support multiple inputs\n # if an input video is provided, a dash xml is used, or previewing is on, there is only one input\n if not mps[\"inputVideo\"] and not settings[\"isDashAudio\"] and not settings[\"preview\"]:\n inputs += reconnectFlags\n inputs += f' -ss {mp[\"start\"]} -i \"{mps[\"audioURL\"]}\" '\n\n # preview mode does not start each clip at time 0 unlike encoding mode\n if settings[\"preview\"] and (settings[\"inputVideo\"] or settings[\"isDashAudio\"]):\n audio_filter += f'atrim={mp[\"start\"]}:{mp[\"end\"]},atempo={mp[\"speed\"]}'\n # encoding mode starts each clip at time 0\n elif not settings[\"preview\"]:\n audio_filter += f'atrim=0:{mp[\"duration\"]},atempo={mp[\"speed\"]}'\n # when streaming the required chunks from the internet the video and audio inputs are separate\n else:\n mps[\"audio\"] = False\n logger.warning(\n 'Audio disabled when previewing without an input video over non-dash protocol.')\n\n if not mps[\"inputVideo\"] and not settings[\"isDashVideo\"]:\n inputs += reconnectFlags\n\n if mps[\"inputVideo\"]:\n inputs += f' -ss {mp[\"start\"]} -i \"{mps[\"inputVideo\"]}\" '\n else:\n inputs += f' -ss {mp[\"start\"]} -i \"{mps[\"videoURL\"]}\" '\n\n ffmpegCommand = ' '.join((\n ffmpegPath,\n f'-hide_banner',\n inputs,\n f'-benchmark',\n f'-c:v libvpx-vp9 -pix_fmt yuv420p',\n f'-c:a libopus -b:a 128k',\n f'-slices 8 -row-mt 1 -tile-columns 6 -tile-rows 2',\n f'-crf {mps[\"crf\"]} -b:v {mps[\"autoTargetMaxBitrate\"]}k',\n f'-metadata title=\"{mps[\"videoTitle\"]}\"',\n f'-r ({mps[\"r_frame_rate\"]}*{mp[\"speed\"]})' if not mp[\"isVariableSpeed\"] and mp[\"speed\"] > 1 else '',\n f'-af {audio_filter}' if mps[\"audio\"] else '-an',\n f'-f webm ',\n ))\n\n if not mps[\"preview\"]:\n video_filter += f'trim=0:{mp[\"duration\"]}'\n else:\n video_filter += f'trim={mp[\"start\"]}:{mp[\"end\"]}'\n\n if mps[\"preview\"] and not settings[\"inputVideo\"]:\n video_filter += f',loop=loop=-1:size=(32767)'\n\n crops = mp[\"cropComponents\"]\n video_filter += f',crop=x={crops[0]}:y={crops[1]}:w={crops[2]}:h={crops[3]}'\n if mps[\"preview\"]:\n video_filter += f',scale=w=iw/2:h=ih/2'\n crops[2] /= 2\n crops[3] /= 2\n\n if mps[\"rotate\"]:\n video_filter += f',transpose={mps[\"rotate\"]}'\n crops[2], crops[3] = crops[3], crops[2]\n\n if mps[\"preview\"]:\n video_filter_before_correction = video_filter\n\n if 0 <= mps[\"gamma\"] <= 4 and mps[\"gamma\"] != 1:\n video_filter += f',lutyuv=y=gammaval({mps[\"gamma\"]})'\n if mps[\"extraVideoFilters\"]:\n video_filter += f',{mps[\"extraVideoFilters\"]}'\n if mps[\"deinterlace\"]:\n video_filter += f',bwdif'\n if mps[\"expandColorRange\"]:\n video_filter += f',colorspace=all={settings[\"color_space\"] if settings[\"color_space\"] else \"bt709\"}:range=pc'\n if mps[\"denoise\"][\"enabled\"]:\n video_filter += f',hqdn3d=luma_spatial={mps[\"denoise\"][\"lumaSpatial\"]}'\n # if mps[\"overlayPath\"]:\n # video_filter += f'[1:v]overlay=x=W-w-10:y=10:alpha=0.5'\n # inputs += f'-i \"{mps[\"overlayPath\"]}\"'\n\n if mps[\"loop\"] != 'fwrev':\n video_filter += f',{mp[\"speedFilter\"]}'\n if mps[\"loop\"] == 'fwrev':\n 
reverseSpeedMap = [{\"x\": speedPoint[\"x\"], \"y\":speedPointRev[\"y\"]}\n for speedPoint, speedPointRev in zip(mp[\"speedMap\"], reversed(mp[\"speedMap\"]))]\n reverseSpeedFilter, _ = getSpeedFilterAndDuration(\n reverseSpeedMap, mps, mps[\"r_frame_rate\"])\n loop_filter = ''\n loop_filter += f',split=2[f1][f2];'\n loop_filter += f'[f1]{mp[\"speedFilter\"]}[f];'\n loop_filter += f'''[f2]{reverseSpeedFilter},select='gt(n,0)',reverse,select='gt(n,0)',setpts=(PTS-STARTPTS)[r];'''\n loop_filter += f'[f][r]concat=n=2'\n if mps[\"loop\"] == 'fade':\n dur = mp[\"outputDuration\"]\n fadeDur = mps[\"fadeDuration\"] = max(\n 0.1, min(mps[\"fadeDuration\"], 0.4*mp[\"outputDuration\"]))\n\n easeA = f'1'\n easeB = f'0'\n easeP = f'(T/{fadeDur})'\n alphaEase = getEasingExpression('easeInOutCubic', easeA, easeB, easeP)\n\n loop_filter = ''\n loop_filter += f''',split=3[1][2][3];'''\n loop_filter += f'''[1]select='lte(t,{fadeDur})',setpts=(PTS-STARTPTS)[fi];'''\n loop_filter += f'''[2]select='gt(t,{fadeDur})*lt(t,{dur}-{fadeDur})',setpts=(PTS-STARTPTS)[m];'''\n loop_filter += f'''[3]select='gte(t,{dur}-{fadeDur})',setpts=(PTS-STARTPTS)[3b];'''\n loop_filter += f'''[3b]format=yuva420p,geq=lum='p(X,Y)':a='{alphaEase}*alpha(X,Y)'[fo];'''\n loop_filter += f'''[fi][fo]overlay=eof_action=pass,setpts=(PTS-STARTPTS)[cf];'''\n loop_filter += f'''[m][cf]concat=n=2'''\n\n if mps[\"preview\"]:\n return runffplayCommand(inputs, video_filter, video_filter_before_correction, audio_filter, markerPairIndex, mp, mps)\n\n vidstabEnabled = mps[\"videoStabilization\"][\"enabled\"]\n if vidstabEnabled:\n vidstab = mps[\"videoStabilization\"]\n shakyPath = f'{webmsPath}/shaky'\n os.makedirs(shakyPath, exist_ok=True)\n transformPath = f'{shakyPath}/{mp[\"fileNameStem\"]}.trf'\n shakyWebmPath = f'{shakyPath}/{mp[\"fileNameStem\"]}-shaky.webm'\n video_filter += '[shaky];[shaky]'\n vidstabdetectFilter = video_filter + \\\n f'''vidstabdetect=result='{transformPath}':shakiness={vidstab[\"shakiness\"]}'''\n\n vidstabtransformFilter = video_filter + \\\n f'''vidstabtransform=input='{transformPath}':smoothing={vidstab[\"smoothing\"]}'''\n if mps[\"videoStabilizationDynamicZoom\"]:\n vidstabtransformFilter += f':optzoom=2:zoomspeed={vidstab[\"zoomspeed\"]}'\n vidstabtransformFilter += r',unsharp=5:5:0.8:3:3:0.4'\n\n if mps[\"loop\"] != 'none':\n vidstabdetectFilter += loop_filter\n vidstabtransformFilter += loop_filter\n\n ffmpegVidstabdetect = ffmpegCommand + f'-vf \"{vidstabdetectFilter}\" '\n ffmpegVidstabdetect += f' -y '\n ffmpegVidstabtransform = ffmpegCommand + \\\n f'-vf \"{vidstabtransformFilter}\" '\n ffmpegVidstabtransform += f' -n '\n else:\n ffmpegCommand += f' -n '\n\n ffmpegCommands = []\n if mps[\"twoPass\"] and not vidstabEnabled:\n if mps[\"loop\"] != 'none':\n video_filter += loop_filter\n ffmpegCommand += f' -vf \"{video_filter}\" '\n ffmpegPass1 = ffmpegCommand + ' -pass 1 -'\n ffmpegPass2 = ffmpegCommand + \\\n f' -speed {mps[\"encodeSpeed\"]} -pass 2 \"{mp[\"filePath\"]}\"'\n\n ffmpegCommands = [ffmpegPass1, ffmpegPass2]\n elif vidstabEnabled:\n if mps[\"twoPass\"]:\n ffmpegVidstabdetect += f' -pass 1'\n else:\n ffmpegVidstabdetect += f' -speed 5'\n ffmpegVidstabdetect += f' \"{shakyWebmPath}\"'\n\n if mps[\"twoPass\"]:\n ffmpegVidstabtransform += f' -pass 2'\n ffmpegVidstabtransform += f' -speed {mps[\"encodeSpeed\"]} \"{mp[\"filePath\"]}\"'\n\n ffmpegCommands = [ffmpegVidstabdetect, ffmpegVidstabtransform]\n else:\n if mps[\"loop\"] != 'none':\n video_filter += loop_filter\n ffmpegCommand += f' -vf 
\"{video_filter}\" '\n        ffmpegCommand += f' -speed {mps[\"encodeSpeed\"]} \"{mp[\"filePath\"]}\"'\n\n        ffmpegCommands = [ffmpegCommand]\n\n    if not (1 <= len(ffmpegCommands) <= 2):\n        logger.error(f'ffmpeg command could not be built.\\n')\n        logger.error(f'Failed to generate: \"{mp[\"fileName\"]}\"\\n')\n        return {**(settings[\"markerPairs\"][markerPairIndex])}\n\n    return runffmpegCommand(ffmpegCommands, markerPairIndex, mp)\n\n\ndef runffmpegCommand(ffmpegCommands, markerPairIndex, mp):\n    ffmpegPass1 = ffmpegCommands[0]\n    if len(ffmpegCommands) == 2:\n        logger.info('Running first pass...')\n\n    logger.info('Using ffmpeg command: ' +\n                re.sub(r'(&a?itags?.*?\")', r'\"', ffmpegPass1) + '\\n')\n    ffmpegProcess = subprocess.run(shlex.split(ffmpegPass1))\n\n    if len(ffmpegCommands) == 2:\n        ffmpegPass2 = ffmpegCommands[1]\n\n        logger.info('Running second pass...')\n        logger.info('Using ffmpeg command: ' +\n                    re.sub(r'(&a?itags?.*?\")', r'\"', ffmpegPass2) + '\\n')\n        ffmpegProcess = subprocess.run(shlex.split(ffmpegPass2))\n\n    if ffmpegProcess.returncode == 0:\n        logger.info(f'Successfully generated: \"{mp[\"fileName\"]}\"\\n')\n        return {**(settings[\"markerPairs\"][markerPairIndex]), **mp}\n    else:\n        logger.error(f'Failed to generate: \"{mp[\"fileName\"]}\"\\n')\n        return {**(settings[\"markerPairs\"][markerPairIndex])}\n\n\ndef getSpeedFilterAndDuration(speedMap, mps, fps):\n    logger.info('-' * 80)\n    video_filter_speed_map = ''\n    setpts = ''\n    outputDuration = 0\n\n    fps = Fraction(fps)\n    frameDur = 1 / fps\n    nSects = len(speedMap) - 1\n    # Account for marker pair start time as trim filter sets start time to ~0\n    speedMapStartTime = speedMap[0][\"x\"]\n    # Account for first input frame delay due to potentially imprecise trim\n    startt = ceil(speedMapStartTime/frameDur) * frameDur - speedMapStartTime\n    logger.info(f'First Input Frame Time: {startt}')\n\n    for sect, (left, right) in enumerate(zip(speedMap[:-1], speedMap[1:])):\n        startSpeed = left[\"y\"]\n        endSpeed = right[\"y\"]\n        speedChange = endSpeed - startSpeed\n\n        sectStart = left[\"x\"] - speedMapStartTime - startt\n        sectEnd = right[\"x\"] - speedMapStartTime - startt\n        # Account for last input frame delay due to potentially imprecise trim\n        if sect == nSects - 1:\n            logger.info(\n                f'Last Input Frame Time: {right[\"x\"] - speedMapStartTime - startt}')\n            sectEnd = floor(right[\"x\"]/frameDur) * frameDur\n            # When trim is frame-precise, the frame that begins at the marker pair end time is not included\n            if right[\"x\"] - sectEnd < 1e-10:\n                sectEnd = sectEnd - frameDur\n            sectEnd = sectEnd - speedMapStartTime - startt\n            sectEnd = floor(sectEnd*1000000) / 1000000\n            logger.info(f'Last Input Frame Time (Rounded): {sectEnd}')\n\n        sectDuration = sectEnd - sectStart\n        if sectDuration == 0:\n            continue\n\n        m = speedChange / sectDuration\n        b = startSpeed - m * sectStart\n\n        if speedChange == 0:\n            # Duration is time multiplied by slowdown (or time divided by speed)\n            sliceDuration = f'(min((T-STARTT-{sectStart}),{sectDuration})/{endSpeed})'\n            outputDuration += sectDuration/endSpeed\n        else:\n            # Integrate the reciprocal of the linear time vs speed function for the current section\n            sliceDuration = f'(1/{m})*(log(abs({m}*min((T-STARTT),{sectEnd})+{b}))-log(abs({m}*{sectStart}+{b})))'\n            outputDuration += (1/m) * (log(abs(m * sectEnd\n                                               + b)) - log(abs(m*sectStart + b)))\n        sliceDuration = f'if(gte((T-STARTT),{sectStart}), {sliceDuration},0)'\n\n        if sect == 0:\n            setpts += f'(if(eq(N,0),0,{sliceDuration}))'\n        else:\n            setpts += f'+({sliceDuration})'\n\n    video_filter_speed_map += 
f'''setpts='({setpts})/TB' '''\n\n logger.info(f'Last Output Frame Time: {outputDuration}')\n # Each output frame time is rounded to the nearest multiple of a frame's duration at the given fps\n outputDuration = round(outputDuration/frameDur)*frameDur\n # The last included frame is held for a single frame's duration\n outputDuration += frameDur\n outputDuration = round(outputDuration*1000) / 1000\n\n return video_filter_speed_map, outputDuration\n\n\ndef getEasingExpression(easingFunc, easeA, easeB, easeP):\n easeT = f'(2*{easeP})'\n easeM = f'({easeP}-1)'\n\n ease = '1' # linear ease by default\n if easingFunc == 'easeInOutCubic':\n ease = f'if(lt({easeT},1), {easeP}*{easeT}^2, 1+({easeM}^3)*4)'\n if easingFunc == 'easeInOutSine':\n ease = f'0.5*(1-cos({easeP}*PI))'\n if easingFunc == 'easeOutCircle':\n ease = f'sqrt(1-{easeM}^2)'\n\n easingExpression = f'({easeA}+({easeB}-{easeA})*{ease})'\n return easingExpression\n\n\ndef runffplayCommand(inputs, video_filter, video_filter_before_correction, audio_filter, markerPairIndex, mp, mps):\n logger.info('running ffplay command')\n if 0 <= markerPairIndex < len(settings[\"markerPairs\"]):\n ffplayOptions = f'-hide_banner -fs -sync video -fast -genpts -infbuf '\n ffplayVideoFilter = f'-vf \"{video_filter}\"'\n if settings[\"inputVideo\"]:\n ffplayOptions += f' -loop 0'\n ffplayVideoFilter += f' -vf \"{video_filter_before_correction}\"'\n\n ffplayAudioFilter = f'-af {audio_filter}'\n\n ffplayCommand = ' '.join((\n ffplayPath,\n inputs,\n ffplayOptions,\n ffplayVideoFilter,\n ffplayAudioFilter if mps[\"audio\"] else '-an'\n ))\n\n logger.info('Using ffplay command: ' +\n re.sub(r'(&a?itags?.*?\")', r'\"', ffplayCommand) + '\\n')\n ffplayProcess = subprocess.run(shlex.split(ffplayCommand))\n\n\nclass MissingMergeInput(Exception):\n pass\n\n\nclass MissingMarkerPairFilePath(Exception):\n pass\n\n\ndef makeMergedClips(settings):\n markerPairMergeList = settings[\"markerPairMergeList\"]\n markerPairMergeList = markerPairMergeList.split(';')\n inputsTxtPath = ''\n\n mergeListGen = createMergeList(markerPairMergeList)\n for merge, mergeList in mergeListGen:\n inputs = ''\n logger.info('-' * 80)\n try:\n for i in mergeList:\n markerPair = settings[\"markerPairs\"][i-1]\n if 'fileName' in markerPair and 'filePath' in markerPair:\n if Path(markerPair[\"filePath\"]).is_file():\n inputs += f'''file '{settings[\"markerPairs\"][i-1][\"fileName\"]}'\\n'''\n else:\n raise MissingMergeInput\n else:\n raise MissingMarkerPairFilePath\n except IndexError:\n logger.error(\n f'Aborting generation of webm with merge list {mergeList}.')\n logger.error(f'Missing required marker pair number {i}.')\n continue\n except MissingMergeInput:\n logger.error(\n f'Aborting generation of webm with merge list {mergeList}.')\n logger.error(\n f'Missing required input webm with path {markerPair[\"filePath\"]}.')\n continue\n except MissingMarkerPairFilePath:\n logger.error(\n f'Aborting generation of webm with merge list {mergeList}')\n logger.error(f'Missing file path for marker pair {i}')\n continue\n\n inputsTxtPath = f'{webmsPath}/inputs.txt'\n with open(inputsTxtPath, \"w+\", encoding='utf-8') as inputsTxt:\n inputsTxt.write(inputs)\n mergedFileName = f'{settings[\"titleSuffix\"]}-({merge}).webm'\n mergedFilePath = f'{webmsPath}/{mergedFileName}'\n ffmpegConcatCmd = f' \"{ffmpegPath}\" -n -hide_banner -f concat -safe 0 -i \"{inputsTxtPath}\" -c copy \"{mergedFilePath}\"'\n\n if not Path(mergedFilePath).is_file():\n logger.info('-' * 80)\n logger.info(f'Generating 
\"{mergedFileName}\"...\\n')\n            logger.info(f'Using ffmpeg command: {ffmpegConcatCmd}')\n            ffmpegProcess = subprocess.run(shlex.split(ffmpegConcatCmd))\n            if ffmpegProcess.returncode == 0:\n                logger.info(f'Successfully generated: \"{mergedFileName}\"\\n')\n            else:\n                logger.error(f'Failed to generate: \"{mergedFileName}\"\\n')\n        else:\n            logger.info(f'Skipped existing file: \"{mergedFileName}\"\\n')\n\n        try:\n            os.remove(inputsTxtPath)\n        except OSError:\n            pass\n\n\ndef checkWebmExists(fileName, filePath):\n    if not Path(filePath).is_file():\n        logger.info(f'Generating \"{fileName}\"...\\n')\n        return False\n    else:\n        logger.info(f'Skipped existing file: \"{fileName}\"\\n')\n        return True\n\n\ndef createMergeList(markerPairMergeList):\n    for merge in markerPairMergeList:\n        mergeCSV = merge.split(',')\n        mergeList = []\n        for mergeRange in mergeCSV:\n            if '-' in mergeRange:\n                mergeRange = mergeRange.split('-')\n                startPair = int(mergeRange[0])\n                endPair = int(mergeRange[1])\n                if (startPair <= endPair):\n                    for i in range(startPair, endPair + 1):\n                        mergeList.append(i)\n                else:\n                    for i in range(startPair, endPair - 1 if endPair >= 1 else 0, -1):\n                        mergeList.append(i)\n            else:\n                mergeList.append(int(mergeRange))\n        yield merge, mergeList\n\n\ndef ffprobeVideoProperties(video):\n    try:\n        ffprobeCommand = f'\"{ffprobePath}\" \"{video}\" -v quiet -select_streams v -print_format json -show_streams -show_format'\n        ffprobeOutput = subprocess.check_output(shlex.split(ffprobeCommand))\n    except subprocess.CalledProcessError as cpe:\n        logger.error(f'Could not fetch video properties with ffprobe')\n        logger.error(f'{cpe}')\n        return None\n\n    ffprobeOutput = ffprobeOutput.decode('utf-8')\n    logger.info('-' * 80)\n    logger.info('Detecting video properties with ffprobe')\n    ffprobeData = json.loads(ffprobeOutput)\n\n    ffprobeData[\"streams\"][0][\"bit_rate\"] = int(\n        int(ffprobeData[\"format\"][\"bit_rate\"]) / 1000)\n    return ffprobeData[\"streams\"][0]\n\n\ndef autoSetCropMultiples(settings):\n    cropMultipleX = (settings[\"width\"] / settings[\"cropResWidth\"])\n    cropMultipleY = (settings[\"height\"] / settings[\"cropResHeight\"])\n    if settings[\"cropResWidth\"] != settings[\"width\"] or settings[\"cropResHeight\"] != settings[\"height\"]:\n        logger.info('-' * 80)\n        logger.warning('Crop resolution does not match video resolution.')\n        if settings[\"cropResWidth\"] != settings[\"width\"]:\n            logger.warning(\n                f'Crop resolution width ({settings[\"cropResWidth\"]}) not equal to video width ({settings[\"width\"]})')\n        if settings[\"cropResHeight\"] != settings[\"height\"]:\n            logger.warning(\n                f'Crop resolution height ({settings[\"cropResHeight\"]}) not equal to video height ({settings[\"height\"]})')\n        logger.info(\n            f'Crop X offset and width will be multiplied by {cropMultipleX}')\n        logger.info(\n            f'Crop Y offset and height will be multiplied by {cropMultipleY}')\n        if not settings[\"noAutoScaleCropRes\"]:\n            return {**settings, 'cropMultipleX': cropMultipleX, 'cropMultipleY': cropMultipleY}\n        else:\n            logger.info(f'Auto scale crop resolution disabled in settings.')\n            return settings\n    else:\n        return settings\n\n\ndef filterDash(dashManifestUrl, dashFormatIDs):\n    from xml.dom import minidom\n    from urllib import request\n\n    with request.urlopen(dashManifestUrl) as dash:\n        dashdom = minidom.parse(dash)\n\n    reps = dashdom.getElementsByTagName('Representation')\n    for rep in reps:\n        id = rep.getAttribute('id')\n        if id not in dashFormatIDs:\n            rep.parentNode.removeChild(rep)\n\n    filteredDashPath = f'{webmsPath}/filtered-dash.xml'\n    
with open(filteredDashPath, 'w+', encoding='utf-8') as filteredDash:\n        filteredDash.write(dashdom.toxml())\n\n    return filteredDashPath\n\n\ndef getDefaultEncodeSettings(videobr):\n    if videobr is None:\n        encodeSettings = {'crf': 30, 'autoTargetMaxBitrate': 0,\n                          'encodeSpeed': 2, 'twoPass': False}\n    elif videobr <= 4000:\n        encodeSettings = {'crf': 20, 'autoTargetMaxBitrate': int(\n            1.6 * videobr), 'encodeSpeed': 2, 'twoPass': False}\n    elif videobr <= 6000:\n        encodeSettings = {'crf': 22, 'autoTargetMaxBitrate': int(\n            1.5 * videobr), 'encodeSpeed': 3, 'twoPass': False}\n    elif videobr <= 10000:\n        encodeSettings = {'crf': 24, 'autoTargetMaxBitrate': int(\n            1.4 * videobr), 'encodeSpeed': 4, 'twoPass': False}\n    elif videobr <= 15000:\n        encodeSettings = {'crf': 26, 'autoTargetMaxBitrate': int(\n            1.3 * videobr), 'encodeSpeed': 5, 'twoPass': False}\n    elif videobr <= 20000:\n        encodeSettings = {'crf': 30, 'autoTargetMaxBitrate': int(\n            1.2 * videobr), 'encodeSpeed': 5, 'twoPass': False}\n    else:\n        encodeSettings = {'crf': 35, 'autoTargetMaxBitrate': int(\n            1.1 * videobr), 'encodeSpeed': 5, 'twoPass': False}\n    return encodeSettings\n\n\ndef uploadToGfycat(settings):\n    # auto gfycat uploading\n    if (settings[\"gfycat\"]):\n        import urllib3\n        import json\n        from urllib.parse import urlencode\n        http = urllib3.PoolManager()\n\n        for outPath in outPaths:\n            with open(outPath, 'rb') as fp:\n                file_data = fp.read()\n            encoded_args = urlencode({'title': f'{outPath}'})\n            url = UPLOAD_KEY_REQUEST_ENDPOINT + encoded_args\n            r_key = http.request('POST', url)\n            print(r_key.status)\n            gfyname = json.loads(r_key.data.decode('utf-8'))[\"gfyname\"]\n            links.append(f'https://gfycat.com/{gfyname}')\n            print(gfyname)\n            fields = {'key': gfyname, 'file': (\n                gfyname, file_data, 'multipart/formdata')}\n            r_upload = http.request(\n                'POST', FILE_UPLOAD_ENDPOINT, fields=fields)\n            print(r_upload.status)\n            print(r_upload.data)\n\n        for fileName, link in zip(fileNames, links):\n            markdown += f'[{fileName}]({link})\\n\\n'\n        print('\\n==Reddit Markdown==')\n        print(markdown)\n\n\ndef cleanFileName(fileName):\n    if sys.platform == 'win32':\n        fileName = re.sub(r'[*?\"<>\\0]', '', fileName)\n        fileName = re.sub(r'[/|\\\\:]', '_', fileName)\n    elif sys.platform == 'darwin':\n        fileName = re.sub(r'[:\\0]', '_', fileName)\n    elif sys.platform.startswith('linux'):\n        fileName = re.sub(r'[/\\0]', '_', fileName)\n    return fileName\n\n\ndef getVidstabPreset(level, videoStabilizationDynamicZoom):\n    vidstabPreset = {\"enabled\": False, \"desc\": \"Disabled\"}\n    if level == 1:\n        vidstabPreset = {\"enabled\": True, \"shakiness\": 2,\n                         \"zoomspeed\": 0.05, \"smoothing\": 2, \"desc\": \"Very Weak\"}\n    elif level == 2:\n        vidstabPreset = {\"enabled\": True, \"shakiness\": 4,\n                         \"zoomspeed\": 0.1, \"smoothing\": 4, \"desc\": \"Weak\"}\n    elif level == 3:\n        vidstabPreset = {\"enabled\": True, \"shakiness\": 6,\n                         \"zoomspeed\": 0.2, \"smoothing\": 6, \"desc\": \"Medium\"}\n    elif level == 4:\n        vidstabPreset = {\"enabled\": True, \"shakiness\": 8,\n                         \"zoomspeed\": 0.3, \"smoothing\": 10, \"desc\": \"Strong\"}\n    elif level == 5:\n        vidstabPreset = {\"enabled\": True, \"shakiness\": 10,\n                         \"zoomspeed\": 0.4, \"smoothing\": 16, \"desc\": \"Very Strong\"}\n    elif level == 6:\n        vidstabPreset = {\"enabled\": True, \"shakiness\": 10,\n                         \"zoomspeed\": 0.5, \"smoothing\": 22, \"desc\": \"Strongest\"}\n    return vidstabPreset\n\n\ndef getDenoisePreset(level):\n    denoisePreset = {\"enabled\": False, \"desc\": \"Disabled\"}\n    if level == 1:\n        denoisePreset = {\"enabled\": 
True,\n \"lumaSpatial\": 1, \"desc\": \"Very Weak\"}\n elif level == 2:\n denoisePreset = {\"enabled\": True, \"lumaSpatial\": 2, \"desc\": \"Weak\"}\n elif level == 3:\n denoisePreset = {\"enabled\": True, \"lumaSpatial\": 4, \"desc\": \"Medium\"}\n elif level == 4:\n denoisePreset = {\"enabled\": True, \"lumaSpatial\": 6, \"desc\": \"Strong\"}\n elif level == 5:\n denoisePreset = {\"enabled\": True,\n \"lumaSpatial\": 8, \"desc\": \"Very Strong\"}\n return denoisePreset\n\n\nmain()\n","sub_path":"src/clipper/yt_clipper.py","file_name":"yt_clipper.py","file_ext":"py","file_size_in_byte":47716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319322410","text":"import numpy as np\nimport pandas as pd\nimport pickle\nfrom pathlib import Path\nimport sys\n\nimport statsmodels.api as sm\nfrom bokeh.layouts import row\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.io import export_png\n\n###############################################################################\ndir = Path(__file__).resolve().parents[2]\ncurrent_week = \"week\" + str(sys.argv[1])\n\npath = dir / \"output\" / current_week\npath.mkdir(parents=True, exist_ok=True)\n\n\n###############################################################################\n\ndef var_by_method(dataf, variable):\n \n dataf_out = pd.DataFrame()\n dataf_out[\"pid\"] = dataf[\"pid\"]\n dataf_out[\"year\"] = dataf[\"year\"]\n dataf_out[\"hid\"] = dataf[\"hid_real\"]\n\n for m in [\"real\", \"standard\", \"ext\"]:\n\n dataf_out[m] = dataf[variable + \"_\" + m]\n\n return dataf_out\n\ndef var_by_age(dataf, variable):\n \n dataf_out = pd.DataFrame()\n dataf_out[\"age\"] = np.sort(dataf[\"age_real\"].unique())\n \n for m in [\"real\", \"standard\", \"ext\"]:\n dataf_out[m] = dataf.groupby(\"age_real\")[variable + \"_\" + m].mean().values\n return dataf_out\n \ndef plot_by_age(dataf, variable):\n \n dataf_plot = var_by_age(dataf, variable)\n \n source = ColumnDataSource(dataf_plot)\n \n p = figure(title = variable)\n \n p.line(x=\"age\", y=\"real\", source=source,\n line_color=\"black\", line_dash = \"solid\", line_width=2,\n legend_label = \"Real\")\n \n p.line(x=\"age\", y=\"standard\", source=source,\n line_color=\"black\", line_dash = \"dashed\", line_width=2,\n legend_label = \"Standard\")\n \n p.line(x=\"age\", y=\"ext\", source=source,\n line_color=\"black\", line_dash = \"dotted\", line_width=2,\n legend_label = \"Ext\")\n \n p.xaxis.axis_label = \"Age\"\n \n p = make_pretty(p)\n str_path = variable + \".png\"\n export_png(p, filename=str(path/ str_path))\n\ndef make_pretty(p):\n p.xgrid.grid_line_color = None\n p.yaxis.minor_tick_line_width=0\n p.xaxis.minor_tick_line_width=0\n \n # p.legend.location = \"bottom_right\"\n\n return p \n\n##########\n\n# df_child = var_by_method(df, \"child\")\n# tmp = df_child.groupby(\"pid\").max()\n# sum(tmp[\"real\"]==tmp[\"standard\"])/len(tmp)\n# sum(tmp[\"real\"]==tmp[\"ext\"])/len(tmp)\n# ##########\n\n# len(df[\"hid_real\"].unique())\n# len(df[\"hid_standard\"].unique())\n# len(df[\"hid_ext\"].unique())\n# ##########\n\n# df_married = var_by_method(df, \"married\")\n# tmp = df_married.groupby(\"pid\").max()\n# sum(tmp[\"real\"]==tmp[\"standard\"])/len(tmp)\n# sum(tmp[\"real\"]==tmp[\"ext\"])/len(tmp)\n##########\n\n\nif __name__ == \"__main__\":\n \n df = pd.read_pickle(path / \"df_analysis_cohort\")\n\n plot_by_age(df, \"married\")\n plot_by_age(df, \"in_couple\")\n plot_by_age(df, \"hh_income\")\n 
plot_by_age(df, \"hh_youngest_age\")\n    plot_by_age(df, \"n_people\")\n    plot_by_age(df, \"n_children\")\n    plot_by_age(df, \"hh_frac_working\")","sub_path":"src/validation/validation_01.py","file_name":"validation_01.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"510464448","text":"from cslvr import *\nfrom scipy import random\nfrom fenics import *\nfrom dolfin_adjoint import *\nimport sys\n\n# set the relevant directories :\nvar_dir = 'dump/vars_jakobshavn_small/' # directory from gen_vars.py\nout_dir = 'dump/jakob_small/inversion_Wc_0.03/'\n\n# create HDF5 files for saving and loading data :\nfmeshes = HDF5File(mpi_comm_world(), var_dir + 'submeshes.h5', 'r')\nfdata = HDF5File(mpi_comm_world(), var_dir + 'state.h5', 'r')\n\n# create 3D model for stokes solves :\nd3model = D3Model(fdata, out_dir)\n\n# init subdomains and boundary meshes :\nd3model.set_subdomains(fdata)\nd3model.set_srf_mesh(fmeshes)\nd3model.set_bed_mesh(fmeshes)\nd3model.set_dvd_mesh(fmeshes)\n\n# initialize the 3D model vars :\nd3model.init_S(fdata)\nd3model.init_B(fdata)\nd3model.init_mask(fdata)\nd3model.init_q_geo(d3model.ghf)\nd3model.init_T_surface(fdata)\nd3model.init_adot(fdata)\nd3model.init_U_ob(fdata, fdata)\nd3model.init_U_mask(fdata)\nd3model.init_time_step(1e-6)\nd3model.init_E(1.0)\nd3model.init_W(0.0)\nd3model.init_Wc(0.03)\nd3model.init_T(d3model.T_surface)\nd3model.init_k_0(1e-3)\nd3model.solve_hydrostatic_pressure()\nd3model.form_energy_dependent_rate_factor()\n\n#frstrt = HDF5File(mpi_comm_world(), out_dir + '01/inverted.h5', 'r')\n#d3model.init_T(frstrt)\n#d3model.init_W(frstrt)\n#d3model.init_Fb(frstrt)\n#d3model.init_alpha(frstrt)\n#d3model.init_U(frstrt)\n#d3model.init_p(frstrt)\n#d3model.init_theta(frstrt)\n\n# create a 2D model for balance-velocity :\nbedmodel = D2Model(d3model.bedmesh, out_dir)\n\nbedmodel.assign_submesh_variable(bedmodel.S, d3model.S)\nbedmodel.assign_submesh_variable(bedmodel.B, d3model.B)\nbedmodel.assign_submesh_variable(bedmodel.adot, d3model.adot)\n\n# solve the balance velocity :\nbv = BalanceVelocity(bedmodel, kappa=5.0)\nbv.solve(annotate=False)\n\n# assign the balance velocity to the 3D model's bed :\nd3model.assign_submesh_variable(d3model.d_x, bedmodel.d_x)\nd3model.assign_submesh_variable(d3model.d_y, bedmodel.d_y)\nd3model.assign_submesh_variable(d3model.Ubar, bedmodel.Ubar)\n\n# extrude the bed values up the column : \nd_x_e = d3model.vert_extrude(d3model.d_x, d='up')\nd_y_e = d3model.vert_extrude(d3model.d_y, d='up')\nUbar_e = d3model.vert_extrude(d3model.Ubar, d='up')\n\n# set the appropriate variable to be the function extruded :\nd3model.init_d_x(d_x_e)\nd3model.init_d_y(d_y_e)\nd3model.init_Ubar(Ubar_e)\n\n# generate initial traction field :\nd3model.init_beta_SIA()\n\nmom = MomentumDukowiczBP(d3model, linear=False)\nmomTMC = MomentumDukowiczBrinkerhoffStokes(d3model, linear=False)\nnrg = Enthalpy(d3model, momTMC, transient=False, use_lat_bc=True)\n\n#frstrt = HDF5File(mpi_comm_world(), out_dir + '02/u_opt.h5', 'r')\n#d3model.set_out_dir(out_dir + '02/')\n#d3model.init_U(frstrt)\n#d3model.init_beta(frstrt)\n\n# thermo-solve callback function :\ndef tmc_cb_ftn():\n  nrg.calc_PE()#avg=True)\n  nrg.calc_vert_avg_strain_heat()\n  nrg.calc_vert_avg_W()\n  nrg.calc_temp_rat()\n  nrg.solve_basal_melt_rate()\n\n# post-adjoint-iteration callback function :\ndef adj_post_cb_ftn():\n  # solve for optimal vertical velocity :\n  mom.solve_vert_velocity(annotate=False)\n\n# after 
every completed adjoining, save the state of these functions :\nadj_save_vars = [d3model.T,\n d3model.W,\n d3model.Fb,\n d3model.Mb,\n d3model.alpha,\n d3model.alpha_int,\n d3model.PE,\n d3model.Wbar,\n d3model.Qbar,\n d3model.temp_rat,\n d3model.U3,\n d3model.p,\n d3model.beta,\n d3model.theta]\n\nu_opt_save_vars = [d3model.beta, d3model.U3]\nw_opt_save_vars = [d3model.Fb, d3model.theta]\n\n# form the cost functional :\nmom.form_obj_ftn(integral=d3model.GAMMA_U_GND, kind='log_L2_hybrid', \n g1=0.01, g2=5000)\n\n# form the regularization functional :\nmom.form_reg_ftn(d3model.beta, integral=d3model.GAMMA_B_GND,\n kind='TV_Tik_hybrid', alpha_tik=1e-1, alpha_tv=10.0)\n#mom.form_reg_ftn(d3model.beta, integral=d3model.GAMMA_B_GND,\n# kind='TV', alpha=10.0)\n#mom.form_reg_ftn(d3model.beta, integral=d3model.GAMMA_B_GND,\n# kind='Tikhonov', alpha=1e-6)\n\n# form the objective functional for water-flux optimization :\nnrg.form_cost_ftn(kind='L2')\n\nwop_kwargs = {'max_iter' : 350, \n 'bounds' : (0.0, 100.0),\n 'method' : 'ipopt',\n 'adj_save_vars' : w_opt_save_vars,\n 'adj_callback' : None}\n \ntmc_kwargs = {'momentum' : momTMC,\n 'energy' : nrg,\n 'wop_kwargs' : wop_kwargs,\n 'callback' : tmc_cb_ftn,\n 'atol' : 1e2,\n 'rtol' : 1e0,\n 'max_iter' : 5,\n 'iter_save_vars' : None,\n 'post_tmc_save_vars' : None,\n 'starting_i' : 1}\n\nuop_kwargs = {'control' : d3model.beta,\n 'bounds' : (1e-5, 1e7),\n 'method' : 'ipopt',\n 'max_iter' : 1000,\n 'adj_save_vars' : u_opt_save_vars,\n 'adj_callback' : None,\n 'post_adj_callback' : adj_post_cb_ftn}\n \nass_kwargs = {'momentum' : mom,\n 'beta_i' : d3model.beta.copy(True),\n 'max_iter' : 10,\n 'tmc_kwargs' : tmc_kwargs,\n 'uop_kwargs' : uop_kwargs,\n 'atol' : 1.0,\n 'rtol' : 1e-4,\n 'initialize' : True,\n 'incomplete' : True,\n 'post_iter_save_vars' : adj_save_vars,\n 'post_ini_callback' : None,\n 'starting_i' : 1}\n\n# assimilate ! 
:\nd3model.assimilate_U_ob(**ass_kwargs) \n\n# or assimilate only once without thermo : \n#mom.optimize_U_ob(**uop_kwargs)\n \n# or thermo_solve :\n#d3model.thermo_solve(**tmc_kwargs)\n\n \n\n","sub_path":"simulations/greenland/data_assimilation/jakobshavn/data_assimilation.py","file_name":"data_assimilation.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486202480","text":"import numpy as np\nfrom PIL import Image\nfrom utils import * \nimport argparse\nfrom scipy import fftpack\nimport sys\n#https://github.com/ghallak/jpeg-python\n#https://stackoverflow.com/questions/7762948/how-to-convert-an-rgb-image-to-numpy-array\n\ndef main():\n\tfichier = open(\"data.txt\", \"w\")\n\t# Part 0: open the image\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"input\", help=\"path to the input image\")\n\tparser.add_argument(\"output\", help=\"path to the output image\")\n\t#for optional input\n\t#parser.add_argument(\"-i\", help=\"path to the input image\",default=\"input.jpg\")\n\t#parser.add_argument(\"-o\", help=\"path to the output image\",default=\"output.jpg\")\n\targs = parser.parse_args()\n\tinput_file = args.input\n\toutput_file = args.output\n\timage = load_image(input_file)\n\t# Part 1: RGB/Y'CbCr conversion\n\t#YCbCr = image.convert('YCbCr')\n\tarray = convert_to_array(image)\n\tlgOriginal = sys.getsizeof(array)\n\t\n\t# Part 2/3: split into pixel blocks + DCT\n\t# Get the image dimensions and check that they are multiples of 8\n\trows, cols = array.shape[0], array.shape[1]\n\tif rows % 8 == cols % 8 == 0:\n\t\tblocks_count = rows // 8 * cols // 8 \n\t\t# the number of blocks equals the width divided by 8 times the height divided by 8\n\telse:\n\t\traise ValueError((\"The image height and width must be multiples of 8\"))\n\t\t\n\tDictionnaireFull = []\n\tMessageRLEFinal=[]\n\timageDecompres = \"\"\n\t# Iterate over each block, and each channel of each block\n\tfor i in range(0, rows, 8):\n\t\tfor j in range(0, cols, 8):\n\t\t\tfor k in range(3):\n\t\t\t\tblock = array[i:i+8, j:j+8, k]\n\t\t\t\t\n\t\t\t\t# apply the DCT to the block, see https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html\n\t\t\t\tarray[i:i+8, j:j+8, k] = fftpack.dct(fftpack.dct(block.T, axis=0, norm='ortho').T,axis = 1, norm='ortho')\n\t\t\t\t# quantize each block\n\t\t\t\tarray[i:i+8, j:j+8, k] = quantize(array[i:i+8, j:j+8, k],'lum' if k == 0 else 'chrom')\n\t\t\t\tMessage, Dictionnaire = encodeHuffman(zigzag_single(array[i:i+8, j:j+8, k]))\n\t\t\t\tDictionnaireFull.append(Dictionnaire)\n\t\t\t\tMessageRLEFinal.append(encodeRLE(Message))\n\t\n\t# The compressed image size equals the RLE code + the dictionary\n\tlgFinal = sys.getsizeof(MessageRLEFinal)\n\tlgFinal = lgFinal + sys.getsizeof(DictionnaireFull)\n\t\n\tprint(\"The compression ratio is \"+str(1-(lgFinal/lgOriginal)))\n\t\n\t# Part 6 (reverse): RLE / Huffman decoding\n\t\n\tdecodedRLE = []\n\tdecodedHuffman = []\n\tfor i in range(0, len(MessageRLEFinal),1):\n\t\tdecodedRLE.append(decodeRLE(MessageRLEFinal[i]))\n\t\t#print(decodeHuffman(decodedRLE[0], DictionnaireFull[0]))\n\t\tdecodedHuffman.append(decodeHuffman(decodedRLE[i], DictionnaireFull[i]))\n\timageDecompres = np.zeros((rows,cols,3))\n\t\n\tz = 0\n\t# Part 2/3/4 (reverse): back to Y'CbCr, dequantization and IDCT\n\tfor i in range(0, rows, 8):\n\t\tfor j in range(0, cols, 8):\n\t\t\tfor k in range(3): \n\t\t\t\timageDecompres[i:i+8, j:j+8,k] = inverse_zigzag_single(decodedHuffman[z])\n\t\t\t\t\n\t\t\t\t# dequantize\n\t\t\t\timageDecompres[i:i+8, j:j+8, k] = dequantize(imageDecompres[i:i+8, j:j+8, k],'lum' if k == 0 else 'chrom')\n\t\t\t\t\n\t\t\t\t# apply the inverse DCT to each block\n\t\t\t\timageDecompres[i:i+8, j:j+8, k] = fftpack.idct(fftpack.idct(imageDecompres[i:i+8, j:j+8, k].T,axis=0, norm='ortho').T,axis=1,norm='ortho')\n\t\t\t\tz=z+1\n\t\t\n\t# Part 1 (reverse): Y'CbCr/RGB conversion\n\timage = Image.fromarray(np.uint8(imageDecompres))\n\t#image = image.convert(\"RGB\")\n\tsave_image(image, output_file)\n\t\n\treturn 1\n\n\t\n\t\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"TP2/Rendu/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338129307","text":"from PIL import Image\r\nimport numpy as np\r\n\r\n\r\ndef zad_1(w, h, dzielnik, zmiana_koloru): \r\n    t = (h, w) \r\n    tab = np.ones(t, dtype=np.uint8)\r\n    grub = int(h / dzielnik ) \r\n    for k in range(dzielnik): \r\n        for g in range(grub):\r\n            i = k * grub + g \r\n            for j in range(w):\r\n                tab[i, j] = (k * zmiana_koloru) % 256 \r\n    return tab\r\n\r\ndef zad_11(tab):\r\n    tab_neg = tab.copy()\r\n    for i in range(tab.shape[0]):\r\n        for j in range(tab.shape[1]):\r\n            tab_neg[i, j] = 255 - tab[i, j]\r\n    return tab_neg\r\n\r\nImage.fromarray(zad_1(300, 200, 100, 10)).save(\"zad_1.jpg\")\r\nImage.fromarray(zad_11(zad_1(300, 200, 100, 10))).save(\"zad_11.jpg\")\r\n\r\ndef zad_2(w, h, dzielnik, r,g,b):\r\n    tab = np.zeros((h, w,3), dtype=np.uint8)\r\n    pas = int(min(w, h)/dzielnik)\r\n    ile_pelnych_ramek = int(min(w, h)/(2*pas))\r\n    black = False\r\n    for i in range (0, ile_pelnych_ramek):\r\n        z1 = h - (i+1)*pas\r\n        z2 = w - (i+1)*pas\r\n        if black == False:\r\n            tab[(i+1)*pas:z1, (i+1)*pas:z2] = [255,255,255]\r\n            black = True\r\n        else:\r\n            tab[(i+1)*pas:z1, (i+1)*pas:z2] = [0,0,0]\r\n            black = False\r\n\r\n    return tab\r\n\r\ndef zad_21(tab):\r\n    tab_neg = tab.copy()\r\n    for i in range(tab.shape[0]):\r\n        for j in range(tab.shape[1]):\r\n            tab_neg[i, j, 0] = 255 - tab[i, j, 0]\r\n            tab_neg[i, j, 1] = 255 - tab[i, j, 1]\r\n            tab_neg[i, j, 2] = 255 - tab[i, j, 2]\r\n    return tab_neg\r\n\r\nImage.fromarray(zad_2(120, 60, 8, 0, 200, 100)).save(\"zad2.jpg\")\r\nImage.fromarray(zad_21(zad_2(120, 60, 8, 0, 200, 100))).save(\"zad21.jpg\")\r\n\r\nobrazek = np.array((np.asarray(Image.open(\"inicjaly.bmp\"))*1), dtype=np.uint8)\r\ndef zad_3(obrazek):\r\n    h = obrazek.shape[0]\r\n    w = obrazek.shape[1]\r\n    obrazekjpg = np.ones((h, w, 3), dtype=np.uint8)\r\n    for i in range(0, h):\r\n        for j in range(0, w):\r\n            if (obrazek[i, j] == 0):\r\n                obrazekjpg[i, j, 0] = 0\r\n                obrazekjpg[i, j, 1] = 0\r\n                obrazekjpg[i, j, 2] = 0\r\n            else:\r\n                obrazekjpg[i, j, 0] = 255\r\n                obrazekjpg[i, j, 1] = 255\r\n                obrazekjpg[i, j, 2] = 255\r\n    for i in range (0, h):\r\n        for j in range(0, w):\r\n            if(obrazekjpg[i, j, 0] != 255):\r\n                if(i % 3 == 0):\r\n                    obrazekjpg[i, j] = [255, 255, 0]\r\n                if(i % 3 == 1):\r\n                    obrazekjpg[i, j] = [21, 255, 37]\r\n                if(i % 3 == 2):\r\n                    obrazekjpg[i, j] = [255, 0, 255]\r\n    return obrazekjpg\r\n\r\n\r\nImage.fromarray(zad_3(obrazek)).save(\"zad3.jpg\")\r\n","sub_path":"lab4/zad.py","file_name":"zad.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388651412","text":"\"\"\"\nThis script finds the merger responsible for 
labeling a PR by a commit SHA. It is used by the workflow in\n'.github/workflows/pr-labels.yml'. If there exists no PR associated with the commit or the PR is properly labeled,\nthis script is a no-op.\nNote: we ping the merger only, not the reviewers, as the reviewers can sometimes be external to torchaudio\nwith no labeling responsibility, so we don't want to bother them.\n\"\"\"\n\nimport sys\nfrom typing import Any, Optional, Set, Tuple\n\nimport requests\n\n# For a PR to be properly labeled it should have one primary label and one secondary label\n# For a PR with primary label \"other\", it does not require an additional secondary label\nPRIMARY_LABELS = {\n \"BC-breaking\",\n \"deprecation\",\n \"bug fix\",\n \"new feature\",\n \"improvement\",\n \"example\",\n \"prototype\",\n \"other\",\n}\n\nSECONDARY_LABELS = {\n \"module: I/O\",\n \"module: ops\",\n \"module: models\",\n \"module: pipelines\",\n \"module: datasets\",\n \"module: docs\",\n \"module: tests\",\n \"build\",\n \"style\",\n \"perf\",\n \"other\",\n}\n\n\ndef query_torchaudio(cmd: str, *, accept) -> Any:\n response = requests.get(f\"https://api.github.com/repos/pytorch/audio/{cmd}\", headers=dict(Accept=accept))\n return response.json()\n\n\ndef get_pr_number(commit_hash: str) -> Optional[int]:\n # See https://docs.github.com/en/rest/reference/repos#list-pull-requests-associated-with-a-commit\n data = query_torchaudio(f\"commits/{commit_hash}/pulls\", accept=\"application/vnd.github.groot-preview+json\")\n if not data:\n return None\n return data[0][\"number\"]\n\n\ndef get_pr_merger_and_labels(pr_number: int) -> Tuple[str, Set[str]]:\n # See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request\n data = query_torchaudio(f\"pulls/{pr_number}\", accept=\"application/vnd.github.v3+json\")\n merger = data[\"merged_by\"][\"login\"]\n labels = {label[\"name\"] for label in data[\"labels\"]}\n return merger, labels\n\n\nif __name__ == \"__main__\":\n commit_hash = sys.argv[1]\n pr_number = get_pr_number(commit_hash)\n if not pr_number:\n sys.exit(0)\n\n merger, labels = get_pr_merger_and_labels(pr_number)\n is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))\n\n if not is_properly_labeled:\n print(f\"@{merger}\")\n","sub_path":".github/process_commit.py","file_name":"process_commit.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"374122033","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', 'BlogApp.views.index', name='index'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^post/', 'BlogApp.views.newpost', name='newpost'),\n url(r'^posts/', 'BlogApp.views.posts', name='posts'),\n)\n","sub_path":"Blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419134751","text":"#!/usr/bin/env python3\n#Created by Yudan Chen for Programming Course\n\nimport sys\n\npdbname=sys.argv[1]\nf=open(pdbname,'r')\nlines=f.readlines()\n\ncolumn=[]\nfor line in 
lines:\n\twords=line.split()\n\tatominfo=str(words[0]),int(words[1]),str(words[2]),str(words[3]),str(words[4]),int(words[5]),float(words[6]),float(words[7]),float(words[8]),float(words[9]),float(words[10]),str(words[11])\n\tprint(\"ATOM\",atominfo)\n\tcolumn.append(atominfo)\nf.close()\n\n\nf=open(\"hw.out\",'w')\nfor atom in column:\n\ts=\"{0:6} {1:5} {2:4} {3:3} {4:1} {5:3} {6:8.3f} {7:8.3f} {8:8.3f} {9:6} {10:6} {11:2}\\n\"\n\tf.write(s.format(atom[0],atom[1],atom[2],atom[3],atom[4],atom[5],atom[6],atom[7],atom[8],atom[9],atom[10],atom[11]))\nf.close()\n\n\n\n\nprint(\"Done!\")","sub_path":"20oct06/homework/hw.py","file_name":"hw.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66386119","text":"import sys, os, socket, datetime, time, traceback, shutil, subprocess, multiprocessing\nimport psutil\nimport xmlrpc.client\nimport smtplib\nimport wmi, collections\nfrom email.mime.text import MIMEText\nimport apps_params, common\nimport wmi_stop_servs\nfrom common import BUILD_MSG, unicode_to_str\n\ndef to_wait(param_key, t=3): \n    time.sleep(t)\n    return True, '', ''\n\n# move to common\ndef get_time():\n    #return str(time.asctime( time.localtime(time.time())))\n    format_str = '%Y-%m-%d %H:%M:%S'\n    t = datetime.datetime.now()\n    return t.strftime(format_str)\n\ndef run_apps_servers(param_key, account=None):\n    output, err_msg, to_continue = '', '', True\n    param_obj = common.get_param_obj(param_key) # new \n    if not account:\n        cur_dir = param_obj.apps_exe_dir\n    elif account == apps_params.DEV:\n        cur_dir = param_obj.apps_exe_import_dir\n    else:\n        to_continue, output, err_msg = False, '', 'Optional Parameter not recognized'\n        return to_continue, output, err_msg\n    \n    try:    \n        args = ['RunServers.bat']    \n        os.chdir(cur_dir)    \n        p = subprocess.Popen(args, cwd=cur_dir, shell=True)    \n        p.wait()    \n    except:\n        err_msg = traceback.format_exc()\n        to_continue = False\n    time.sleep(2)    \n    return to_continue, output, err_msg\n\ndef kill_prs(procs, setup_path):\n    \"\"\"Before it kills a process, it checks again that the exe file has a proper path.\n    procs is a list of psutil.Process classes.\n    Used in stop_apps_servers.\"\"\"\n    log_kill_msg = ''    \n    is_done = False\n    j = 0\n    while (not is_done) and j < 30:\n        is_done = True\n        j += 1\n        for pr in procs:\n            try:\n                to_kill = (psutil.pid_exists(pr.pid) and\n                           is_to_kill(setup_path, pr.exe))\n            except:\n                to_kill = False\n            \n            if to_kill:\n                #### debug    \n                #print(pr.status, pr.exe)    \n                #### debug end\n                print(' terminating: {}'.format(pr.exe))\n                #kill_msg = ''.join((' ', 'psutil terminating: {}'.format(pr.exe)))\n                kill_msg = ' '.join((' ', pr.exe))\n                log_kill_msg = '\\n'.join((log_kill_msg, kill_msg))    \n                pr.kill()\n                is_done = False\n        if not is_done:\n            time.sleep(10)\n    return log_kill_msg    \n    \ndef is_to_kill(setup_path, exe_path):\n    \"\"\"Used in stop_apps_servers and kill_prs.\n    It checks if elements of setup_path are in exe_path.\n    Should check for order?\"\"\"\n    \n    setup_path_list = common.path_to_list(setup_path.lower(), is_reversed=True)\n    exe_path_list = common.path_to_list(exe_path.lower(), is_reversed=True)\n    # Both Lists are reversed !!!\n    \n    for n in range(len(setup_path_list)-1): # except Drive (no N:\\\\)\n        dir_name = setup_path_list[n]\n        #ind = exe_path_list.index(dir_name)\n        if dir_name not in exe_path_list:\n            return False\n    return True\n\ndef is_any_server_running(exe_path):\n    for pr in psutil.process_iter():\n        try:\n            if is_to_kill(exe_path, pr.exe):\n                return True    \n        except:\n            # pr.exe raises exception for: System Idle Process, System, \n            # csrss.exe, svchost.exe, msdtc.exe, miprvse.exe, ...    \n            pass    \n    return False    \n\ndef wmi_subprocess_stop_servers(schema, exe_path):\n    for i in range(1, 20):\n        if not is_any_server_running(exe_path):\n            return # no need to run WMI\n        print(get_time(), ' ', 'WMI run # ' + str(i))\n        p = multiprocessing.Process(target = wmi_stop_servs.wmi_stop_servers_one_run_no_error,\n                                    args=(schema,))\n        p.start()\n        time.sleep(15)\n        if p.is_alive():\n            p.terminate()\n            time.sleep(3)\n\ndef stop_apps_servers(param_key, account=None):\n    print('stop_apps_servers')\n    output, err_msg, to_continue = '', '', True    \n    param_obj = common.get_param_obj(param_key) # new    \n    build_batch_output, build_batch_err, psutil_output = '', '', ''\n    if not account:\n        cur_dir = param_obj.apps_exe_dir\n        schema = param_obj.db_schema    \n    elif account == apps_params.DEV:\n        cur_dir = param_obj.apps_exe_import_dir\n        schema = param_obj.db_import_target_schema\n    else:\n        to_continue, output, err_msg = False, '', 'Optional Parameter not recognized'\n        return to_continue, output, err_msg    \n    #_, setup_exe_dir = os.path.split(cur_dir)\n    \n    wmi_subprocess_stop_servers(schema, cur_dir)\n    print(get_time(), ' ', 'Processes are terminated with psutil')\n    prs_kill = []\n    for pr in psutil.process_iter():    \n        try:\n            if is_to_kill(cur_dir, pr.exe):\n                prs_kill.append(pr)\n            \n        except:\n            # pr.exe raises exception for: System Idle Process, System, \n            # csrss.exe, svchost.exe, msdtc.exe, miprvse.exe, ...    \n            pass\n    try:\n        psutil_output = kill_prs(prs_kill, cur_dir)    \n    except:\n        err_msg = traceback.format_exc()\n        to_continue = False # ???\n        print(err_msg)    \n    time.sleep(1)\n    psutil_msg = ' '.join((get_time(), 'Processes are terminated with psutil:'))\n    #output = ''.join((build_batch_output, '\\n', psutil_msg, psutil_output))\n    if psutil_output:\n        output = ''.join((psutil_msg, psutil_output))    \n    return to_continue, output, err_msg\n\n\n#def get_comp_path():\n    #comp = socket.gethostname()\n    ##socket.gethostbyaddr(socket.gethostname())[0]\n    #path = os.path.abspath(sys.path[0])\n    #return comp, path\n\n#def copy_exe_files(param_key, param_obj, account=None):\n    #output, err_msg, to_continue = '', '', True\n    #try:\n    ## 'Optional Parameter not recognized'\n    #for path_param_seqs in param_obj.copy_files_setup:\n    #common.del_copy_files(param_obj, path_param_seqs, 1, account)\n    #except: \n    #err_msg = traceback.format_exc() \n    #to_continue = False\n    #return to_continue, output, err_msg\n\n    \n#def get_time():\n    ##return str(time.asctime( time.localtime(time.time())))\n    #format_str = '%Y-%m-%d %H:%M:%S'\n    #t = datetime.datetime.now()\n    #return t.strftime(format_str)    \n\n    \n    \ndef delete_old_logs(param_obj):\n    common.arhive_log_files(param_obj.apps_build_exe_shared_dir)\n    \n    for _, file_name in apps_params.LOG_FILES.items():\n        file = os.path.join(param_obj.apps_build_exe_shared_dir, file_name)\n        if os.path.exists(file) and (file_name != apps_params.LOG_FILES['starteam_checkout_time']):\n            # starteam_checkout_time file is replaced on a next starteam checkout \n            os.remove(file)\n    \n    \ndef write_log(param_obj, main_log, write_to_log, msg):\n    log_name = apps_params.LOG_FILES.get(write_to_log)\n    if log_name and (write_to_log != 'main'):\n        file = os.path.join(param_obj.apps_build_exe_shared_dir, log_name)\n        if log_name == apps_params.LOG_FILES['starteam_checkout_time']:\n            log = open(file, 'w')\n        else:\n            log = open(file, 'a')\n    else:\n        log = main_log    \n    log.write(''.join((msg, '\\n')))\n    log.flush()    \n    if log != main_log:\n        
log.close() \n \ndef send_email(param_obj, log_file, subject):\n try:\n with open(log_file, 'r') as f:\n log_text = f.read()\n #log_text = apps_params.unicode_to_str(log_text) \n common.send_email(email_msg = log_text,\n subject = ''.join((subject, ' - ', param_obj.db_schema)),\n email_to = param_obj.email_to)\n except Exception as e: \n with open(log_file, 'a') as f:\n f.write('\\n\\nError in \"send_email\" \\n')\n f.write(str(e))\n #traceback.print_tb(traceback, limit=None, file=f)\n \n \nif __name__ == \"__main__\": # for debug only\n pass \n #param_key = 'SWQA_101010'\n \n #params = apps_params.get_apps_params() \n #param_obj = params[key] \n \n #stop_apps_servers(param_key, account=None)\n #stop_apps_servers(param_key, account=None)\n \n #print('done')","sub_path":"RTC Build/apps_serv_main.py","file_name":"apps_serv_main.py","file_ext":"py","file_size_in_byte":8434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"284437245","text":"import pygame\nimport sys\nimport random\nimport os\n# from options import *\nimport time\n# from functions import *\n# from questions import *\n# from Class import*\n\n\npygame.init()\ndef resource_path(relative):\n if hasattr(sys, \"_MEIPASS\"):\n return os.path.join(sys._MEIPASS, relative)\n return os.path.join(relative)\n# display\ndisplay_width = 800\ndisplay_height = 600\nbg = pygame.image.load(resource_path(os.path.join('pigs','bg.png')))\nbg_2 = pygame.image.load(resource_path(os.path.join('pigs','bg2.jpg')))\nbg_3 = pygame.image.load(resource_path(os.path.join('pigs', 'bg3.jpg')))\ndisplay = pygame.display.set_mode((display_width, display_height))\n# pygame.display.set_caption(\"My game\")\n# player_counter: int = 0\nclock = pygame.time.Clock()\n\n\n# user\nx = 50\ny = 376\nwidth = 128\nheight = 128\n\nplayer_img = [pygame.image.load(resource_path(os.path.join('pigs', 'personage_1_right.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.2_right.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.3_right.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.2_right.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_1_right.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.2_right.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.3_right.png')))]\n\nplayer2_img = [pygame.image.load(resource_path(os.path.join('pigs', 'personage_2.1.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_2.1.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_2.1.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_2.1.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_2.1.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_2.1.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_2.1.png')))]\n\nplayer3_img = [pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))),\n 
pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))), pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png'))),\n pygame.image.load(resource_path(os.path.join('pigs', 'personage_1.png')))]\n\n# Jump\nisJump = False\njumpCount = 11\n\nleft = False\nright = False\nanimCount = 0\nlastMove = \"right\"\n# sound_jump = pygame.mixer.Sound('./sound/jump.mp3')\n\n# Barriers\n# pc_width = 20\npc_height = 70\npc_x = display_width - 50\npc_y = display_height - pc_height - 100\npc_img = [pygame.image.load(resource_path('./pigs/pc.png')), pygame.image.load(resource_path('./pigs/tf.png'))]\npc_options = [32, 449, 20, 469]\n\nscores = 0\nmax_scores = 0\nmax_above = 0\n\n\nbutton_sound = pygame.mixer.Sound(resource_path(os.path.join('sound', 'button.wav')))\n\ndisplay = pygame.display.set_mode((display_width, display_height))\n\npygame.display.set_caption(\"My game\")\npygame.display.set_caption(\"My game\")\n\n\nbutton_sound = pygame.mixer.Sound(resource_path(os.path.join('sound', 'button.wav')))\n\nsound_game = pygame.mixer.Sound(resource_path(os.path.join('sound', 'super-mario-saundtrek (online-audio-converter.com).wav')))\ndeath_sound = pygame.mixer.Sound(resource_path(os.path.join('sound', 'death.wav')))\n\n\n\nplayer_counter: int = 0\n\nclock = pygame.time.Clock()\n\npaused_game = False\n\n\nclass Button:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.inactive_color = (13, 162, 58)\n self.active_color = (23, 204, 58)\n\n def draw(self, x, y, message, action=None, font_size=30):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if x < mouse[0] + self.width and y < mouse[1] < y + self.height:\n pygame.draw.rect(display, self.active_color, (x, y, self.width, self.height))\n\n if click[0] == 1:\n pygame.mixer.Sound.play(button_sound)\n pygame.time.delay(300)\n if action is not None:\n action()\n\n else:\n pygame.draw.rect(display, self.inactive_color, (x, y, self.width, self.height))\n\n print_text(message, x + 10, y + 10, font_size=font_size)\n\n\nclass Cactus:\n def __init__(self, x, y, width, image, speed):\n self.x = x\n self.y = y\n self.width = width\n self.image = image\n self.speed = speed\n\n def move(self):\n if self.x >= -self.width:\n display.blit(self.image, (self.x, self.y))\n # pygame.draw.rect(display, (224, 121, 31), (self.x, self.y, self.width, self.height))\n self.x -= self.speed\n return True\n\n else:\n self.x = display_width + 100 + random.randrange(-80, 60)\n return False\n\n def return_self(self, radius, y, width, image):\n self.x = radius\n self.y = y\n self.width = width\n self.image = image\n display.blit(self.image, (self.x, self.y))\n\n\ndef draw_player():\n global player_counter\n\n if player_counter == 20:\n player_counter = 0\n\n display.blit(player_img[player_counter // 10], (x, y))\n player_counter += 1\n\n\ndef print_text(message, x, y, font_color=(0, 0, 0), font_type='./pigs/20050.ttf', font_size=30):\n font_type = pygame.font.Font(font_type, font_size)\n text = font_type.render(message, True, font_color)\n display.blit(text, (x, y))\n\n\ndef pause():\n global paused_game\n paused = True\n counter = 0\n while paused:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n print_text('Paused! 
Press Enter to play again, Esc to exit', 50, 300)\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_RETURN]:\n paused = False\n\n if keys[pygame.K_ESCAPE]:\n if counter >= 10:\n if paused_game:\n paused_game = False\n paused = False\n show_menu()\n\n pygame.display.update()\n clock.tick(15)\n counter = counter + 1\n\n\ndef create_cactus_arr(array):\n choice = random.randrange(0, 2)\n img = pc_img[choice]\n width = pc_options[choice * 2]\n height = pc_options[choice * 2 + 1]\n array.append(Cactus(display_width + 20, height, width, img, 13))\n\n choice = random.randrange(0, 2)\n img = pc_img[choice]\n width = pc_options[choice * 2]\n height = pc_options[choice * 2 + 1]\n array.append(Cactus(display_width + 300, height, width, img, 13))\n\n choice = random.randrange(0, 2)\n img = pc_img[choice]\n width = pc_options[choice * 2]\n height = pc_options[choice * 2 + 1]\n array.append(Cactus(display_width + 700, height, width, img, 13))\n\n\ndef find_radius(array):\n maximum = max(array[0].x, array[1].x)\n\n if maximum < display_width:\n radius = display_width\n if radius - maximum < 50:\n radius += 280\n else:\n radius = maximum\n\n choice = random.randrange(0, 5)\n if choice == 0:\n radius += random.randrange(50, 150)\n else:\n radius += random.randrange(250, 400)\n\n return radius\n\n\ndef draw_array(array):\n for cactus in array:\n check = cactus.move()\n if not check:\n radius = find_radius(array)\n\n choice = random.randrange(0, 2)\n image = pc_img[choice]\n width = pc_options[choice * 2]\n height = pc_options[choice * 2 + 1]\n\n cactus.return_self(radius, height, width, image)\n\n\ndef check_collision(barrier):\n for cactus in barrier:\n if y + width >= cactus.y:\n if cactus.x - 70 <= x <= cactus.x + cactus.width:\n game_over()\n elif cactus.x <= x + width <= cactus.x + cactus.width:\n return False\n\n\ndef count_scores(barriers):\n global scores, max_above\n above_pc = 0\n\n if -10 <= jumpCount < 7:\n for cactus in barriers:\n if y + height - 5 <= cactus.y:\n if cactus.x <= x <= cactus.x + cactus.width:\n above_pc += 1\n elif cactus.x <= x + width <= cactus.x + cactus.width:\n above_pc += 1\n\n max_above = max(max_above, above_pc)\n\n else:\n if jumpCount == -11:\n scores += max_above\n max_above = 0\n\n\n# def bg_change():\n# if scores >= 15:\n# display.blit(bg_2, (0, 0))\n#\n# pygame.display.update()\n\n\n\n\ndef game_over():\n global scores, max_scores\n if scores > max_scores:\n max_scores = scores\n\n stopped = True\n display.blit(bg, (0, 0))\n pygame.mixer.Sound.stop(death_sound)\n pygame.mixer.Sound.stop(sound_game)\n pygame.mixer.Sound.play(death_sound)\n while stopped:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n print_text('Game OVER! 
Press Enter to play again, Esc to exit', 50, 300)\n print_text('Max scores: ' + str(max_scores), 300, 350)\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_RETURN]:\n pygame.mixer.Sound.stop(death_sound)\n pygame.mixer.Sound.stop(sound_game)\n startgame1()\n\n if keys[pygame.K_ESCAPE]:\n pygame.mixer.Sound.stop(death_sound)\n pygame.mixer.Sound.stop(sound_game)\n show_menu()\n\n pygame.display.update()\n clock.tick(15)\n\n\ndef waitThree():\n counter = 0\n display.blit(bg, (0, 0))\n while True:\n print_text('3', 400, 300, (255, 255, 255), font_size=50)\n\n if counter > 500 and counter < 1000:\n display.blit(bg, (0, 0))\n print_text('2', 400, 300, (255, 255, 255), font_size=50)\n\n elif counter > 1500 and counter < 2000:\n display.blit(bg, (0, 0))\n print_text('1', 400, 300, (255, 255, 255), font_size=50)\n\n elif counter > 2500 and counter < 3000:\n break\n counter = counter + 10\n\n pygame.display.update()\n\n\ndef question_1():\n global scores\n if scores == 5:\n\n print_text('STOP! You will not run any further until you answer the question!', 20, 50, (255, 255, 255))\n print_text('What is the amount of information most often measured in?', 50, 90, (255, 255, 255))\n print_text('A. Liters', 50, 130, (255, 255, 255))\n print_text('B. Bytes', 50, 170, (255, 255, 255))\n print_text('C. Meters', 50, 210, (255, 255, 255))\n\n stopped_game = True\n while stopped_game:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_b]:\n waitThree()\n scores += 1\n stopped_game = False\n elif keys[pygame.K_a]:\n game_over()\n elif keys[pygame.K_c]:\n game_over()\n\n pygame.display.update()\n\n\ndef question_2():\n global scores\n if scores == 10:\n print_text(\"Stop, one more question!\", 50, 50, (255, 255, 255))\n print_text(\"How many megabytes are in one gigabyte?\", 50, 90, (23, 32, 42))\n print_text('A. 100', 50, 130, (255, 255, 255))\n print_text('B. 10000', 50, 170, (255, 255, 255))\n print_text('C. 1000', 50, 210, (255, 255, 255))\n\n stopped_game = True\n while stopped_game:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_c]:\n waitThree()\n scores += 1\n stopped_game = False\n elif keys[pygame.K_a]:\n game_over()\n elif keys[pygame.K_b]:\n game_over()\n\n pygame.display.update()\n\n\ndef question_3():\n global scores\n\n if scores == 15:\n print_text(\"Stop, one more question!\", 50, 50, (255, 255, 255))\n print_text(\"Permanent memory is?\", 50, 90, (255, 255, 255))\n print_text('A. BIOS', 50, 130, (23, 32, 42))\n print_text('B. SSD', 50, 170, (23, 32, 42))\n print_text('C. 
HDD', 50, 210, (23, 32, 42))\n\n stopped_game = True\n while stopped_game:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_a]:\n waitThree()\n scores += 1\n stopped_game = False\n elif keys[pygame.K_b]:\n game_over()\n elif keys[pygame.K_c]:\n game_over()\n\n pygame.display.update()\n\n\n\ndef startgame1():\n global run, isJump, x, jumpCount, y, button_sound, button_sound, display, player_counter, \\\n animCount, paused_game, scores, above_pc, game, max_above, sound_jump, sound_game\n\n game = True\n cactus_arr = []\n create_cactus_arr(cactus_arr)\n scores = 0\n speed = 13\n\n\n pygame.mixer.Sound.play(sound_game)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n while run:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n sys.exit()\n\n keys = pygame.key.get_pressed()\n\n if not isJump:\n if keys[pygame.K_SPACE]:\n\n # pygame.time.delay(50)\n isJump = True\n\n else:\n if jumpCount >= -11:\n if jumpCount < 0:\n y += (jumpCount ** 2) / 2\n else:\n y -= (jumpCount ** 2) / 2\n jumpCount -= 1\n else:\n isJump = False\n jumpCount = 11\n\n if keys[pygame.K_ESCAPE]:\n paused_game = True\n pause()\n\n drawWindow()\n\n\n\n check_collision(cactus_arr)\n\n draw_array(cactus_arr)\n\n count_scores(cactus_arr)\n\n if scores % 10 == 0:\n if scores != 0:\n speed = speed + 0.05\n for cactus in cactus_arr:\n cactus.speed = round(speed, 1)\n\n print_text('Scores: ' + str(scores), 600, 20, (23, 32, 42))\n\n if check_collision(cactus_arr):\n game = False\n\n question_1()\n question_2()\n question_3()\n\n # bg_change()\n\n pygame.display.update()\n clock.tick(30)\n\n return game_over()\n\n\ndef show_menu():\n menu_background = pygame.image.load('./pigs/menu.jpg')\n\n\n start_btn = Button(180, 70)\n quit_btn = Button(180, 70)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n sys.exit(1)\n\n show = True\n while show:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n sys.exit(1)\n\n display.blit(menu_background, (0, 0))\n start_btn.draw(290, 290, \"start\", startgame1, 50)\n quit_btn.draw(290, 390, \"Quit\", quit, 50)\n\n pygame.display.update()\n clock.tick(80)\n\n\ndef drawWindow():\n global animCount\n display.blit(bg, (0, 0))\n\n if animCount + 1 >= 30:\n animCount = 0\n\n draw_player()\n\n pygame.display.update()\n\n\nrun = True\nbullets = []\n\nshow_menu()\n\npygame.quit()\nquit()\n","sub_path":"pers.py","file_name":"pers.py","file_ext":"py","file_size_in_byte":16308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"187092417","text":"#!/usr/bin/python -tt\n#**********************\n#* Author: Jigar S. 
Rudani\n#* Program Name: Dictionary for Vocab.txt\n#* Current file: rudani2_apriori.py\n#* Version: 1.0\n#* \n#***********************\nimport sys\nimport math\nimport re\nimport os\nfrom collections import defaultdict\nfrom collections import Counter\nfrom itertools import combinations\nfrom operator import itemgetter\n\n# Define a parseInputfile() function which parses the Vocab file and returns its contents as key-value pairs.\ndef parseInputfile(inputFilename):\n dictVocab = {}\n filePointer = open(inputFilename,\"r\")\n inputLine = filePointer.read().splitlines()\n for keyValue in inputLine:\n key = keyValue.split('\\t')[0]\n dictVocab[key] = keyValue.split('\\t')[1]\n filePointer.close()\n return dictVocab\n\n# Define a parseTopicFile() function which parses the topic file and builds the list of transactions for that topic.\ndef parseTopicFile(inputFilename,min_support_count):\n\n print(\"Topic file Name\", inputFilename)\n transactionList = list()\n filePointer = open(inputFilename,\"r\")\n inputLine = filePointer.read().splitlines()\n for value in inputLine:\n transactions = frozenset([int(number) for number in value.rstrip().split(' ')])\n transactionList.append(transactions)\n listLen = len(transactionList)\n print(\"Number of transactions\",listLen)\n min_support = func_Mul(min_support_count,listLen)\n print(\"Minimum support count\",min_support)\n return transactionList,min_support,listLen\n\n# Define a func_Mul() function which performs multiplication.\ndef func_Mul(min_support_count,listLen):\n a = (min_support_count * listLen)\n return int(a)\n\n# Define a func_Powerset() function which produces all subsets of the given size for a given set.\ndef func_Powerset(iterable,itemSize):\n tempSet = list(iterable) \n return list(combinations((tempSet), itemSize)) \n\n# Define a func_PruneItems() function which performs pruning of ItemSet.\ndef func_PruneItems(transactionList,itemSet,listItemwithFreq,min_support):\n\n tempItemSet = set()\n itemsDict = defaultdict(int)\n\n for itm in itemSet: \n for trans in transactionList:\n if(itm.issubset(trans)):\n itemsDict[itm] += 1\n\n for itm,freq in itemsDict.items():\n if(freq >= min_support):\n tempItemSet.add(itm)\n listItemwithFreq[itm] = freq\n \n return tempItemSet,listItemwithFreq\n\n# Define a func_JoinItems() function which performs the self-join operation\ndef func_JoinItems(itemSet,tempItemSet,itemSize):\n\n tempSet = set()\n for item_1 in itemSet:\n for item_2 in itemSet:\n if (item_1 != item_2):\n if (len(item_1.intersection(item_2)) == (itemSize - 2)):\n pwrSet = func_Powerset(item_1.union(item_2),itemSize - 1)\n for eachSubset in pwrSet:\n chkPwrSet = set(eachSubset).difference(set([]))\n if (chkPwrSet in tempItemSet and chkPwrSet not in item_1 and chkPwrSet not in item_2):\n tempSet.add(item_1.union(item_2)) \n return tempSet\n\n# Define a funcAprioriFreqPattern() function which runs Apriori over the transactions and collects the frequent patterns for each topic.\ndef funcAprioriFreqPattern(transactionList,min_support):\n\n itemSet = set()\n listItemwithFreq = defaultdict(int)\n itemSize = 2\n tempItemSet = set()\n tempCandidateItem = set()\n \n for trans in transactionList:\n for items in trans:\n itemSet.add(frozenset([items]))\n \n #print \"\\n<-- C1 Candidate Set -->\\n\"\n #func_DisplayCandidate(itemSet)\n LItemSet,listItemwithFreq = func_PruneItems(transactionList,itemSet,listItemwithFreq,min_support)\n #print \"\\n<-- L1 ItemSet -->\\n\"\n #func_DisplayListItemFreq(LItemSet,listItemwithFreq,itemSize - 1)\n while (LItemSet != set([])):\n tempItemSet = LItemSet\n tempCandidateItem 
= func_JoinItems(LItemSet,tempItemSet,itemSize)\n #print \"\\n<-- C(%d) Candidate Set -->\\n\" % itemSize\n #func_DisplayCandidate(tempCandidateItem)\n LItemSet,listItemwithFreq = func_PruneItems(transactionList,tempCandidateItem,listItemwithFreq,min_support)\n #print \"<-- L(%d) ItemSet -->\\n\" % (itemSize)\n #func_DisplayListItemFreq(LItemSet,listItemwithFreq,itemSize)\n itemSize += 1\n return tempItemSet,listItemwithFreq\n\n# Define a func_DisplayListItemFreq() function which displays the Frequent Item Sets.\ndef func_DisplayListItemFreq(LItemSet,listItemwithFreq,itemSize):\n if (LItemSet != set([])):\n print(\"<-- L(%d) ItemSet -->\\n\" % (itemSize))\n for item in LItemSet:\n for itm,freq in listItemwithFreq.items():\n if (list(item) == list(itm)):\n print(itm,freq)\n print(\"\\n\")\n\n# Define a func_DisplayCandidate() function which displays the Candidate Item Sets.\ndef func_DisplayCandidate(tempCandidateItem):\n if (tempCandidateItem != set([])):\n for candidateItem in tempCandidateItem:\n print(candidateItem)\n print(\"\\n\")\n\n# Define a func_FormatFreqPattern() function which formats the Frequent Pattern [s] (space) [t1 (space) t2 (space) t3 (space) ...]\ndef func_FormatFreqPattern(listItemwithFreq,dt,indx,outputFreqPatternName,dictVocab):\n\n tempList = []\n patternDigitObj = re.compile('\\d+')\n for items,freq in listItemwithFreq.items():\n tempList.append([freq,items])\n sorted_items= sorted(tempList,key=itemgetter(0),reverse=True)\n tempList = []\n\n for listItems in sorted_items:\n tempList.append([listItems[0],patternDigitObj.findall(str(listItems[1]))])\n func_redirecttoOutputFolder(tempList,dt,indx,outputFreqPatternName,dictVocab)\n\n# Define a func_ClosedMaxPattern() function to find Closed and Maximal Pattern\ndef func_ClosedMaxPattern(listItemwithFreq,dt,indx,outputClosedPatternName,outputMaxPatternName,dictVocab):\n\n closedPattern = []\n maxPattern = []\n tempList = []\n isAppendClosedPattern = None\n isAppendMaxPattern = None\n patternDigitObj = re.compile('\\d+')\n for items,freq in listItemwithFreq.items():\n tempList.append([freq,items])\n sorted_items= sorted(tempList,key=itemgetter(0),reverse=True)\n\n for listItems_1 in sorted_items:\n isAppendClosedPattern = True\n isAppendMaxPattern = True\n for listItems_2 in sorted_items:\n if (listItems_1[1] not in listItems_2[1]):\n if (listItems_2[1] > listItems_1[1]):\n isAppendMaxPattern = False\n if (listItems_2[0] == listItems_1[0]):\n isAppendClosedPattern = False\n if (isAppendClosedPattern):\n closedPattern.append([listItems_1[0],patternDigitObj.findall(str(listItems_1[1]))])\n if (isAppendMaxPattern):\n maxPattern.append([listItems_1[0],patternDigitObj.findall(str(listItems_1[1]))])\n func_redirecttoOutputFolder(closedPattern,dt,indx,outputClosedPatternName,dictVocab)\n func_redirecttoOutputFolder(maxPattern,dt,indx,outputMaxPatternName,dictVocab)\n \n# Define a func_Purity() function which purify the frequent pattern\ndef func_Purity(allTopicFreqPattern,indx,dtList,outputPurityFileName,dictVocab):\n\n purityDict = {}\n d_t = dtList[indx][0]\n maxLogTerm = 0.0\n pattern2bsearch = []\n f_t_p = 0.0\n topicIndex = 0\n f_tdash_p = 0.0\n d_t_tdashdict = []\n d_t_tdash = 1.0\n tempMaxLogTerm = 0.0\n purity = 0.0\n tempTopicPurityList = []\n sortbysupport = []\n finalpuritylist = []\n puritylist = []\n isAppend = False\n purtiy_support_combine = 0.0\n\n for items in allTopicFreqPattern:\n maxLogTerm = 0.0\n firstfactor = 0.0\n purity = 0.0\n if (items[0] == indx):\n pattern2bsearch = items[2]\n f_t_p = items[1]\n 
for allItems in allTopicFreqPattern:\n if (allItems[0] != indx):\n topicIndex = allItems[0]\n if ((len(frozenset(pattern2bsearch).intersection(frozenset(allItems[2])))) == len(pattern2bsearch)):\n f_tdash_p = allItems[1]\n else:\n f_tdash_p = 0.0\n d_t_tdashdict = dtList[indx][1]\n for value in d_t_tdashdict:\n for key,val in value.items():\n if(key == topicIndex):\n d_t_tdash = val\n break\n tempMaxLogTerm = float((float(f_t_p) + float(f_tdash_p))/float(d_t_tdash))\n if(float(tempMaxLogTerm) > float(maxLogTerm)):\n maxLogTerm = float(tempMaxLogTerm)\n firstfactor = float(f_t_p)/float(d_t)\n purity = math.log(float(firstfactor)) - math.log(float(maxLogTerm))\n purtiy_support_combine = (float(purity) * math.log10(float(f_t_p)))\n tempTopicPurityList.append([round(purity,4),purtiy_support_combine,items[2]])\n #puritylist.append(round(purity,4))\n #purityDict = Counter(puritylist)\n #for key in purityDict.keys():\n # sortbysupport = []\n # for (index, tlist) in enumerate(tempTopicPurityList):\n # if index < len(tempTopicPurityList) - 1:\n # current = tlist\n # if(key == current[0]):\n # sortbysupport.append(current)\n # elif(key == tlist[0]):\n # sortbysupport.append(tlist)\n tempTopicPurityList = sorted(tempTopicPurityList, key=itemgetter(1), reverse=True)\n for items in tempTopicPurityList:\n finalpuritylist.append([items[0],items[2]])\n #finalpuritylist = sorted(finalpuritylist,reverse=True)\n func_redirecttoOutputFolder(finalpuritylist,dtList,indx,outputPurityFileName,dictVocab)\n\n# Define a func_Completeness() function which finds the completeness of the pattern.\ndef func_Completeness(listItemwithFreq,dt,indx,outputCompletenessFileName,dictVocab):\n\n completenessList = []\n patternDigitObj = re.compile('\\d+')\n tempsortbysupportList = []\n completenessDict = {}\n tempcompletenessList = []\n sortbysupport = []\n\n for item1,freq1 in listItemwithFreq.items():\n f_t_p = freq1\n freqList = []\n for item2,freq2 in listItemwithFreq.items():\n if (item1 not in item2):\n if (item2 > item1):\n freqList.append(freq2)\n if (len(freqList) == 0):\n maxfreq = 0\n else:\n maxfreq = max(freqList)\n result = 1 - (maxfreq/f_t_p)\n tempcompletenessList.append([float(result), f_t_p, patternDigitObj.findall(str(item1))])\n tempsortbysupportList.append(float(result))\n tempsortbysupportList = sorted(tempsortbysupportList,reverse=True)\n #print(\"tempsortbysupportList\",tempsortbysupportList)\n completenessDict = Counter(tempsortbysupportList)\n #print(completenessDict)\n for key in completenessDict.keys():\n sortbysupport = []\n for (index, tlist) in enumerate(tempcompletenessList):\n if index < len(tempcompletenessList) - 1:\n current = tlist\n if(key == current[0]):\n sortbysupport.append(current)\n elif(key == tlist[0]):\n sortbysupport.append(tlist)\n sortbysupport = sorted(sortbysupport,key=itemgetter(1),reverse=True)\n #print(sortbysupport)\n for items in sortbysupport:\n completenessList.append([round(items[0],4),items[2]])\n #print(completenessList)\n completenessList = sorted(completenessList,key=itemgetter(0),reverse=True)\n #print(completenessList)\n func_redirecttoOutputFolder(completenessList,dt,indx,outputCompletenessFileName,dictVocab)\n\n# Define a func_Phraseness() function which re-rank the frequent pattern based on phraseness.\ndef func_Phraseness(allTopicFreqPattern,indx,dtList,outputPhrasenessFileName,dictVocab):\n\n d_t = dtList[indx][0]\n f_t_w = 0.0\n firstfactor = 0.0\n secondfactor = 0.0\n sumsecondfactor = 0.0\n result = 0.0\n f_t_p = 0.0\n f_t_w = 0.0\n phraseness = []\n 
tempphraseness = []\n sortbysupport = []\n phrasenessDict = {}\n for items in allTopicFreqPattern:\n firstfactor = 0.0\n if (items[0] == indx):\n pattern2bsearch = items[2]\n f_t_p = items[1]\n for word in pattern2bsearch:\n word2bsearch = [word]\n for trans in allTopicFreqPattern:\n if (trans[0] == indx):\n if ((len(frozenset(trans[2]).difference(frozenset(word2bsearch)))) == 0):\n f_t_w = trans[1]\n secondfactor = math.log((float(f_t_w)/float(d_t)))\n sumsecondfactor = float(float(sumsecondfactor) + float(secondfactor))\n firstfactor = math.log((float(f_t_p)/float(d_t)))\n result = round(float(firstfactor) - float(sumsecondfactor),4)\n secondfactor = 0.0\n sumsecondfactor = 0.0\n tempphraseness.append([float(result), f_t_p, items[2]])\n sortbysupport.append(float(result))\n phrasenessDict = Counter(sortbysupport)\n for key in phrasenessDict.keys():\n sortbysupport = []\n for (index, tlist) in enumerate(tempphraseness):\n if index < len(tempphraseness) - 1:\n current = tlist\n if(key == current[0]):\n sortbysupport.append(current)\n elif(key == tlist[0]):\n sortbysupport.append(tlist)\n sortbysupport = sorted(sortbysupport, key=itemgetter(1), reverse=True)\n for items in sortbysupport:\n phraseness.append([items[0], items[2]])\n phraseness = sorted(phraseness,key = itemgetter(0),reverse=True)\n func_redirecttoOutputFolder(phraseness, dtList, indx, outputPhrasenessFileName, dictVocab)\n\n# Define a func_redirecttoOutputFolder() function which redirects the output to respective folder\ndef func_redirecttoOutputFolder(listItemwithFreq,dt,indx,outputFileName,dictVocab):\n\n phrase = \"phrase\"\n tempVocabList = []\n listlength = dt[indx][0]\n\n if(re.search(r'\\bpattern\\b',outputFileName)):\n filePath = os.getcwd()+\"/Pattern/\"+outputFileName\n filePathPhrase = os.getcwd()+\"/Pattern/\"+outputFileName+phrase\n dirPath = os.getcwd()+\"/Pattern/\"\n elif(re.search(r'\\bclosed\\b',outputFileName)):\n filePath = os.getcwd()+\"/Closed/\"+outputFileName\n filePathPhrase = os.getcwd()+\"/Closed/\"+outputFileName+phrase\n dirPath = os.getcwd()+\"/Closed/\"\n elif(re.search(r'\\bmax\\b',outputFileName)):\n filePath = os.getcwd()+\"/Max/\"+outputFileName\n filePathPhrase = os.getcwd()+\"/Max/\"+outputFileName+phrase\n dirPath = os.getcwd()+\"/Max/\"\n elif(re.search(r'\\bpurity\\b',outputFileName)):\n filePath = os.getcwd()+\"/Purity/\"+outputFileName\n filePathPhrase = os.getcwd()+\"/Purity/\"+outputFileName+phrase\n dirPath = os.getcwd()+\"/Purity/\"\n elif(re.search(r'\\bpurityphraseness\\b',outputFileName)):\n filePath = os.getcwd()+\"/PhrasenessCompleteness/\"+outputFileName\n filePathPhrase = os.getcwd()+\"/PhrasenessCompleteness/\"+outputFileName+phrase\n dirPath = os.getcwd()+\"/PhrasenessCompleteness/\"\n elif(re.search(r'\\bcompleteness\\b',outputFileName)):\n filePath = os.getcwd()+\"/PhrasenessCompleteness/\"+outputFileName\n filePathPhrase = os.getcwd()+\"/PhrasenessCompleteness/\"+outputFileName+phrase\n dirPath = os.getcwd()+\"/PhrasenessCompleteness/\"\n else:\n print(\"Not a valid expected file name\\n\")\n print(\"Expected either 1. pattern-0.txt 2. closed-0.txt 3. 
max-0.txt\\n\")\n \n if not os.path.exists(os.path.dirname(dirPath)):\n os.makedirs(dirPath)\n if((re.search(r'\\bpurity\\b',outputFileName)) or (re.search(r'\\bpurityphraseness\\b',outputFileName)) or (re.search(r'\\bcompleteness\\b',outputFileName))):\n with open(filePath, \"w+\") as f:\n for listItems in listItemwithFreq:\n f.write(\"%s\\n\" % listItems)\n\n with open(filePathPhrase, \"w+\") as f:\n for listItems in listItemwithFreq:\n tempVocabList = []\n for listSet in listItems[1]:\n tempVocabList.append(dictVocab[listSet])\n f.write(\"%s %s\\n\" % (listItems[0],tempVocabList))\n else:\n with open(filePath, \"w+\") as f:\n for listItems in listItemwithFreq:\n f.write(\"%s %s\\n\" % (round(listItems[0]/listlength,4),listItems[1]))\n\n with open(filePathPhrase, \"w+\") as f:\n for listItems in listItemwithFreq:\n tempVocabList = []\n for listSet in listItems[1]:\n tempVocabList.append(dictVocab[listSet])\n f.write(\"%s %s\\n\" % (round(listItems[0]/listlength,4),tempVocabList))\n\ndef main():\n\n allTopicFreqPattern = []\n patternDigitObj = re.compile('\\d+')\n if len(sys.argv) <= 2:\n progName = sys.argv[0]\n print('\\nusage: [%s] Vocab File Name Min_Support\\n' % (progName))\n print('Vocab FileName: File in which mapping of number to word is stored')\n print('Min_Support: Minimum Support count required to find Frequent Pattern\\n')\n exit\n else:\n inputFilename = sys.argv[1]\n min_support_count = sys.argv[2]\n print('InputFileName: ',inputFilename)\n dictVocab = parseInputfile(inputFilename)\n #Parse the topic0~4.txt\n fileIndex = [0,1,2,3,4]\n dt = [\n [10047,[{1:17326},{2:17988},{3:17999},{4:17820}]],\n [9674,[{0:17326},{2:17446},{3:17902},{4:17486}]],\n [9959,[{0:17988},{1:17446},{3:18077},{4:17492}]],\n [10161,[{0:17999},{1:17902},{2:18077},{4:17912}]],\n [9845,[{0:17820},{1:17486},{2:17492},{3:17912}]]\n ]\n for indx in fileIndex:\n transactionList,min_support,listLen = parseTopicFile('topic-'+str(indx)+'.txt',float(min_support_count))\n freqItemSet,listItemwithFreq = funcAprioriFreqPattern(transactionList,min_support)\n outputFreqPatternName = 'pattern-'+str(indx)+'.txt'\n outputClosedPatternName = 'closed-'+str(indx)+'.txt'\n outputMaxPatternName = 'max-'+str(indx)+'.txt'\n outputCompletenessFileName = 'completeness-'+str(indx)+'.txt'\n \n for key,value in listItemwithFreq.items(): \n allTopicFreqPattern.append([indx,value,patternDigitObj.findall(str(key))])\n\n func_FormatFreqPattern(listItemwithFreq,dt,indx,outputFreqPatternName,dictVocab)\n func_ClosedMaxPattern(listItemwithFreq,dt,indx,outputClosedPatternName,outputMaxPatternName,dictVocab)\n func_Completeness(listItemwithFreq,dt,indx,outputCompletenessFileName,dictVocab)\n\n Index = [0,1,2,3,4]\n for indx in Index:\n outputPurityFileName = 'purity-'+str(indx)+'.txt'\n outputPhrasenessFileName = 'purityphraseness-'+str(indx)+'.txt'\n func_Purity(allTopicFreqPattern,indx,dt,outputPurityFileName,dictVocab)\n func_Phraseness(allTopicFreqPattern,indx,dt,outputPhrasenessFileName,dictVocab)\n\n# This is the standard boilerplate that calls the main() function.\nif __name__ == '__main__':\n main()","sub_path":"rudani2_apriori.py","file_name":"rudani2_apriori.py","file_ext":"py","file_size_in_byte":19219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"606096578","text":"import numpy as np\n\nclass SVT():\n def __init__(self, M, mask):\n self.M = M\n self.mask = mask\n self.tau = 5 * np.sum(self.M.shape) / 2\n self.delta = 1.2 * np.prod(self.M.shape) / np.sum(self.mask)\n 
self.Y = np.zeros_like(self.M)\n\n def execute(self, max_iterations):\n for k in range(max_iterations):\n U, S, V = np.linalg.svd(self.Y, full_matrices=False)\n\n S = np.maximum(S - self.tau, 0)\n\n X = np.linalg.multi_dot([U, np.diag(S), V])\n self.Y += self.delta * self.mask * (self.M - X)\n\n recon_error = np.linalg.norm(self.mask * (X - self.M)) / np.linalg.norm(self.mask * self.M)\n print(\"[%d/%d] reconstruction_error : %.4f\"%(k+1, max_iterations, recon_error))\n\n return X","sub_path":"mat_comp.py","file_name":"mat_comp.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338569467","text":"from compas.datastructures import Mesh\nfrom compas.geometry import Translation, Scale, Point\n\ntetra = Mesh.from_polyhedron(4)\nhexa = Mesh.from_polyhedron(6)\nocta = Mesh.from_polyhedron(8)\ndodeca = Mesh.from_polyhedron(12)\nicosa = Mesh.from_polyhedron(20)\n\no = Point(0, 0, 0)\n\nT = Translation.from_vector([2.5, 0, 0])\n\np = Point(* tetra.vertex_coordinates(tetra.get_any_vertex()))\ns = 1 / (p - o).length\nS = Scale.from_factors([s, s, s])\n\ntetra.transform(S)\ntetra.dual()\n\np = Point(* hexa.vertex_coordinates(hexa.get_any_vertex()))\ns = 1 / (p - o).length\nS = Scale.from_factors([s, s, s])\n\nhexa.transform(T * S)\nhexa.dual()\n\np = Point(* octa.vertex_coordinates(octa.get_any_vertex()))\ns = 1 / (p - o).length\nS = Scale.from_factors([s, s, s])\n\nocta.transform(T * T * S)\nocta.dual()\n\np = Point(* dodeca.vertex_coordinates(dodeca.get_any_vertex()))\ns = 1 / (p - o).length\nS = Scale.from_factors([s, s, s])\n\ndodeca.transform(T * T * T * S)\ndodeca.dual()\n\np = Point(* icosa.vertex_coordinates(icosa.get_any_vertex()))\ns = 1 / (p - o).length\nS = Scale.from_factors([s, s, s])\n\nicosa.transform(T * T * T * T * S)\nicosa.dual()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"122857491","text":"\nclass Element:\n\ttag='html'\n\tattributes=''\n\tindentation_spaces=' '\n\tdef __init__(self, content=None):\n\t\tself.content=[]\n\t\tif content is not None:\n\t\t\tself.content.append(content)\n\n\n\tdef __init__(self, *args, **kwargs):\n\t\tself.content=[]\n\t\tif args is not None:\n\t\t\tfor i in args:\n\t\t\t\tself.content.append(i)\n\n\t\tif kwargs is not None and isinstance(kwargs, dict):\n\t\t\t\n\t\t\tfor n in kwargs:\n\t\t\t\tif len(self.attributes) > 0:\n\t\t\t\t\tself.attributes+= \", \"\n\t\t\t\telse:\n\t\t\t\t\tself.attributes=\" \"# start with a space\n\t\t\t\tself.attributes += \"{}=\\\"{}\\\"\".format(n, kwargs[n])\n\n\tdef append(self,content):\n\t\tself.content.append(content)\n\n\tdef render(self, f, ind=\"\"):\n\t\tstart_tag = \"{}<{}{}>{}\".format(ind, self.tag, self.attributes, '')\n\t\tf.write(start_tag)\n\n\t\tfor el in self.content:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tif isinstance(el, Element):\n\t\t\t\t\tf.write('\\n')\n\t\t\t\t# if this is an Element object, this method will succeed\n\t\t\t\tel.render(f, ind+self.indentation_spaces)\n\t\t\texcept AttributeError:\n\t\t\t\tf.write(\"{}{}{}\".format('\\n', ind+self.indentation_spaces, str(el)))\n\n\t\t# for s in self.content:\n\t\t# \tf.write(s)\n\t\t#f.write(\" \".join(self.content))\n\n\t\tend_tag = \"{}{}\".format('\\n', ind, self.tag)\n\t\tf.write(end_tag)\n\nclass Body(Element):\n\ttag='body'\n\nclass P(Element):\n\ttag='p'\n\nclass Html(Element):\n\ttag='html'\n\nclass 
Head(Element):\n\ttag='head'\n\nclass OneLineTag(Element):\n\ttag='one-line-tag'\n\n\tdef render(self, f, ind=\"\"):\n\n\t\tif len(self.content) == 0:\n\t\t\tstart_tag = \"{}<{} \".format(ind, self.tag)\n\t\telse:\n\t\t\tstart_tag = \"{}<{}{}>{}\".format(ind, self.tag, self.attributes, '')\n\n\t\tf.write(start_tag)\n\t\tfor el in self.content:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tif isinstance(el, Element):\n\t\t\t\t\tf.write('')\n\t\t\t\t# if this is an Element object, this method will succeed\n\t\t\t\tel.render(f, ind+self.indentation_spaces)\n\t\t\texcept AttributeError:\n\t\t\t\tf.write(\"{}\".format(str(el)))\n\n\t\t# for s in self.content:\n\t\t# \tf.write(s)\n\t\t#f.write(\" \".join(self.content))\n\t\tif len(self.content) == 0:\n\t\t\tend_tag = \"/>\"\n\t\telse:\n\t\t\tend_tag = \"{}{}\".format('', '', self.tag)\n\n\t\tf.write(end_tag)\n\nclass Title(OneLineTag):\n\ttag = 'title'\n\nclass Hr(OneLineTag):\n\ttag = 'hr'\n\nclass H(OneLineTag):\n\ttag=''\n\n\tdef __init__(self, *args, **kwargs):\n\t\tself.content=[]\n\t\tassert len(args) == 2, \"Expected two arguments for H tag\"\n\n\t\tself.content.append(args[1])\n\t\tself.tag = \"h{}\".format(str(args[0]))\n\nclass A(OneLineTag):\n\ttag='a'\n\n\tdef __init__(self, *args, **kwargs):\n\t\tself.content=[]\n\t\tassert len(args) == 2, \"Expected two arguments for A tag\"\n\n\t\tself.content.append(args[1])\n\t\tself.attributes = \" {}=\\\"{}\\\"\".format('href', args[0])\n\n\n\n\n","sub_path":"students/ericrosko/session07/html_render.py","file_name":"html_render.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"624107942","text":"import logging\nimport os\nimport time\n\nimport lasagne\nfrom lasagne.utils import floatX\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom tqdm import tqdm\n\nfrom .data import FileSystemData\nfrom .util import gpu_free_mem\n\nlogger = logging.getLogger(__name__)\n\n\nclass Solver(object):\n\n def __init__(self, max_iter, batch_size, iter_size, base_lr):\n self.max_iter = max_iter\n self.batch_size = batch_size\n self.iter_size = iter_size\n self.base_lr = base_lr\n\n def train(self, Xs, Ys, Xv, Yv, net):\n raise NotImplementedError\n\n\nclass SGDSolver(Solver):\n\n # TODO apply updates only at iter_size\n # TODO track updates:weights ratios\n\n def __init__(self, max_iter, batch_size, iter_size, base_lr):\n super(SGDSolver, self).__init__(max_iter, batch_size, iter_size,\n base_lr)\n\n def train(self, Xs, Ys, Xv, Yv, mdl,\n data_folder='data/', out_folder='out/'):\n\n data_folder = os.path.join(data_folder, 'imgs/', 'train/')\n input_var = mdl.input_var\n net = mdl.get_output_layer()\n target_var = T.ivector('targets')\n\n prediction = lasagne.layers.get_output(net)\n loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)\n loss = loss.mean()\n\n params = lasagne.layers.get_all_params(net, trainable=True)\n updates = lasagne.updates.adam(loss, params, learning_rate=self.base_lr)\n\n test_prediction = lasagne.layers.get_output(net, deterministic=True)\n test_loss = lasagne.objectives. 
\\\n categorical_crossentropy(test_prediction, target_var)\n test_loss = test_loss.mean()\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),\n dtype=theano.config.floatX)\n\n logger.info(\"Compiling network functions...\")\n train_fn = theano.function([input_var, target_var], loss, updates=updates)\n val_fn = theano.function([input_var, target_var], [test_loss, test_acc])\n predict_proba = theano.function([input_var], test_prediction)\n\n logger.info(\"Training...\")\n logger.info('GPU Free Mem: %.3f' % gpu_free_mem('gb'))\n\n # TODO change to steps\n epochs = self.max_iter / len(Xs)\n\n best_val_loss, best_epoch = None, None\n best_mdl_path = os.path.join(out_folder, 'best_model.npz')\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n\n for epoch in range(epochs):\n start_time = time.time()\n train_err, train_batches = 0, 0\n data_s = FileSystemData(Xs, Ys, data_folder, self.batch_size,\n infinite=False, augment=True, shuffle=True)\n for batch in tqdm(data_s, total=data_s.steps):\n inputs, targets = batch\n inputs = floatX(np.array([mdl.preprocess(x) for x in inputs]))\n train_err += train_fn(inputs, targets)\n train_batches += 1\n\n data_v = FileSystemData(Xv, Yv, data_folder, self.batch_size,\n infinite=False, augment=True, shuffle=True)\n val_err, val_acc, val_batches = 0, 0, 0\n for batch in tqdm(data_v, total=data_v.steps):\n inputs, targets = batch\n inputs = floatX(np.array([mdl.preprocess(x) for x in inputs]))\n err, acc = val_fn(inputs, targets)\n val_err += err\n val_acc += acc\n val_batches += 1\n\n train_loss = train_err / train_batches\n val_loss = val_err / val_batches\n val_acc = val_acc / val_batches * 100\n end_time = time.time() - start_time\n\n if not best_val_loss or val_loss < best_val_loss:\n best_val_loss = val_loss\n best_epoch = epoch\n np.savez(best_mdl_path,\n *lasagne.layers.get_all_param_values(net))\n snapshot_path = os.path.join(out_folder, 'snapshot_epoch_%d.npz'\n % epoch)\n np.savez(snapshot_path, *lasagne.layers.get_all_param_values(net))\n\n logger.info(\"epoch[%d] -- Ls: %.3f | Lv: %.3f | ACCv: %.3f | Ts: %.3f\"\n % (epoch, train_loss, val_loss, val_acc, end_time))\n\n logger.info(\"loading best model: epoch[%d]\" % best_epoch)\n with np.load(best_mdl_path) as f:\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n lasagne.layers.set_all_param_values(net, param_values)\n\n return predict_proba\n\n def predict(self, Xt, pred_fn, mdl, batchsize=2, data_folder='data/'):\n data_folder = os.path.join(data_folder, 'imgs/', 'test/')\n logger.info('Predicting on test set...')\n pred = []\n data_t = FileSystemData(Xt, None, data_folder, batch_size=batchsize)\n for batch in tqdm(data_t, total=data_t.steps):\n inputs, _ = batch\n inputs = floatX(np.array([mdl.preprocess(x) for x in inputs]))\n pred.extend(pred_fn(inputs))\n pred = np.array(pred)\n logger.info('pred shape: (%d, %d)' % pred.shape)\n return pred\n","sub_path":"sfddd/sgd.py","file_name":"sgd.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623109961","text":"# coding=utf-8\nfrom conf.case_strategy import CaseStrategy\nfrom conf.devices import Devices\nfrom conf.appium_server import *\nfrom conf.run_cases import RunCases\nimport multiprocessing\nfrom conf.base_config import GetVariable as gv\n\n\nclass Drivers:\n # @staticmethod\n # def start_process_sync(process):\n # for x in process:\n # x.start()\n #\n # for x in process:\n # x.join()\n\n 
@staticmethod\n def run_cases():\n devices_info = Devices().get_devices()\n if not len(devices_info):\n print('there is no device connected this PC')\n else:\n pass\n\n start_args_list = []\n port_list = []\n item_count = len(gv.CASE_INFO) if len(gv.CASE_INFO) < len(devices_info) else len(devices_info)\n\n for x in range(item_count):\n port = 4723 + 2*x\n appium_server = AppiumServer(port)\n port_list.append(port)\n device_name = list(devices_info.keys())[x]\n device_version = devices_info[device_name]\n run = RunCases(device_name)\n appium_server.start_appium_server(run)\n cs = CaseStrategy()\n cases = cs.collect_cases(index=x, suite=True)\n start_args_list.append((device_name, device_version, port, run, cases))\n\n pool = multiprocessing.Pool(item_count)\n for x in start_args_list:\n pool.apply_async(Devices().appium_desired, args=x)\n pool.close()\n pool.join()\n\n for x in port_list:\n appium_server = AppiumServer(x)\n appium_server.release_port()\n\n\n","sub_path":"conf/drivers.py","file_name":"drivers.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638833766","text":"\"\"\"\nType hinting example.\n\"\"\"\n\ndef normalize_date(date_str: str, year: str = '2020') -> str:\n normalized_date: str = date_str + '/' + year\n return normalized_date\n\nmax: int = 100\ndate: str = '03/14'\nnew_date: str = normalize_date('03/14', '2021')\nprint(date + ' after normalizing is ' + new_date)\n","sub_path":"examples/ch02_control_structs/type_hints_fixed.py","file_name":"type_hints_fixed.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"527911648","text":"# -*- coding: iso-8859-1 -*-\n#\n# $Id: disk_free_info.py,v 1.2 2013/02/05 15:58:59 xantgui Exp $\n#\n# Copyright (c) Ericsson Espaņa S.A., 2011.\n# All rights reserved.\n#\n# This product or document is proprietary to and embodies the\n# confidential technology of Ericsson Espaņa S.A.\n# Possession, use, duplication or distribution of this product\n# or document is authorized only pursuant to a valid written\n# license from Ericsson Espaņa S.A\n#\n\n\nfrom NSTpyfw.metrics.ssr.ssr_metrics_info import SsrMetricsInfo\n\n\"\"\"\nClass to get information about the Service Pools installed\n\"\"\"\nclass ShowReleaseServicePoolInfo(SsrMetricsInfo):\n \"\"\"\n Classes to get information about a specific service pool\n \"\"\"\n \n def __init__(self, ssh_connection=None, *args, **kargs):\n \"\"\"\n Constructor\n \"\"\"\n super(ShowReleaseServicePoolInfo, self).__init__(ssh_connection, *args, **kargs)\n\n self.metrics_info = []\n ## END METHOD __init__()\n def parse(self, metrics_info):\n \"\"\"\n Parsing command output information\n \"\"\"\n headers = [\"name\", \"status\", \"path\"]\n on_service_pools = False\n for line in metrics_info:\n if 0 == len(line.strip()) or line.startswith(\"-\"):\n continue\n ## End If\n if on_service_pools:\n fields = filter(None, line.split())\n fields = dict(zip(headers, fields))\n result = {\"Path\": fields[\"path\"]}\n if fields[\"name\"].startswith(\"*\") and \"active\" == fields[\"status\"]:\n result[\"Active\"] = True\n result[\"Service Pool\"] = fields[\"name\"][1:]\n else:\n result[\"Active\"] = False\n result[\"Service Pool\"] = fields[\"name\"]\n ## End If\n self.metrics_info.append(result)\n elif line.strip().startswith(\"Service\"):\n on_service_pools = True\n ## End If\n ## End For\n ## END METHOD parse()\n## END CLASS 
ShowReleaseServicePoolInfo( SsrMetricsInfo )\n\n","sub_path":"sds/back_test/ref/metrics/ssr/show_release_service_pool_info.py","file_name":"show_release_service_pool_info.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"15248327","text":"\nimport requests\nfrom strex.parse import Parser\n\nclass GetWare(object):\n def __init__(self, tag, options=None):\n self.set_tag(tag) \n\n def set_tag(self, tag):\n self.tag = tag\n\n def run(self, parser, structure, doc):\n obj = structure.get('_httpget') \n\n if obj.get('url'):\n url = parser.engine.run_query(obj['url'], doc)\n else:\n url = doc\n\n options = obj['options']\n structure = obj['structure']\n\n content = requests.get(url)\n st = Parser(options, 'xpath')\n res = st.parse(structure, content.text)\n return res \n","sub_path":"strex/addins/http_requests.py","file_name":"http_requests.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"328309225","text":"# -*- coding: utf-8 -*-\n\"\"\"\n logging\n ~~~~~~~\n\n Implements the logging support for GameServer.\n\"\"\"\n\nimport sys\nimport logging\nimport logging.config\nimport logging.handlers\n\n\n__all__ = ['log']\n\nlog = logging.getLogger('root')\n\n\ndef initialize():\n\n LOGGING = {\n 'version': 1,\n 'formatters': {\n 'detail': {\n 'class': 'logging.Formatter',\n 'format': '%(asctime)s, %(levelname)s - %(name)s - [%(filename)s:%(lineno)d], %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(name)s [%(filename)s:%(lineno)d], %(message)s'\n },\n },\n 'loggers': {\n 'root': {\n 'handlers': ['file', 'console'],\n 'level': 'DEBUG',\n },\n 'tornado': {\n 'handlers': ['file'],\n 'level': 'INFO',\n },\n },\n 'handlers': {\n 'file': {\n 'class': 'logging.handlers.RotatingFileHandler',\n 'formatter': 'detail',\n 'filename': '/tmp/gfun-gameserver.log',\n 'maxBytes': 1024 * 1024 * 1,\n 'backupCount': 5,\n 'encoding': 'utf-8',\n },\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n 'stream': sys.stdout,\n }\n }\n }\n\n logger_root = logging.getLogger('root')\n logging.config.dictConfig(LOGGING)\n\n # logging messages are not passed to the handlers of ancestor loggers.\n logger_root.propagate = False\n\n # set colour\n # green\n logging.addLevelName(logging.DEBUG, \"\\033[32m%s\\033[0m\" % logging.getLevelName(logging.DEBUG))\n # YELLOW\n logging.addLevelName(logging.WARNING, \"\\033[33m%s\\033[0m\" % logging.getLevelName(logging.WARNING))\n # PURPLE\n logging.addLevelName(logging.ERROR, \"\\033[35m%s\\033[0m\" % logging.getLevelName(logging.ERROR))\n # RED\n logging.addLevelName(logging.CRITICAL, \"\\033[31m%s\\033[0m\" % logging.getLevelName(logging.CRITICAL))\n\n # logger_root.info('Logger initialized......')\n\n\n\n","sub_path":"library/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"425402571","text":"# coding: utf-8\n\n\n\"\"\"Implementation of command 'label'.\"\"\"\n\n\nimport sys\nimport os\nimport locale\nimport argparse\nimport subprocess\n\nfrom storage import Storage, NoOptionError\n\n\nstorage = Storage()\n(language, encoding) = locale.getdefaultlocale()\n\n\ndef list_target_content(label, mode):\n try:\n target = storage.get(label)\n flags = '-a' if mode == 'all' else ''\n subprocess.call('ls %s %s' % (target, flags), 
shell=True)\n except NoOptionError:\n sys.stderr.write('%s is not a valid label.\\n' % label)\n sys.exit(1)\n\n\ndef main():\n \"\"\"Entrypoint for the `list` utility.\"\"\"\n parser = argparse.ArgumentParser()\n parser.set_defaults(mode='list')\n parser.add_argument('-a', '--all', action='store_const', dest='mode',\n const='all', help='list all files')\n parser.add_argument('label', nargs='?', help='name of the label')\n\n args = parser.parse_args()\n storage.open_or_create()\n\n if not args.label:\n parser.error('can\\'t list without specify a label.')\n\n args.label = unicode(args.label, encoding)\n list_target_content(args.label, args.mode)","sub_path":"goto/tolist.py","file_name":"tolist.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"617638950","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.PostList.as_view(), name='blog'),\n path('post//', views.PostDetail.as_view(), name='post_detail'),\n path('add-post/', views.add_post, name='add_post'),\n url(r'^edit-post/(?P\\d+)/$', views.edit_post, name='edit_post'),\n url(r'^delete-post/(?P\\d+)/$', views.delete_post, name='delete_post'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333926295","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport argparse\nfrom tqdm import tqdm\nimport torch\nfrom torch.backends import cudnn\nfrom torch.utils import data\n\ncur_path = os.path.dirname(__file__)\nsys.path.insert(0, os.path.join(cur_path, '../..'))\nfrom model import model_zoo\nfrom data.helper import make_data_sampler\nfrom data.batchify import Tuple, Stack, Pad, Empty\nfrom data.pascal_voc.detection_cv import VOCDetection\nfrom data.mscoco.detection_cv import COCODetection\nfrom data.transforms.ssd_cv import SSDDefaultValTransform\nfrom utils.metrics.voc_detection_pt import VOC07MApMetric\nfrom utils.metrics.coco_detection import COCODetectionMetric\nimport utils as ptutil\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Eval SSD networks.')\n parser.add_argument('--network', type=str, default='resnet50_v1s',\n help=\"Base network name\")\n parser.add_argument('--data-shape', type=int, default=512,\n help=\"Input data shape\")\n parser.add_argument('--batch-size', type=int, default=8,\n help='Training mini-batch size')\n parser.add_argument('--dataset', type=str, default='voc',\n help='Training dataset.')\n parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,\n default=4, help='Number of data workers')\n parser.add_argument('--cuda', type=ptutil.str2bool, default='true',\n help='Training with GPUs.')\n parser.add_argument('--pretrained', type=str, default='True',\n help='Load weights from previously saved parameters.')\n parser.add_argument('--save-prefix', type=str, default='',\n help='Saving parameter prefix')\n # parser.add_argument('--root', type=str, default=os.path.expanduser('~/.torch/models'),\n # help='Saving parameter prefix')\n parser.add_argument('--root', type=str, default='/home/ace/cbb/own/pretrained/ssd/dist',\n help='Saving parameter prefix')\n\n # device\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--init-method', type=str, default=\"env://\")\n\n args = 
parser.parse_args()\n return args\n\n\ndef get_dataset(dataset, data_shape):\n transform = SSDDefaultValTransform(data_shape, data_shape)\n if dataset.lower() == 'voc':\n val_dataset = VOCDetection(splits=[(2007, 'test')], transform=transform)\n val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)\n elif dataset.lower() == 'coco':\n val_dataset = COCODetection(splits='instances_val2017', skip_empty=False, transform=transform, keep_idx=True)\n val_metric = COCODetectionMetric(\n val_dataset, args.save_prefix + '_eval', cleanup=True,\n data_shape=(data_shape, data_shape))\n else:\n raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))\n return val_dataset, val_metric\n\n\ndef get_dataloader(val_dataset, batch_size, num_workers, distributed, coco=False):\n \"\"\"Get dataloader.\"\"\"\n if coco:\n batchify_fn = Tuple(Stack(), Pad(pad_val=-1), Empty())\n else:\n batchify_fn = Tuple(Stack(), Pad(pad_val=-1))\n sampler = make_data_sampler(val_dataset, False, distributed)\n batch_sampler = data.BatchSampler(sampler=sampler, batch_size=batch_size, drop_last=False)\n val_loader = data.DataLoader(val_dataset, batch_sampler=batch_sampler, collate_fn=batchify_fn,\n num_workers=num_workers)\n return val_loader\n\n\ndef validate(net, val_data, device, metric, coco=False):\n net.eval()\n metric.reset()\n tbar = tqdm(val_data)\n\n for ib, batch in enumerate(tbar):\n x = batch[0].to(device)\n y = batch[1].to(device)\n with torch.no_grad():\n ids, scores, bboxes = net(x)\n # clip to image size\n bboxes.clamp_(0, x.shape[2])\n # split ground truths\n gt_ids = y.narrow(-1, 4, 1)\n gt_bboxes = y.narrow(-1, 0, 4)\n gt_difficults = y.narrow(-1, 5, 1) if y.shape[-1] > 5 else None\n if coco:\n metric.update(bboxes, ids, scores, batch[2], gt_bboxes, gt_ids, gt_difficults)\n else:\n metric.update(bboxes, ids, scores, gt_bboxes, gt_ids, gt_difficults)\n return metric\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n # device\n device = torch.device('cpu')\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n distributed = num_gpus > 1\n if args.cuda and torch.cuda.is_available():\n cudnn.benchmark = True\n device = torch.device('cuda')\n else:\n distributed = False\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\", init_method=args.init_method)\n\n # network\n net_name = '_'.join(('ssd', str(args.data_shape), args.network, args.dataset))\n args.save_prefix += net_name\n if args.pretrained.lower() in ['true', '1', 'yes', 't']:\n net = model_zoo.get_model(net_name, pretrained=True, root=args.root)\n else:\n net = model_zoo.get_model(net_name, pretrained=False)\n net.load_parameters(args.pretrained.strip())\n net.to(device)\n net.set_nms(nms_thresh=0.45, nms_topk=400)\n\n # testing data\n val_dataset, val_metric = get_dataset(args.dataset, args.data_shape)\n val_data = get_dataloader(val_dataset, args.batch_size, args.num_workers, distributed, args.dataset == 'coco')\n classes = val_dataset.classes # class names\n\n # testing\n val_metric = validate(net, val_data, device, val_metric, args.dataset == 'coco')\n ptutil.synchronize()\n names, values = ptutil.accumulate_metric(val_metric)\n if ptutil.is_main_process():\n for k, v in zip(names, values):\n print(k, v)\n","sub_path":"scripts/ssd/eval_ssd_cv.py","file_name":"eval_ssd_cv.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
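A note on the evaluation script in the record above: it calls a make_data_sampler(dataset, shuffle, distributed) helper imported from data.helper, whose body is not included in this record. The following is a minimal sketch only; the signature is taken from the call site above, while the body is an assumption built on PyTorch's public sampler API (DistributedSampler, RandomSampler, SequentialSampler), not the project's actual implementation:

    from torch.utils.data import RandomSampler, SequentialSampler
    from torch.utils.data.distributed import DistributedSampler

    # Hypothetical make_data_sampler-style helper (assumed, not from the record):
    def make_data_sampler(dataset, shuffle, distributed):
        # in a distributed run, each process must draw a disjoint shard of the dataset
        if distributed:
            return DistributedSampler(dataset, shuffle=shuffle)
        # single-process fallback: shuffled or sequential iteration
        return RandomSampler(dataset) if shuffle else SequentialSampler(dataset)

Whatever sampler is returned is then wrapped in a BatchSampler, as get_dataloader does above, so batching behaves the same in both single-GPU and distributed modes.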
+{"seq_id":"543065142","text":"\"\"\"Controller global functions.\r\n\r\nContains functions and classes for data cleaning and validation.\r\nContains a pagination function that is used in most query results.\r\n\r\n\"\"\"\r\n\r\nimport bleach\r\nimport decimal\r\nimport datetime\r\n\r\nfrom flask_bcrypt import Bcrypt\r\nfrom jsonschema import validators, Draft4Validator\r\nfrom jsonschema.exceptions import ValidationError\r\n\r\nbcrypt = Bcrypt()\r\n\r\n\r\ndef not_empty(validator, value, instance, schema):\r\n \"\"\"Raises error if the casted string is empty.\"\"\"\r\n if value and len(str(instance)) == 0:\r\n yield ValidationError(\"Required\")\r\n\r\n\r\ndef is_id(validator, value, instance, schema):\r\n \"\"\"Raises error if instance cannot be cast into int or is not None\"\"\"\r\n if value:\r\n try:\r\n test = int(instance)\r\n except:\r\n if instance is not None:\r\n yield ValidationError(\"Invalid ID\")\r\n\r\n# Get all existing validators and add in custom ones.\r\nall_validators = dict(Draft4Validator.VALIDATORS)\r\n#all_validators[\"not_empty\"] = not_empty\r\n\r\nCustomValidator = validators.create(\r\n meta_schema=Draft4Validator.META_SCHEMA,\r\n validators=all_validators\r\n)\r\n\r\n\r\ndef clean_data(request_data, schema=None):\r\n \"\"\" Data cleaner.\r\n\r\n Casts against a schema, if present.\r\n Bleaches all strings.\r\n Returns a dictionary of cleaned data.\r\n\r\n \"\"\"\r\n return_dict = dict()\r\n\r\n for k, v in request_data.items():\r\n if (schema is not None and\r\n k in schema[\"properties\"] and\r\n \"type\" in schema[\"properties\"][k]):\r\n # Cast\r\n cast_type = schema[\"properties\"][k][\"type\"]\r\n if cast_type == \"string\":\r\n v = str(v)\r\n elif cast_type == \"integer\":\r\n v = int(v)\r\n if k in [\"limit\", \"offest\"] and v < 0:\r\n v = 0\r\n elif cast_type == \"id\" and v is not None:\r\n v = int(v)\r\n\r\n if isinstance(v, str):\r\n v = bleach.clean(v)\r\n\r\n return_dict[k] = v\r\n\r\n return return_dict\r\n\r\n\r\ndef populate_return(query_results, limit, offset):\r\n \"\"\" Populates return dictionary for paginated responses.\r\n\r\n Everything that is paginated is overqueried by one to determine if more\r\n pages exist. This is used to determine the 'has next' parameter, but the\r\n extra result needs to be removed from the returned results.\r\n\r\n The offset can be used to determine if there are any previous pages.\r\n\r\n \"\"\"\r\n to_return = dict()\r\n to_return[\"has_next\"] = False if len(query_results) < limit else True\r\n to_return[\"has_prev\"] = False if offset == 0 else True\r\n to_return[\"query_result\"] = [i for i in query_results[:limit]]\r\n\r\n return to_return\r\n\r\n\r\ndef serialize(to_serialize):\r\n \"\"\"Serializes a list of objects based on attribute types.\"\"\"\r\n\r\n # ! NEEDS TO SUPPORT RECURSION !\r\n # ! 
NEEDS TO POPULATE FIELDS ATTRIBUTE FOR RESPONSE DICTIONARY !\r\n\r\n '''\r\n return_list = []\r\n\r\n for to_serialize in object_list[:1]:\r\n current_serialize_dict = dict()\r\n for field in to_serialize._fields:\r\n value = getattr(to_serialize, field)\r\n if value is None:\r\n current_serialize_dict[field] = value\r\n elif type(value) in [int, str, bool]:\r\n current_serialize_dict[field] = value\r\n elif type(value) in [list, dict]:\r\n # RECURSION HERE\r\n pass\r\n elif type(value) == decimal.Decimal:\r\n current_serialize_dict[field] = str(value)\r\n elif type(value) == datetime.datetime:\r\n current_serialize_dict[field] = str(value)\r\n\r\n return_list.append(current_serialize_dict)\r\n\r\n return return_list\r\n '''\r\n #print(to_serialize)\r\n\r\n if to_serialize is None:\r\n return (None, None)\r\n elif type(to_serialize) == int:\r\n return (to_serialize, \"integer\")\r\n elif type(to_serialize) == str:\r\n return (to_serialize, \"string\")\r\n elif type(to_serialize) == bool:\r\n return (to_serialize, \"boolean\")\r\n elif type(to_serialize) == decimal.Decimal:\r\n return (str(to_serialize), \"decimal\")\r\n elif type(to_serialize) == list:\r\n return_list = []\r\n # Handle results of [None]\r\n if not (len(to_serialize) == 1 and to_serialize[0] == None):\r\n for to_reserialize in to_serialize:\r\n return_list.append(serialize(to_reserialize))\r\n return (return_list, \"list\")\r\n elif type(to_serialize) == datetime.datetime:\r\n return (to_serialize.strftime(\"%Y-%m-%d %H:%M:%S\"), \"datetime\")\r\n else:\r\n raise TypeError(\"Serializer for type {} not implemented.\".format(type(to_serialize)))\r\n\r\n\r\ndef process_result(object_list, required_list=[], exclude_list=[]):\r\n \"\"\"Serializes values in passed list and applies metadata for return\"\"\"\r\n if type(object_list) != list:\r\n object_list = [object_list]\r\n\r\n return_list = []\r\n\r\n for to_process in object_list:\r\n field_dict = dict()\r\n\r\n iter_list = to_process._fields if hasattr(to_process, \"_fields\") else [x.name for x in to_process.__table__.columns]\r\n\r\n for index, field in enumerate(iter_list):\r\n if field not in exclude_list:\r\n current_field_dict = dict()\r\n\r\n value = getattr(to_process, field)\r\n serialized_value = serialize(value)\r\n\r\n current_field_dict[\"type\"] = serialized_value[1]\r\n current_field_dict[\"label\"] = field\r\n current_field_dict[\"required\"] = True if field in required_list else False\r\n current_field_dict[\"order\"] = index\r\n current_field_dict[\"value_attrib\"] = field\r\n current_field_dict[\"value\"] = serialized_value[0]\r\n\r\n field_dict[field] = current_field_dict\r\n\r\n return_list.append(field_dict)\r\n\r\n return return_list\r\n","sub_path":"app/controllers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"148700465","text":"def bet(t, wealth, round):\r\n budget = wealth[\"corey\"]\r\n \r\n t = dict(t)\r\n wealth = dict(wealth)\r\n \r\n if \"dummy\" in t:\r\n del t[\"dummy\"]\r\n if \"example\" in t:\r\n del t[\"example\"]\r\n if \"total\" in t:\r\n del t[\"total\"]\r\n if \"dummy\" in wealth:\r\n del wealth[\"dummy\"]\r\n if \"example\" in wealth:\r\n del wealth[\"example\"]\r\n if \"total\" in wealth:\r\n del wealth[\"total\"]\r\n \r\n numPart = len(t.keys()) - 1\r\n weights = {} #{\"jack\": 5, \"scott\": 2, \"jessica\": 2, \"william\": 3, \"james\": 2, \"mladen\": 3}\r\n for k in t.keys():\r\n if k == \"dummy\" 
or k == \"total\":\r\n continue\r\n if not(weights.has_key(k)):\r\n weights[k] = wealth[k] * wealth[k]\r\n\r\n totalWealth = sum(wealth.values())\r\n totalWeight = sum(weights.values())\r\n sumBet = 0\r\n for k in t.keys():\r\n if k == \"dummy\" or k == \"total\":\r\n continue\r\n wealthBet = (t[k][0] - t[k][1])\r\n sumBet += wealthBet * weights[k]\r\n if sumBet > 0:\r\n return (0.3*budget, 0.0)\r\n elif sumBet < 0:\r\n return (0.0, 0.3 * budget)\r\n else:\r\n return (0.0,0.0)\r\n","sub_path":"players/corey.py","file_name":"corey.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"463250564","text":"from typing import List, Iterator\n\nimport pytest\nfrom requests import Response\nfrom starlette.status import HTTP_204_NO_CONTENT, HTTP_200_OK\n\nfrom .utils import TransactionsResponseParser, TransactionsResponseValidator\nfrom ..base_test_case import BaseTestCase\n\n\nclass ListTransactionsCase(BaseTestCase):\n transactions_data: List[dict]\n\n def __init__(self, client, user_data, transactions_data: List[dict]):\n super().__init__(client, user_data)\n self.transactions_data = transactions_data\n\n def create_transactions(self) -> Iterator[Response]:\n for transaction in self.transactions_data:\n yield self.client.post(\n 'transactions',\n json=transaction,\n headers={\n 'Authorization': f'Bearer {self.token}'\n }\n )\n\n def fetch_all_transactions(self, ticker: str) -> Response:\n return self.client.get(\n 'transactions',\n headers={'Authorization': f'Bearer {self.token}'},\n params={'ticker': ticker}\n )\n\n\n@pytest.fixture(scope='module')\ndef case(client, user_data, transactions_data):\n case = ListTransactionsCase(client, user_data, transactions_data)\n\n case.register_user()\n case.authenticate_user()\n\n return case\n\n\ndef test_all_transactions_were_accepted(case):\n for response in case.create_transactions():\n assert response.status_code == HTTP_204_NO_CONTENT\n\n\n@pytest.fixture(scope='module')\ndef transactions_response(case, ticker) -> Response:\n return case.fetch_all_transactions(ticker)\n\n\ndef test_response_is_successful(transactions_response):\n assert transactions_response.status_code == HTTP_200_OK\n\n\n@pytest.fixture(scope='function')\ndef transactions_response_validator(transactions_response):\n return TransactionsResponseValidator(transactions_response)\n\n\ndef test_response_data_format_is_correct(transactions_response_validator):\n assert transactions_response_validator.validate()\n\n\n@pytest.fixture(scope='function')\ndef transactions_response_parser(transactions_response):\n return TransactionsResponseParser(transactions_response)\n\n\ndef test_transactions_are_the_same_created(\n transactions_response_parser,\n transactions_data,\n):\n transactions_recieved = transactions_response_parser.get_parsed()\n\n for created, recieved in zip(transactions_data, reversed(transactions_recieved)):\n assert created.get('ticker') == recieved.ticker\n assert created.get('quantity') == recieved.quantity\n assert created.get('total_value') == recieved.total_value\n","sub_path":"tests/services/transactions/test_get_all_transactions.py","file_name":"test_get_all_transactions.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"484957258","text":"from contextlib import contextmanager\nfrom itertools import count\nfrom collections import OrderedDict\n\nfrom . 
import error\n\n\ndef console(prompt = '> ', **kwargs):\n import readline\n readline.parse_and_bind(\"tab: complete\")\n\n history = kwargs.get('history', None)\n history_size = kwargs.get('history_size', 1000)\n\n if history:\n try:\n readline.read_history_file(history)\n except FileNotFoundError:\n pass\n \n try:\n while True:\n yield input(prompt)\n finally:\n if history:\n readline.set_history_length( history_size )\n readline.write_history_file(history)\n \ndef coroutine(f):\n\n def start(*args, **kwargs):\n res = f(*args, **kwargs)\n next(res)\n\n return res\n\n return start\n \n\n\ndef extends(cls):\n '''parametrized decorator to extend a class with a method'''\n from types import MethodType\n \n def decorator(func):\n setattr(cls, func.__name__, func)\n return func\n \n return decorator\n\n\nfrom types import GeneratorType\n\ndef producer(func):\n '''converts a regular function/generator into a generator yieling (self, result)\n before returning result'''\n \n def result(self, *args):\n res = func(self, *args)\n\n if type(res) is GeneratorType:\n res = yield from res\n\n yield (self, res)\n return res\n\n result.__name__ = func.__name__\n return result\n\n\nclass MultiMethod(object):\n \n def __init__(self, name):\n self.name = name\n self.typemap = {}\n \n def __call__(self, *args):\n types = tuple( type(arg) for arg in args) \n function = self.typemap.get(types)\n\n if function is None:\n raise TypeError(\"no match\")\n \n return function(*args)\n\n \n def register(self, types, function):\n if types in self.typemap:\n raise TypeError(\"duplicate registration\")\n self.typemap[types] = function\n\n\n registry = {}\n\ndef multimethod(*types):\n \n def register(function):\n name = function.__name__\n mm = MultiMethod.registry.get(name)\n \n if mm is None:\n mm = MultiMethod.registry[name] = MultiMethod(name)\n \n mm.register(types, function)\n return mm\n \n return register \n \n \nclass Context(OrderedDict):\n\n '''hierarchical map: variable -> stuff'''\n \n def __init__(self, parent = None):\n super(Context, self).__init__()\n self.parent = parent\n \n def __getitem__(self, name):\n ctx, res = self.context(name)\n return res\n \n def __delitem__(self, name):\n try:\n return super(Context, self).__delitem__(name)\n except KeyError:\n if self.parent is None: raise error.UnboundVariable(name)\n del self.parent[name]\n\n def __setitem__(self, name, value):\n if name in self: raise error.Redefinition(name)\n super(Context, self).__setitem__(name, value)\n \n\n \n def context(self, name):\n try:\n return self, super(Context, self).__getitem__(name)\n except KeyError:\n if self.parent is None: raise error.UnboundVariable(name)\n return self.parent.context(name)\n\n\nclass UnionFind(object):\n\n def __init__(self):\n self.parent = {}\n \n\n def find(self, value):\n\n try:\n p = self.parent[value]\n if p == value: return p\n else: return self.find(p)\n\n # TODO path compression\n \n except KeyError:\n # new class\n self.parent[value] = value\n return value\n\n\n # x, y must be representants of their classes\n # y becomes the representant\n def link(self, x, y):\n self.parent[x] = y\n\n \n\n\n\n# TODO move crap to tool\nDEBUG = 0\nindent_level = 0\n\n@contextmanager\ndef debug_level(value):\n '''temporarily enable/disable debugging'''\n global DEBUG\n old = DEBUG\n\n try:\n DEBUG = value\n yield\n finally:\n DEBUG = old\n\n\ndef debug(*args):\n if DEBUG > 0: print(' ' * indent_level, *args)\n \n@contextmanager\ndef indentation():\n global indent_level\n old = indent_level\n try:\n 
indent_level += 1\n        yield\n\n    finally:\n        indent_level -= 1\n\n\ndef hexid(x):\n    return ('%x' % id(x) )[-4:]\n\n\n@contextmanager\ndef debug_scope(name):\n    name = str(name)\n    debug('>', name)\n    cell = [name]\n    with indentation():\n        yield cell\n    debug('<', cell[0] )\n\n\n\ndef debug_func(name = None):\n    \n    def decorator(func):\n        n = name or func.__name__\n        \n        def decorated(*args, **kwargs):\n            with debug_scope(n):\n                return func(*args, **kwargs)\n\n        decorated.__name__ = func.__name__\n\n        return decorated\n    \n    return decorator\n\n\n\n\nfrom contextlib import contextmanager\nimport ctypes\nimport io\nimport os, sys\nimport tempfile\n\nlibc = ctypes.CDLL(None)\n\ntry:\n    c_stdout = ctypes.c_void_p.in_dll(libc, 'stdout')\nexcept ValueError:\n    c_stdout = ctypes.c_void_p.in_dll(libc, '__stdoutp')\n    \n@contextmanager\ndef stdout_redirector(stream):\n    # The original fd stdout points to. Usually 1 on POSIX systems.\n    original_stdout_fd = sys.stdout.fileno()\n\n    def _redirect_stdout(to_fd):\n        \"\"\"Redirect stdout to the given file descriptor.\"\"\"\n        # Flush the C-level buffer stdout\n        libc.fflush(c_stdout)\n        \n        # Flush and close sys.stdout - also closes the file descriptor (fd)\n        sys.stdout.close()\n        \n        # Make original_stdout_fd point to the same file as to_fd\n        os.dup2(to_fd, original_stdout_fd)\n        \n        # Create a new sys.stdout that points to the redirected fd\n        sys.stdout = io.TextIOWrapper(os.fdopen(original_stdout_fd, 'wb'))\n\n    # Save a copy of the original stdout fd in saved_stdout_fd\n    saved_stdout_fd = os.dup(original_stdout_fd)\n    try:\n        # Create a temporary file and redirect stdout to it\n        tfile = tempfile.TemporaryFile(mode='w+b')\n        _redirect_stdout(tfile.fileno())\n        # Yield to caller, then redirect stdout back to the saved fd\n        yield\n        _redirect_stdout(saved_stdout_fd)\n        # Copy contents of temporary file to the given stream\n        tfile.flush()\n        tfile.seek(0, io.SEEK_SET)\n        stream.write(tfile.read())\n    finally:\n        tfile.close()\n        os.close(saved_stdout_fd)\n\n\n\nimport os\nimport sys\nfrom contextlib import contextmanager\nimport tempfile\n\n@contextmanager\ndef stdout_redirected(to = os.devnull):\n    '''\n    import os\n\n    with stdout_redirected(to=filename):\n        print(\"from Python\")\n        os.system(\"echo non-Python applications are also supported\")\n    '''\n    fd = sys.stdout.fileno()\n\n    ##### assert that Python and C stdio write using the same file descriptor\n    # \n\n    assert libc.fileno(c_stdout) == fd == 1\n\n    def _redirect_stdout(to):\n        sys.stdout.close() # + implicit flush()\n        os.dup2(to.fileno(), fd) # fd writes to 'to' file\n        sys.stdout = os.fdopen(fd, 'w') # Python writes to fd\n\n    with os.fdopen(os.dup(fd), 'w') as old_stdout:\n        with open(to, 'w') as file:\n            _redirect_stdout(to=file)\n        try:\n            yield # allow code to be run with the redirected stdout\n        finally:\n            # cout.flush()\n            _redirect_stdout(to=old_stdout) # restore stdout.\n                                            # buffering and flags such as\n                                            # CLOEXEC may be different \n    # with os.fdopen( libc.fileno(c_stdout) ) as cout:\n    #     cout.flush()\n","sub_path":"slip/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"171105011","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Read the source images to composite\nimg1 = cv2.imread('../img/drawing.jpg')\nimg2 = cv2.imread('../img/my_hand.jpg')\n\n# Create a mask; set the entire area of the image to composite to 255\nmask = np.full_like(img1, 255)\n\n# Compute the compositing target coordinates (center of img2)\nheight, width = img2.shape[:2]\ncenter = (width//2, height//2)\n\n# 
Composite with seamlessClone\nnormal = cv2.seamlessClone(img1, img2, mask, center, cv2.NORMAL_CLONE)\nmixed = cv2.seamlessClone(img1, img2, mask, center, cv2.MIXED_CLONE)\n\n# Display the results\ncv2.imshow('normal', normal)\ncv2.imshow('mixed', mixed)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"Python_OpenCV_Project-master/Chapter4. Image processing basic/seamlessclone.py","file_name":"seamlessclone.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53505330","text":"num_stages = 3\nnum_proposals = 100\nconv_kernel_size = 1\nmodel = dict(\n    type='KNet',\n    backbone=dict(\n        type='ResNet',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=1,\n        norm_cfg=dict(type='BN', requires_grad=True),\n        norm_eval=True,\n        style='pytorch',\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),\n    neck=dict(\n        type='FPN',\n        in_channels=[256, 512, 1024, 2048],\n        out_channels=256,\n        start_level=0,\n        add_extra_convs='on_input',\n        num_outs=4),\n    rpn_head=dict(\n        type='ConvKernelHead',\n        num_classes=133, # modified for panoptic\n        cat_stuff_mask=True, # modified for panoptic\n        conv_kernel_size=conv_kernel_size,\n        feat_downsample_stride=2,\n        feat_refine_stride=1,\n        feat_refine=False,\n        use_binary=True,\n        num_loc_convs=1,\n        num_seg_convs=1,\n        conv_normal_init=True,\n        localization_fpn=dict(\n            type='SemanticFPNWrapper',\n            in_channels=256,\n            feat_channels=256,\n            out_channels=256,\n            start_level=0,\n            end_level=3,\n            upsample_times=2,\n            positional_encoding=dict(\n                type='SinePositionalEncoding', num_feats=128, normalize=True),\n            cat_coors=False,\n            cat_coors_level=3,\n            fuse_by_cat=False,\n            return_list=False,\n            num_aux_convs=1,\n            norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),\n        num_proposals=num_proposals,\n        proposal_feats_with_obj=True,\n        xavier_init_kernel=False,\n        kernel_init_std=1,\n        num_cls_fcs=1,\n        in_channels=256,\n        feat_transform_cfg=None,\n        loss_rank=dict(\n            type='CrossEntropyLoss',\n            use_sigmoid=False,\n            loss_weight=0.1),\n        loss_seg=dict(\n            type='FocalLoss',\n            use_sigmoid=True,\n            gamma=2.0,\n            alpha=0.25,\n            loss_weight=1.0),\n        loss_mask=dict(\n            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n        loss_dice=dict(type='DiceLoss', loss_weight=4.0)),\n    roi_head=dict(\n        type='KernelIterHead',\n        do_panoptic=True,\n        num_stages=num_stages,\n        stage_loss_weights=[1] * num_stages,\n        proposal_feature_channel=256,\n        mask_head=[\n            dict(\n                type='KernelUpdateHead',\n                num_classes=133,\n                num_ffn_fcs=2,\n                num_heads=8,\n                num_cls_fcs=1,\n                num_mask_fcs=1,\n                feedforward_channels=2048,\n                in_channels=256,\n                out_channels=256,\n                dropout=0.0,\n                mask_thr=0.5,\n                conv_kernel_size=conv_kernel_size,\n                mask_upsample_stride=2,\n                ffn_act_cfg=dict(type='ReLU', inplace=True),\n                with_ffn=True,\n                feat_transform_cfg=dict(\n                    conv_cfg=dict(type='Conv2d'), act_cfg=None),\n                kernel_updator_cfg=dict(\n                    type='KernelUpdator',\n                    in_channels=256,\n                    feat_channels=256,\n                    out_channels=256,\n                    input_feat_shape=3,\n                    act_cfg=dict(type='ReLU', inplace=True),\n                    norm_cfg=dict(type='LN')),\n                loss_rank=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=False,\n                    loss_weight=0.1),\n                loss_mask=dict(\n                    type='CrossEntropyLoss',\n                    use_sigmoid=True,\n                    loss_weight=1.0),\n                loss_dice=dict(\n                    type='DiceLoss', loss_weight=4.0),\n                loss_cls=dict(\n                    type='FocalLoss',\n                    use_sigmoid=True,\n                    gamma=2.0,\n                    alpha=0.25,\n                    loss_weight=2.0)) for _ in range(num_stages)\n        ]),\n    # training and testing settings\n    train_cfg=dict(\n        rpn=dict(\n            assigner=dict(\n                
type='MaskHungarianAssigner',\n cls_cost=dict(type='FocalLossCost', weight=2.0),\n dice_cost=dict(type='DiceCost', weight=4.0, pred_act=True),\n mask_cost=dict(type='MaskCost', weight=1.0, pred_act=True)),\n sampler=dict(type='MaskPseudoSampler'),\n pos_weight=1),\n rcnn=[\n dict(\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='FocalLossCost', weight=2.0),\n dice_cost=dict(type='DiceCost', weight=4.0, pred_act=True),\n mask_cost=dict(type='MaskCost', weight=1.0,\n pred_act=True)),\n sampler=dict(type='MaskPseudoSampler'),\n pos_weight=1) for _ in range(num_stages)\n ]),\n test_cfg=dict(\n rpn=None,\n rcnn=dict(\n max_per_img=num_proposals,\n mask_thr=0.5,\n stuff_score_thr=0.05,\n merge_stuff_thing=dict(\n overlap_thr=0.6,\n iou_thr=0.5, stuff_max_area=4096, instance_score_thr=0.3))))\n\ncustom_imports = dict(\n imports=[\n 'knet.det.knet',\n 'knet.det.kernel_head',\n 'knet.det.kernel_iter_head',\n 'knet.det.kernel_update_head',\n 'knet.det.semantic_fpn_wrapper',\n 'knet.kernel_updator',\n 'knet.det.mask_hungarian_assigner',\n 'knet.det.mask_pseudo_sampler',\n ],\n allow_failed_imports=False)\n","sub_path":"configs/det/_base_/models/knet_s3_r50_fpn_panoptic.py","file_name":"knet_s3_r50_fpn_panoptic.py","file_ext":"py","file_size_in_byte":5713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"296828582","text":"from typing import AnyStr\n\n\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n hash_map = {}\n for a, b in zip(s, t):\n if hash_map.get(a) == None:\n hash_map[a] = 1\n else:\n hash_map[a] += 1\n if hash_map.get(b) == None:\n hash_map[b] = -1\n else:\n hash_map[b] -= 1\n \n for i in hash_map:\n if hash_map.get(i) == None or hash_map.get(i) != 0:\n return False\n\n return True\n\nif __name__ == '__main__':\n inputs_s = [\"anagram\", \"rat\", \"aacc\"]\n inputs_t = [\"nagaram\", \"car\", \"ccac\"]\n for i in range(len(inputs_s)):\n ans = Solution().isAnagram(inputs_s[i], inputs_t[i])\n print(ans)\n","sub_path":"leetcode/0242_Valid_Anagram.py","file_name":"0242_Valid_Anagram.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"318634065","text":"from mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport sys\nimport Wind\nimport numpy as np\nimport json\nimport Geometry\nimport math\n\ndef float_range(Start, End, Step):\n List = []\n i = Start\n\n while i <= End:\n i = round(i, 2)\n List.append(i)\n i += Step\n return List\n\nclass Chart:\n def __init__(self, LatitudeBounds=[25, 49.5], LongitudeBounds=[-125, -67.5]):\n # set the bounds of the map\n self.LatitudeBounds = LatitudeBounds\n self.LongitudeBounds = LongitudeBounds \n\n self.map = Basemap(projection='tmerc', \n lat_0=(LatitudeBounds[0] + LatitudeBounds[1]) / 2.0, lon_0=(LongitudeBounds[0] + LongitudeBounds[1]) / 2.0,\n llcrnrlon=LongitudeBounds[0], \n llcrnrlat=LatitudeBounds[0], \n urcrnrlon=LongitudeBounds[1], \n urcrnrlat=LatitudeBounds[1],\n resolution='l')\n\n # setup all of the points that will be graphed on this map\n self.XPoints = []\n self.YPoints = []\n self.PointColors = []\n\n #self.map.shadedrelief()\n #self.map.drawcountries()\n #self.map.drawstates()\n #self.map.shadedrelief()\n\n def DrawCircle(self, Latitude, Longitude, Color=\"black\"):\n x, y = self.map([Longitude], [Latitude])\n self.PointColors.append(Color)\n self.XPoints.append(x)\n 
self.YPoints.append(y)\n\n def DrawLine(self, StartPosition, EndPosition, Color):\n Latitudes = [StartPosition[0], EndPosition[0]]\n Longitudes = [StartPosition[1], EndPosition[1]]\n\n x, y = self.map(Longitudes, Latitudes)\n\n self.map.plot(x, y, marker=None, color=Color)\n\n\n def ArrowsOnMap(self, Positions, Vectors, MaxLength=80000.0):\n LongestVector = sorted([np.linalg.norm(Vector) for Vector in Vectors], reverse=True)[0]\n\n k = MaxLength / LongestVector\n\n X, Y = self.map([Position[1] for Position in Positions], [Position[0] for Position in Positions]) # lon, lat\n\n for i in range(len(Positions)):\n DX = Vectors[i][0] * k\n DY = Vectors[i][1] * k\n\n plt.arrow(X[i], Y[i], DX, DY, width=MaxLength/10, head_width=MaxLength / 2, head_length=MaxLength / 2, color=\"black\")\n \n def Scatter(self, Points):\n # convert to map coords \n ProjectedX, ProjectedY = self.map([Point[1] for Point in Points], [Point[0] for Point in Points]) # lon, lat\n self.map.scatter(ProjectedX, ProjectedY, s=50, c=[1 for color in range(len(ProjectedX))])\n\n def Contour(self, DataMatrix):\n # get the gradiant of lats and lons\n LongitudeShape = DataMatrix.shape[1]\n LatitudeShape = DataMatrix.shape[0]\n LongitudeGradiant = np.linspace(self.LongitudeBounds[0], self.LongitudeBounds[1], LongitudeShape)\n LatitudeGradiant = np.linspace(self.LatitudeBounds[0], self.LatitudeBounds[1], LatitudeShape)\n\n # convert to two matrices\n LongitudeMatrix, LatitudeMatrix = np.meshgrid(LongitudeGradiant, LatitudeGradiant)\n\n # convert lat lon to map coordinates\n for i in range(len(LatitudeMatrix)):\n for j in range(len(LatitudeMatrix[0])):\n x, y = self.map([LongitudeMatrix[i][j]], [LatitudeMatrix[i][j]])\n LongitudeMatrix[i][j] = x[0]\n LatitudeMatrix[i][j] = y[0]\n\n levels = [i for i in range(0, 3500, 50)]\n\n # draw the actual contours\n contour = self.map.contourf(LongitudeMatrix, LatitudeMatrix, DataMatrix, levels, alpha=.35)\n plt.colorbar(contour)\n\n\n def Show(self):\n # create size array\n Size = [10]*len(self.XPoints)\n\n # graph all of the points\n plt.scatter(self.XPoints, self.YPoints, Size, marker='D', c=self.PointColors)\n\n plt.show()","sub_path":"tools/Visualization.py","file_name":"Visualization.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95756220","text":"import json\nimport os, time\nfrom multiprocessing import Process\n\nfrom malib.agents.agent_factory import *\nfrom malib.environments import DifferentialGame\nfrom malib.environments.fortattack import make_fortattack_env\nfrom malib.logger.utils import set_logger\nfrom malib.samplers.sampler import SingleSampler, MASampler\nfrom malib.trainers import SATrainer, MATrainer\nfrom malib.utils.random import set_seed\nimport numpy as np, pickle\n\ndef svd_sol(A, b):\n U, sigma, Vt = np.linalg.svd(A)\n sigma[sigma<1e-10] = 0\n sigma_reci = [(1/s if s!=0 else 0) for s in sigma]\n sigma_reci = np.diag(sigma_reci)\n x = Vt.transpose().dot(sigma_reci).dot(U.transpose()).dot(b)\n return(x)\n\ndef isCaught(observation_n, th):\n pos = np.array([obs[2:4] for obs in observation_n])\n posAdv, posAgent = pos[:3,:], pos[3,:]\n dists = np.sqrt(np.sum(np.square(posAdv-posAgent), axis = 1))\n \n caught = False\n if np.max(dists) <= th:\n A = np.concatenate((posAdv.transpose(), np.ones((1,3))), axis = 0)\n b = np.concatenate((posAgent, np.ones(1))).reshape(3,1)\n alpha = svd_sol(A,b)\n if all(alpha>=0) and all(alpha<=1):\n caught = True\n return(caught)\n\ndef 
main():\n # PR2 - empirical estimation of opponent conditional policy\n # PR2S - soft estimation of opponent conditional policy\n settings = [\n # 'ROMMEO_ROMMEO_ROMMEO_ROMMEO',\n # 'PR2S_PR2S_PR2S',\n # 'PR2_PR2_PR2',\n # 'PR2_PR2',\n # 'PR2_PR2_PR2_PR2',\n 'PR2_PR2_PR2_PR2',\n # 'PR2_PR2_PR2_DDPG'\n # 'SAC_SAC_SAC',\n # 'DDPG_DDPG_DDPG_DDPG',\n ]\n # game = 'simple_spread'\n # game = 'simple_adversary'\n # game = 'simple_push'\n # game = 'simple_tag'\n # game = 'simple_predator_prey'\n game = 'fortattack-v0' \n checkpoint = 15000\n\n for setting in settings:\n seed = 1 + int(23122134 / (3 + 1))\n path = 'saved_agents/'+game+'/'+setting+'/agents_ckpt_'+str(checkpoint-1)+'.pickle'\n # path = 'saved_agents/'+game+'/'+setting+'_individual_reward_only/agents_ckpt_'+str(checkpoint-1)+'.pickle'\n # path = 'saved_agents/'+game+'_different_rew/'+setting+'/agents_ckpt_'+str(checkpoint-1)+'.pickle'\n with open(path, 'rb') as f:\n agents = pickle.load(f)\n\n\n env = make_fortattack_env()\n gymAgents = env.world.agents\n \n th = 0.3\n caughtCount = 0\n totalCaughtSteps = 0\n for i in range(10):\n print(i)\n current_observation_n = env.reset()\n for j in range(200):\n action_n = []\n for agent, current_observation in zip(agents, current_observation_n):\n action = agent.act(current_observation.astype(np.float32))\n action_n.append(np.array(action))\n next_observation_n, reward_n, done_n, info = env.step(action_n)\n \n\n # pos = np.array([obs[2:4] for obs in next_observation_n])\n # dists = np.sqrt(np.sum(np.square(pos[:3,:]-pos[3,:]), axis = 1))\n # print(dists)\n # print('NEW ',np.array([obs[2:4] for obs in next_observation_n]))\n \n env.render(mode=\"rgb_array\")[0]\n # if np.max(dists) <= th:\n # if isCaught(next_observation_n, th):\n # caughtCount += 1\n # totalCaughtSteps += j+1\n # # time.sleep(2)\n # break\n time.sleep(0.05)\n\n current_observation_n = next_observation_n\n \nif __name__ == '__main__':\n main()\n","sub_path":"examples/render_fortattack.py","file_name":"render_fortattack.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"207624668","text":"from tkinter import *\nfrom tkinter import font\nimport urllib\nfrom bs4 import BeautifulSoup\nimport matplotlib\n\nmatplotlib.use('TkAgg')\n\nimport matplotlib.pyplot as plt\n\n\n\n\n\n\nclass App():\n '''Builds the tkinter app to select game'''\n def __init__(self, master, url):\n '''\n :param master: root for this App\n :param url: landing page for brooks baseball\n '''\n\n\n self.title = Label(text=\"Brooks Baseball PitchFX Data\\nChoose a Game\", bg=\"dark blue\", fg=\"white\").grid(column=0, row=0, columnspan=3)\n # select from lists\n self.url = url\n self.page = urllib.request.urlopen(url)\n self.soup = BeautifulSoup(self.page.read(), \"html.parser\")\n\n # make a frame to contain selectors\n self.group = LabelFrame(master, text=\"Select your game and pitcher\", padx=5, pady=5, bg=\"light blue\")\n self.group.grid(column=0, row=1, columnspan=3, rowspan=5, sticky=\"w\" + \"e\")\n\n self.month_list, self.month_numlist = self.get_list(url, \"month\")\n self.month_var = StringVar()\n self.month_var.set(self.month_list[0])\n self.month_drop = OptionMenu(master, self.month_var, *self.month_list).grid(column=0,row=1)\n\n self.day_list, x = self.get_list(url, \"day\")\n self.day_var = StringVar()\n self.day_var.set(self.day_list[0])\n self.day_drop = OptionMenu(master, self.day_var, *self.day_list).grid(column=1,row=1)\n\n self.year_list, x = 
self.get_list(url, \"year\")\n self.year_var = StringVar()\n self.year_var.set(self.year_list[0])\n self.year_drop = OptionMenu(master, self.year_var, *self.year_list).grid(column=2, row=1)\n\n\n\n\n self.submit_date_button = Button(master, text=\"Select Date\", command=lambda:self.submit_date(master, self.month_numlist[get_index(self.month_list, self.month_var)], self.day_var.get() ,self.year_var.get()))\n self.submit_date_button.grid(column=0,row=2)\n\n def submit_date(self, master, month, day, year):\n '''\n Choose the date of your game from 3 dropdowns\n :param master: root\n :param month: month as string number\n :param day: day as string number\n :param year: year as string number\n '''\n self.submit_date_button.destroy()\n\n self.url = \"http://www.brooksbaseball.net/pfxVB/pfx.php?month=%s&day=%s&year=%s&prevDate=43&league=mlb\" % (month, day, year)\n print(self.url)\n self.page = urllib.request.urlopen(self.url)\n self.soup = BeautifulSoup(self.page.read(),\"html.parser\")\n self.game_list, self.gameid_list = self.get_list(self.url, \"game\")\n self.game_var = StringVar()\n longest = 0\n longesti = 0\n for i in range(len(self.game_list)):\n if len(self.game_list[i]) > longest:\n longest = len(self.game_list[i])\n longesti = i\n self.game_var.set(self.game_list[longesti])\n self.game_drop = OptionMenu(master, self.game_var, *self.game_list).grid(column=0, row=3, columnspan=3)\n self.game_submit_button = Button(master, text=\"Select Game\", command=lambda:self.submit_game(master, self.month_numlist[get_index(self.month_list, self.month_var)], self.day_var.get() ,self.year_var.get(), self.gameid_list[get_index(self.game_list, self.game_var)]))\n self.game_submit_button.grid(column=0, row=5)\n\n def submit_game(self, master, month, day, year, game):\n '''\n Choose the game to view\n :param master: root\n :param month: month numberstring\n :param day: day numberstring\n :param year: year numberstirng\n :param game: gameid from brooks baseball\n '''\n self.game_submit_button.destroy()\n self.url = \"http://www.brooksbaseball.net/pfxVB/pfx.php?month=%s&day=%s&year=%s&game=%s&prevDate=53&league=mlb\" % (month, day, year, game)\n print(self.url)\n self.page = urllib.request.urlopen(self.url)\n self.soup = BeautifulSoup(self.page.read(), \"html.parser\")\n self.pitcher_list, self.pitcherid_list = self.get_list(self.url, \"pitchSel\")\n self.pitcher_var = StringVar()\n print(self.pitcher_list)\n self.pitcher_var.set(self.pitcher_list[0])\n self.pitcher_drop = OptionMenu(master, self.pitcher_var, *self.pitcher_list).grid(column=0, row=6, columnspan=3)\n #self.submit_date_button.destroy()\n self.pitcher_submit_button = Button(master, text=\"Select Pitcher\", command=lambda:self.select_pitcher(master, self.month_numlist[get_index(self.month_list, self.month_var)], self.day_var.get() ,self.year_var.get(), self.gameid_list[get_index(self.game_list, self.game_var)], self.pitcherid_list[get_index(self.pitcher_list, self.pitcher_var)]))\n self.pitcher_submit_button.grid(column=0, row=7)\n\n def select_pitcher(self, master, month, day, year, game, pitcher):\n '''\n Select pitcher from selected game\n :param master: root\n :param month: string of month number\n :param day: string of day number\n :param year: string of year number\n :param game: gid from brooks baseball\n :param pitcher: pitcherid from brooks\n '''\n #self.pitcher_submit_button.destroy()\n self.url = \"http://www.brooksbaseball.net/pfxVB/pfx.php?month=%s&day=%s&year=%s&game=%s&pitchSel=%s&league=mlb\" % (month, day, year, game, 
pitcher)\n print(self.url)\n self.page = urllib.request.urlopen(self.url)\n self.soup = BeautifulSoup(self.page.read(), \"html.parser\")\n\n self.plotter = Ploty(game, pitcher)\n\n\n def get_list(self, url, name):\n '''\n Take the url with the new dropdown, and return the labels and values\n :param url: \n :param name: \n :return: label_list and val_list, (val list is used for new url, label is used for display only) \n '''\n my_label_list = [x.text.strip() for x in self.soup.find(\"select\", {\"name\": name}).findAll(\"option\")]\n my_val_list = [x['value'] for x in self.soup.find(\"select\", {\"name\": name}).findAll(\"option\")]\n print(my_val_list)\n return my_label_list, my_val_list\n\n\n\nclass Ploty():\n '''\n object to manage the plot through matplotlib\n '''\n def __init__(self, game, pitcher):\n '''\n Pass game and pitcher to go straight to brooks game data page for that pitcher and day\n :param game: \n :param pitcher: \n '''\n self.link = \"http://www.brooksbaseball.net/pfxVB/tabdel_expanded.php?pitchSel=%s&game=%s\" % (pitcher, game)\n self.page = urllib.request.urlopen(self.link)\n self.soup = BeautifulSoup(self.page.read(), \"html.parser\")\n print(self.soup.prettify())\n self.headers = [x.text.strip() for x in self.soup.find(\"table\").find(\"tr\").findAll(\"th\")]\n\n self.data = [[y for y in x.findAll(\"td\")] for x in self.soup.find(\"table\").findAll(\"tr\")]\n self.data = self.data[1:]\n for i in range(len(self.headers)):\n print(i, \"=\", self.headers[i], \":\", self.data[0][i])\n print(self.data[0])\n\n # pull the x, y data\n self.x_data = [float(x[-6].text.strip()) for x in self.data]\n self.y_data = [float(x[-5].text.strip()) for x in self.data]\n\n self.x_strike = [float(x[-6].text.strip()) for x in self.data if x[9].text.strip() == \"S\"]\n self.y_strike = [float(x[-5].text.strip()) for x in self.data if x[9].text.strip() == \"S\"]\n print(self.x_strike)\n self.x_ball = [float(x[-6].text.strip()) for x in self.data if x[9].text.strip() == \"B\"]\n self.y_ball = [float(x[-5].text.strip()) for x in self.data if x[9].text.strip() == \"B\"]\n\n self.x_inplay = [float(x[-6].text.strip()) for x in self.data if x[9].text.strip() == \"X\"]\n self.y_inplay = [float(x[-5].text.strip()) for x in self.data if x[9].text.strip() == \"X\"]\n\n self.x = [float(x[-6].text.strip()) for x in self.data]\n self.y = [float(x[-5].text.strip()) for x in self.data]\n self.type = [x[15].text.strip() for x in self.data]\n\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(111)\n\n for i in range(len(self.x)):\n ax.annotate(str(self.type[i]), xy=(self.x[i], self.y[i]), horizontalalignment='center', verticalalignment='center')\n\n #plot the pitches\n #plt.scatter(self.x_data, self.y_data)\n plt.scatter(self.x_inplay,self.y_inplay, color=\"b\", s=200, alpha=0.5)\n plt.scatter(self.x_ball, self.y_ball, color=\"g\", s=200, alpha=0.5)\n plt.scatter(self.x_strike, self.y_strike, color=\"r\", s=200, alpha=0.5)\n\n\n plt.title(my_app.pitcher_var.get() + \"\\n\" + my_app.game_var.get() + \"\\n\" + my_app.month_var.get() + \" \" + my_app.day_var.get() + \", \" + my_app.year_var.get())\n\n #draw an approximate strike zone\n x_strike = [-17 / 12 / 2, 17 / 12 / 2]\n y_top_strike = [3.3, 3.3]\n y_bottom_strike = [1.49, 1.49]\n\n y_vert_strike = [1.49, 3.3]\n x_vert_strike_left = [-17 / 12 / 2, -17 / 12 / 2]\n x_vert_strike_right = [17 / 12 / 2, 17 / 12 / 2]\n\n plt.plot(x_strike,y_top_strike, \"r--\")\n plt.plot(x_strike,y_bottom_strike, \"r--\")\n plt.plot(x_vert_strike_left,y_vert_strike, 
\"r--\")\n plt.plot(x_vert_strike_right,y_vert_strike, \"r--\")\n\n plt.xlabel(\"Horizontal Pitch Location (ft)\")\n plt.ylabel(\"Vertical Pitch Location (ft)\")\n\n plt.axis(\"equal\")\n plt.xlim([-2.5, 2.5])\n plt.ylim([0, 5])\n\n\n plt.tight_layout()\n\n\n plt.show()\n\n\ndef get_index(list, option_value):\n return list.index(option_value.get())\n\n\nif __name__ == \"__main__\":\n\n url = \"http://www.brooksbaseball.net/pfxVB/pfx.php\"\n root = Tk()\n root.title(\"Brooks PitchFX\")\n\n default_font = font.nametofont(\"TkDefaultFont\")\n default_font.configure(size=18)\n my_app = App(root, url)\n root.mainloop()\n","sub_path":"tkinter_pitchfx.py","file_name":"tkinter_pitchfx.py","file_ext":"py","file_size_in_byte":9867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"122975189","text":"\"\"\" Test BIG-IP module \"\"\"\n\nimport tempfile\nimport shutil\n\nfrom ....global_test_imports import pytest, Mock\nfrom ....shared import constants\n\nREQ = constants.MOCK['requests']\n\n\nclass TestExtensionService(object):\n \"\"\"Test Class: bigip.extension.service module \"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"\" Setup func \"\"\"\n cls.test_tmp_dir = tempfile.mkdtemp()\n\n @classmethod\n def teardown_class(cls):\n \"\"\"\" Teardown func \"\"\"\n shutil.rmtree(cls.test_tmp_dir)\n\n\n @staticmethod\n @pytest.mark.usefixtures(\"cf_extension_client\")\n def test_cf_show_failover(cf_extension_client, mocker):\n \"\"\"Test: show_failover\n\n Assertions\n ----------\n - show_failover() response should be trigger endpoint API response\n \"\"\"\n sample_return_value = {\n \"code\": 200,\n \"failoverOperations\": {},\n \"instance\": \"A\",\n \"message\": \"Failover state file was reset\",\n \"taskState\": \"SUCCEEDED\",\n \"timestamp\": \"XYZ\"\n }\n\n\n mocker.patch(REQ).return_value.json = Mock(return_value=sample_return_value)\n\n assert cf_extension_client.service.show_trigger() == sample_return_value\n\n @staticmethod\n @pytest.mark.usefixtures(\"cf_extension_client\")\n def test_cf_show_inspect(cf_extension_client, mocker):\n \"\"\"Test: show_inspect\n\n Assertions\n ----------\n - show_inspect() response should be inspect endpoint API response\n \"\"\"\n\n sample_return_value = {\n \"addresses\": [\n {\n \"networkInterfaceId\": \"nic0\",\n \"privateIpAddress\": \"x.x.x.x\",\n \"publicIpAddress\": \"y.y.y.y\"\n },\n {\n \"networkInterfaceId\": \"nic1\",\n \"privateIpAddress\": \"x.x.x.x\",\n \"publicIpAddress\": \"y.y.y.y\"\n },\n {\n \"networkInterfaceId\": \"nic2\",\n \"privateIpAddress\": \"x.x.x.x\",\n \"publicIpAddress\": \"null\"\n }\n ],\n \"deviceStatus\": \"active\",\n \"hostName\": \"test\",\n \"instance\": \"test-i\",\n \"routes\": [\n {\n \"networkId\": \"int-net-test\",\n \"routeTableId\": \"1\",\n \"routeTableName\": \"test-i\"\n }\n ],\n \"trafficGroup\": [\n {\n \"name\": \"/Common/traffic-group-1\"\n }\n ]\n }\n\n mocker.patch(REQ).return_value.json = Mock(return_value=sample_return_value)\n\n assert cf_extension_client.service.show_inspect() == sample_return_value\n\n @staticmethod\n @pytest.mark.usefixtures(\"cf_extension_client\")\n def test_cf_trigger_failover(cf_extension_client, mocker):\n \"\"\"Test: show_inspect\n\n Assertions\n ----------\n - trigger() response should be trigger endpoint API response\n \"\"\"\n\n sample_return_value = {\n \"failoverOperations\": {\n \"addresses\": {\n \"fwdRules\": {\n \"operations\": []\n },\n \"nics\": {\n \"associate\": [],\n \"disassociate\": []\n }\n },\n 
\"routes\": {\n \"operations\": []\n }\n },\n \"instance\": \"test-i\",\n \"message\": \"Failover Completed Successfully\",\n \"taskState\": \"SUCCEEDED\",\n \"timestamp\": \"XYZ\"\n }\n\n mocker.patch(REQ).return_value.json = Mock(return_value=sample_return_value)\n\n assert cf_extension_client.service.trigger() == sample_return_value\n\n @staticmethod\n @pytest.mark.usefixtures(\"cf_extension_client\")\n def test_cf_show_info(cf_extension_client, mocker):\n \"\"\"Test: reset\n\n Assertions\n ----------\n - reset() response should be reset endpoint API response\n \"\"\"\n\n sample_return_value = {\n \"version\": \"0\",\n \"schemaCurrent\": \"0.9.1\",\n \"schemaMinimum\": \"1.0.0\",\n \"release\": \"1\"\n }\n\n mocker.patch(REQ).return_value.json = Mock(return_value=sample_return_value)\n\n assert cf_extension_client.service.show_info() == sample_return_value\n\n @staticmethod\n @pytest.mark.usefixtures(\"cf_extension_client\")\n def test_cf_reset(cf_extension_client, mocker):\n \"\"\"Test: reset\n\n Assertions\n ----------\n - reset() response should be reset endpoint API response\n \"\"\"\n\n sample_return_value = {\n \"message\": \"success\"\n }\n\n mocker.patch(REQ).return_value.json = Mock(return_value=sample_return_value)\n\n assert cf_extension_client.service.reset() == sample_return_value\n","sub_path":"tests/unit/bigip/extension/test_extension_cf.py","file_name":"test_extension_cf.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"531409036","text":"import functools\n\nimport tornado.web\nfrom schema import Schema, Optional, SchemaError\n\n\nclass RequestSchema(object):\n\n def __init__(self, **defs):\n if defs:\n self.__dict__.update(defs)\n\n @property\n def describe_url(self):\n return self.descriptions('url')\n\n @property\n def describe_querystring(self):\n return self.descriptions('querystring')\n\n @property\n def describe_body(self):\n _, description = self.process_body()\n return description\n\n def validate_url(self, value):\n return self.validate('url', value)\n\n def validate_querystring(self, value):\n return self.validate('querystring', value)\n\n def validate_body(self, value):\n pattern, _ = self.process_body()\n return Schema(pattern).validate(value)\n\n def querystring_optionals(self):\n _, _, optionals = self.process_definition('querystring')\n return optionals\n\n def process_body(self):\n attr = self.get_definition_attr('body')\n pattern = attr\n description = ''\n if isinstance(attr, tuple):\n pattern, description = attr\n return pattern, description\n\n def descriptions(self, attr_name):\n _, descriptions, _ = self.process_definition(attr_name)\n return descriptions\n\n def validate(self, attr_name, value):\n patterns, _, _ = self.process_definition(attr_name)\n return Schema(patterns).validate(value)\n\n def process_definition(self, attr_name):\n attr = self.get_definition_attr(attr_name)\n if not isinstance(attr, dict):\n raise InvalidSchemaDefinition('Schema definition need to be a dict')\n patterns = {}\n descriptions = {}\n optionals = []\n for key, rule in attr.items():\n key_name = key\n if isinstance(key, Optional):\n key_name = key._schema\n optionals.append(key_name)\n descriptions[key_name] = ''\n if isinstance(rule, tuple):\n rule, descriptions[key_name] = rule\n patterns[key] = rule\n return patterns, descriptions, optionals\n\n def get_definition_attr(self, attr_name):\n if not hasattr(self, attr_name):\n raise SchemaNotDefined('Is necessary to 
define a schema for {}'\\\n                                  .format(attr_name))\n        return getattr(self, attr_name)\n\n\nclass SchemaNotDefined(Exception):\n    pass\n\n\nclass InvalidSchemaDefinition(Exception):\n    pass\n\n\nclass ValidateDecorator(object):\n\n    def __init__(self, validation_object=None, **validation_schema):\n        if validation_object:\n            self.request_schema = validation_object()\n        else:\n            self.request_schema = RequestSchema(**validation_schema)\n        self.handler = None\n\n    def __call__(self, func):\n        func.request_schema = self.request_schema\n\n        @functools.wraps(func)\n        def wrapper(handler, *args, **url_params):\n            self.handler = handler\n            self.handler.values = {}\n\n            try:\n                self.process_params_in_url(url_params)\n                self.process_params_in_querystring()\n                self.process_body()\n            except SchemaError:\n                raise tornado.web.HTTPError(400)\n\n            return func(handler, *args, **url_params)\n        return wrapper\n\n    def process_params_in_url(self, url_params):\n        if url_params:\n            parsed_values = self.request_schema.validate_url(url_params)\n            self.handler.values['url'] = parsed_values\n\n    def process_params_in_querystring(self):\n        if hasattr(self.request_schema, 'querystring'):\n            request_values = {}\n            for key in self.request_schema.describe_querystring:\n                value = self.handler.get_argument(key, default=None)\n                if value is not None:\n                    request_values[key] = value\n            parsed_values = self.request_schema.validate_querystring(\n                request_values)\n            self.handler.values['querystring'] = parsed_values\n\n    def process_body(self):\n        if hasattr(self.request_schema, 'body'):\n            parsed_values = self.request_schema.validate_body(\n                self.handler.request.body)\n            self.handler.values['body'] = parsed_values\n\n\nvalidate = ValidateDecorator\n","sub_path":"tapioca/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"135397619","text":"from luna_core.common.Neo4jConnection import Neo4jConnection\nfrom luna_core.common.Node import Node, CONTAINER_TYPES\nfrom luna_core.common.config import ConfigSet\n\nimport os, socket, pathlib, logging, shutil\nfrom minio import Minio\n\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed\n\nlogger = logging.getLogger(__name__)\n\nclass DataStore_v2:\n    def __init__(self, store_location):\n        if os.environ['LUNA_HOME']:\n            self.params = ConfigSet(name='STORE_CFG',\n                                    config_file=os.path.join(os.environ['LUNA_HOME'], 'conf', 'datastore.cfg')) \\\n                .get_config_set(\"STORE_CFG\")\n        else:\n            raise RuntimeError(\n                \"$LUNA_HOME is not set. 
Make sure you have set $LUNA_HOME and $LUNA_HOME/conf/datastore.cfg\")\n        logger.info(f\"Configured datastore with {self.params}\")\n\n        self.backend = store_location\n        os.makedirs(self.backend, exist_ok=True)\n        logger.info(f\"Datastore file backend= {self.backend}\")\n\n    def ensure_datastore(self, datastore_id, datastore_type):\n        \"\"\"\n        :params: datastore_id - unique container ID\n        :params: datastore_type - the type of the container\n        \"\"\"\n        datastore_id = str(datastore_id)\n\n        if not datastore_type in CONTAINER_TYPES:\n            logger.warning (f\"DataStore type [{datastore_type}] invalid, please choose from [{CONTAINER_TYPES}]\" )\n            return\n\n        if \":\" in datastore_id:\n            logger.warning (f\"Invalid datastore_id [{datastore_id}], only use alphanumeric characters\")\n            return\n\n        conn = Neo4jConnection(uri=self.params['GRAPH_URI'], user=self.params['GRAPH_USER'], pwd=self.params['GRAPH_PASSWORD'])\n        res = conn.query(f\"\"\" MERGE (datastore:globals:{datastore_type}{{qualified_address:'{datastore_id}'}}) RETURN count(datastore)\"\"\")\n\n        if res[0]['count(datastore)']==1:\n            logger.info(f\"DataStore [{datastore_id}] of type [{datastore_type}] created or matched successfully!\")\n        else:\n            logger.error(f\"The datastore [{datastore_id}] could not be created or found\")\n\n    def _write_to_graph_store(self, node, store_id):\n        \"\"\" Saves the 'node' to a datastore managed in the graph DB \"\"\"\n\n        try:\n            # Configure our connection\n            conn = Neo4jConnection(uri=self.params['GRAPH_URI'], user=self.params['GRAPH_USER'], pwd=self.params['GRAPH_PASSWORD'])\n            res = conn.query( f\"\"\"\n            MATCH (datastore) WHERE datastore.qualified_address = '{store_id}'\n            MERGE (datastore)-[:HAS_DATA]->(da:{node.get_match_str()})\n            ON MATCH SET da = {node.get_map_str()}\n            ON CREATE SET da = {node.get_map_str()}\n            RETURN count(datastore)\"\"\" )\n            if res is None:\n                logger.error(f\"Tried adding data to {store_id}, however query failed, this data will not be available!\", extra={'store_id': store_id})\n                return\n            if not res[0]['count(datastore)']==1:\n                logger.warning(f\"Tried adding data to {store_id}, however datastore did not exist, this data will not be available!\", extra={'store_id': store_id})\n                return\n        except Exception as exc:\n            logger.exception(f\"On write, encountered {exc}, continuing...\", extra={'store_id': store_id})\n\n    def get(self, store_id, namespace_id, data_type, data_tag='data', realpath=True):\n        \"\"\" Looks up and returns the path of data given the store_id, namespace_id, data_type, and data_tag \"\"\"\n\n        dest_dir = os.path.join (self.backend, store_id, namespace_id, data_type, data_tag)\n        if not os.path.exists(dest_dir):\n            # if realpath is true, return path to data instead of symlink location\n            if os.path.lexists(dest_dir):\n                if realpath:\n                    dest_dir = os.readlink(dest_dir)\n            else:\n                raise RuntimeWarning(f\"Data not found at {dest_dir}\")\n        return dest_dir\n\n    def put(self, filepath, store_id, namespace_id, data_type, data_tag='data', metadata={}, symlink=False):\n        \"\"\" Puts the file at filepath at the proper location given a store_id, namespace_id, data_type, and data_tag, and save metadata to DB \"\"\"\n\n        dest_dir = os.path.join (self.backend, store_id, namespace_id, data_type, data_tag)\n\n        if symlink:\n            os.makedirs(pathlib.Path(dest_dir).parent, exist_ok=True)\n\n            if os.path.lexists(dest_dir):\n                os.remove(dest_dir)\n            logger.info(f\"Create symlink {dest_dir} -> {filepath}\")\n            os.symlink(filepath, dest_dir)\n        else:\n            os.makedirs(dest_dir, exist_ok=True)\n            logger.info(f\"Save {filepath} -> {dest_dir}\")\n            
shutil.copy(filepath, dest_dir )\n\n if self.params['GRAPH_STORE_ENABLED']:\n node = Node(data_type, data_tag, metadata)\n node.set_namespace(namespace_id, store_id)\n logger.info(f\"Adding: {node}\")\n self._write_to_graph_store (node, store_id)\n\n return dest_dir\n\n def write(self, iostream, store_id, namespace_id, data_type, data_tag, metadata={}, dtype='w'):\n \"\"\" Writes iostream at the proper location given a store_id, namespace_id, data_type, and data_tag, and save metadata to DB \"\"\"\n\n dest_path_dir = os.path.join (store_id, namespace_id, data_type)\n dest_path_file = os.path.join (dest_path_dir, data_tag)\n\n dest_dir = os.path.join (self.backend, dest_path_dir)\n dest_file = os.path.join (self.backend, dest_path_file)\n\n os.makedirs(dest_dir, exist_ok=True)\n logger.info(f\"Save -> {dest_file}\")\n with open(dest_file, dtype) as fp:\n fp.write(iostream)\n\n if self.params['GRAPH_STORE_ENABLED']:\n node = Node(data_type, data_tag, metadata)\n node.set_namespace(namespace_id, store_id)\n logger.info(f\"Adding: {node}\")\n self._write_to_graph_store (node, store_id)\n\n return dest_file\n\n\n\ndef bootstrap (container_id):\n logger.info(f\"Bootstrapping pipeline for {container_id}\")\n return 1\n\nclass DataStore(object):\n \"\"\"\n DataStore: an abstraction with an id, name, namespace, type, and a list of associated data nodes\n\n Interfaces with a metadata store (graph DB) and raw file stores (gpfs, potentially others)\n\n Handles the matching and creation of metadata\n\n Example usage:\n $ container = data_processing.common.GraphEnum.DataStore( params ).setNamespace(\"test\").setContainer(\"1.2.840...\")\n > Connecting to: neo4j://localhost:7687\n > Connection successfull: True\n > Running on: localhost\n > Lookup ID: 1.2.840...\n > Found: [ /some/path/1.dcm\n\n $ container.get(\"dicom\", \"my-dicom\").properties['Modality']\n > 'CT'\n\n The container includes a logging method:\n $ container.logger.info(\"I am processing the CT\")\n > 'yyyy-mm-dd h:m:s,ms - DataStore [1] - INFO - I am processing the CT'\n\n\n \"\"\"\n # TODO: worried about schema issues? 
like making sure name, namespace, type and qualified path are present, neo4j offers schema enforcment.\n # TODO: testing\n # TODO: error checking\n\n def __init__(self, params):\n \"\"\"\n Initialize the container object.\n Connects to the graph DB\n Figure out what host this code is running on\n\n :params: params - dictonary of important configuration, right now, only the graph URI connection parameters are needed.\n \"\"\"\n if isinstance(params, ConfigSet):\n params=params.get_config_set(\"APP_CFG\")\n\n # Connect to graph DB\n logger.debug (\"Connecting to: %s\", params['GRAPH_URI'])\n self._conn = Neo4jConnection(uri=params['GRAPH_URI'], user=params['GRAPH_USER'], pwd=params['GRAPH_PASSWORD'])\n logger.debug (\"Connection test: %s\", self._conn.test_connection())\n\n if params.get('OBJECT_STORE_ENABLED', False):\n logger.debug (\"Connecting to: %s\", params['MINIO_URI'])\n self._client = Minio(params['MINIO_URI'], access_key=params['MINIO_USER'], secret_key=params['MINIO_PASSWORD'], secure=False)\n try:\n for bucket in self._client.list_buckets():\n logger.debug(\"Found bucket %s\", bucket.name )\n logger.debug(\"OBJECT_STORE_ENABLED=True\")\n params['OBJECT_STORE_ENABLED'] = True\n except:\n logger.warning(\"Could not connect to object store\")\n logger.warning(\"Set OBJECT_STORE_ENABLED=False\")\n params['OBJECT_STORE_ENABLED'] = False\n\n self._host = socket.gethostname() # portable to *docker* containers\n logger.debug (\"Running on: %s\", self._host)\n\n self.params = params\n self._attached = False\n\n def createNamespace(self, namespace_id: str):\n \"\"\"\n Creates a namesapce, if it doesn't exist, else, tells you it exists\n\n :params: namespace_id - namespace value\n \"\"\"\n cohort = Node(\"cohort\", namespace_id)\n create_res = self._conn.query(f\"\"\" MERGE (co:{cohort.get_create_str()}) RETURN co\"\"\")\n\n if len(create_res) == 1:\n logger.info(f\"Namespace [{namespace_id}] created successfully\")\n\n return self\n\n def setNamespace(self, namespace_id: str):\n \"\"\"\n Sets the namespace for this container's commits, if it exists\n\n :params: namespace_id - namespace value\n \"\"\"\n self._namespace_id = namespace_id\n self._namespace_node = Node(\"cohort\", namespace_id)\n self._bucket_id = namespace_id.lower().replace('_','-')\n\n logger.debug(f\"Checking if [{namespace_id}] exists...\")\n\n match_res = self._conn.query(f\"\"\" MATCH (co:{self._namespace_node.get_match_str()}) RETURN co\"\"\")\n\n if not len(match_res) == 1:\n raise RuntimeError( f\"Namespace [{namespace_id}] does not exist, call .createNamespace() first!\")\n\n if self.params.get('OBJECT_STORE_ENABLED', False):\n if not self._client.bucket_exists(self._bucket_id):\n self._client.make_bucket(self._bucket_id)\n\n return self\n\n def createDatastore(self, container_id, container_type):\n \"\"\"\n Checks if the node referenced by container_id is a valid container, queries the metastore for relevant metadata\n\n :params: container_id - unique container ID\n \"params: type - the type of the container\n \"\"\"\n\n if not container_type in ['generic', 'patient', 'accession', 'scan', 'slide', 'parquet']:\n logger.warning (f\"DataStore type [{container_type}] invalid, please choose from ['generic', 'patient', 'accession', 'scan', 'slide', 'parquet']\" )\n\n if \":\" in container_id:\n logger.warning (f\"Invalid container_id [{container_id}], only use alphanumeric characters\")\n\n node = Node(container_type, container_id)\n node.set_namespace( self._namespace_id )\n\n create_res = self._conn.query(f\"\"\" 
MERGE (container:{node.get_create_str()}) RETURN container\"\"\")\n\n if len(create_res)==1:\n logger.info(f\"DataStore [{container_id}] of type [{container_type}] created or matched successfully!\")\n else:\n logger.error(\"The container does not exists\")\n\n return self\n\n def setDatastore(self, container_id):\n \"\"\"\n Checks if the node referenced by container_id is a valid datastore, queries the metastore for relevant metadata\n\n :params: container_id - the unique container ID, either as an integer (neo4j autopopulated ID) or as a string (the Qualified Path)\n \"\"\"\n self._attached = False\n logger.info (\"Lookup ID: %s\", container_id)\n\n # Figure out how to match the node\n if isinstance(container_id, str) and not \"uid://\" in container_id:\n node = Node(\"generic\", container_id)\n node.set_namespace( self._namespace_id )\n print (node.get_address())\n match_clause = f\"\"\"WHERE container.qualified_address = '{node.get_address()}' \"\"\"\n elif isinstance(container_id, str) and \"uid://\" in container_id:\n match_clause = f\"\"\"WHERE id(container) = {container_id.replace('uid://', '')} \"\"\"\n elif isinstance(container_id, int):\n match_clause = f\"\"\"WHERE id(container) = {container_id} \"\"\"\n else:\n raise RuntimeError(\"Invalid container_id type not (str, int)\")\n\n # Run query\n res = self._conn.query(f\"\"\"\n MATCH (container) {match_clause}\n RETURN id(container), labels(container), container.type, container.name, container.namespace, container.qualified_address\"\"\"\n )\n\n # Check if the results are singleton (they should be... since we only query unique IDs!!!)\n if res is None or len(res) == 0:\n logger.warning (f\"DataStore [{container_id}] does not exist, you can try creating it first with createContainer()\")\n return self\n\n # Set some potentially import parameters\n self._datastore_id = res[0][\"id(container)\"]\n self._name = res[0][\"container.name\"]\n self._qualifiedpath = res[0][\"container.qualified_address\"]\n self._type = res[0][\"container.type\"]\n self._labels = res[0][\"labels(container)\"]\n self.address = res[0][\"container.qualified_address\"]\n\n # Containers need to have a qualified path\n if self._qualifiedpath is None:\n logger.warning (\"Found, however not valid container object, containers must have a name, namespace, and qualified path\")\n return self\n\n # Let us know attaching was a success! :)\n logger.info (\"Successfully attached to %s container id=%s @ %s\", self._type, self._datastore_id, self.address)\n self._attached = True\n\n return self\n\n def isAttached(self):\n \"\"\"\n Returns true if container was properly attached (i.e. checks in setDatastore succeeded), else False\n \"\"\"\n logger.debug (\"Attached: %s\", self._attached)\n return self._attached\n\n\n def get(self, type, name):\n \"\"\"\n Query graph DB container node for dependent data nodes, and return one\n Parses the path field URL for various cases, and sets the node.data an node.aux attribute with a corrected path\n Note: namespace is not a default filter for get nodes, but is for adding them (i.e., one can write data under a different namespace)\n\n :params: type - the type of data designed\n e.g. radiomics, mha, dicom, png, svs, geojson, etc.\n :params: name - can be used to filter nodes\n e.g. name of the node in the subspace of the container (e.g. 
generate-mhd)\n        :example: get(\"mhd\", \"generate-mhd\") gets data of type \"mhd\" generated from the method \"generate-mhd\" in this container's context/subspace\n        \"\"\"\n        assert self.isAttached()\n\n        query = f\"\"\"MATCH (container)-[:HAS_DATA]-(data:{type}) WHERE id(container) = {self._datastore_id} AND data.name='{type}-{name}' AND data.namespace='{self._namespace_id}' RETURN data\"\"\"\n\n        logger.debug(query)\n        res = self._conn.query(query)\n\n        # Catches bad queries\n        # If the query is successful, reconstruct a Node object\n        if res is None:\n            logger.warning(f\"get() query failed, data.name='{type}-{name}' returning None\")\n            return None\n        elif len(res) == 0:\n            logger.warning(f\"get() found no nodes, data.name='{type}-{name}' returning None\")\n            return None\n        elif len(res) > 1:\n            logger.warning(f\"get() found many nodes (?), data.name='{type}-{name}' returning None\")\n            return None\n        else:\n            node = Node(res[0]['data']['type'], res[0]['data']['name'], dict(res[0]['data'].items()))\n            logger.debug (\"Query Successful:\")\n            logger.debug (node)\n\n            node.set_data(node.properties.get('data', None))\n            node.set_aux (node.properties.get('aux', None))\n\n            return node\n\n    @staticmethod\n    def run(namespace, container_id, pipeline):\n        \"\"\"\n        Runner for pipelined jobs\n        \"\"\"\n        for func in pipeline:\n            module = func[0]\n            params = func[1]\n            module (cohort_id=namespace, container_id=container_id, method_data=params)\n\n    def runLocal(self, pipeline):\n        \"\"\"\n        Run a pipeline in the main thread, blocking.\n\n        :params: pipeline - an ordered list of (function, params) tuples to execute\n        \"\"\"\n        self.run (self._namespace_id, self._name, pipeline)\n\n    def runProcessPoolExecutor(self, pipeline, executor):\n        \"\"\"\n        Use a process pool executor to run full pipelines in background\n\n        :params: pipeline - an ordered list of (function, params) tuples to execute\n        :params: executor - a ProcessPoolExecutor passed from a parent script\n        \"\"\"\n\n        assert isinstance(executor, ProcessPoolExecutor)\n        return executor.submit(self.run, self._namespace_id, self._name, pipeline)\n\n    def runDaskDistributed(self, pipeline, client):\n        \"\"\"\n        Submit functions to dask workers.\n        Dask can track dependencies via a semaphore future, so we pass that explicitly and submit each function individually\n\n        :params: pipeline - an ordered list of (function, params) tuples to execute\n        :params: client - a dask client\n        \"\"\"\n        from dask.distributed import Client\n\n        assert isinstance(client, Client)\n        future = client.submit (bootstrap, self._name)\n        for func in pipeline:\n            module = func[0]\n            params = func[1]\n            future = client.submit (module, self._namespace_id, self._name, params, semaphore=future)\n        return future\n\n\n    def put(self, node: Node):\n        \"\"\"\n        Adds a node to a temporary dictionary that will be used to save/commit nodes to the relevant databases.\n        If you add the same node under the same name, the existing node is updated rather than duplicated.\n        Decorates the node with the container's namespace.\n\n        :param: node - node object\n        \"\"\"\n        assert isinstance(node, Node)\n        assert self.isAttached()\n\n        logger.info(f\"Adding node: {node}\")\n\n        # Decorate with the container namespace\n        node.set_namespace( self._namespace_id, self._name )\n        node._datastore_id = self._datastore_id\n\n        # Set node data object(s) only if there is a path and the object store is enabled\n        node.objects = []\n        if node.data is not None and self.params.get(\"OBJECT_STORE_ENABLED\", False):\n            node.properties['object_bucket'] = f\"{self._bucket_id}\"\n            node.properties['object_folder'] = 
f\"{self._name}/{node.name}\"\n\n data_path = pathlib.Path( node.data )\n\n if data_path.is_file():\n node.objects.append( data_path )\n\n if data_path.is_dir():\n # TODO: enable extention in glob via something?\n for path in data_path.glob(\"*.*\"):\n node.objects.append(path)\n\n logger.info (\"Node has %s pending object commits\", len(node.objects))\n\n # Set node aux object only if a path and the object store is enabled\n if node.aux is not None and self.params.get(\"OBJECT_STORE_ENABLED\", False):\n node.properties['object_bucket'] = f\"{self._bucket_id}\"\n node.properties['object_folder'] = f\"{self._name}/{node.name}\"\n node.objects.append( pathlib.Path( node.aux ))\n logger.info (\"Node has %s pending object commits\", len(node.objects))\n\n # Add to node commit dictonary\n logger.info (\"Adding: %s\", node.get_address())\n\n self._conn.query( f\"\"\"\n MATCH (container) WHERE id(container) = {node._datastore_id}\n MERGE (container)-[:HAS_DATA]->(da:{node.get_match_str()})\n ON MATCH SET da = {node.get_map_str()}\n ON CREATE SET da = {node.get_map_str()}\n \"\"\" )\n\n if self.params.get(\"OBJECT_STORE_ENABLED\", False):\n future_uploads = []\n executor = ThreadPoolExecutor(max_workers=4)\n\n object_bucket = node.properties.get(\"object_bucket\")\n object_folder = node.properties.get(\"object_folder\")\n for p in node.objects:\n future = executor.submit(self._client.fput_object, object_bucket, f\"{object_folder}/{p.name}\", p, part_size=250000000)\n future_uploads.append(future)\n\n n_count_futures = 0\n n_total_futures = len (future_uploads)\n for future in as_completed(future_uploads):\n try:\n data = future.result()\n except:\n logger.exception('Bad upload: generated an exception:')\n else:\n n_count_futures += 1\n if n_count_futures < 10: logger.info(\"Upload successful with etag: %s\", data[0])\n if n_count_futures < 1000 and n_count_futures % 100 == 0: logger.info(\"Uploaded [%s/%s]\", n_count_futures, n_total_futures)\n if n_count_futures % 1000 == 0: logger.info(\"Uploaded [%s/%s]\", n_count_futures, n_total_futures)\n\n logger.info(\"Uploaded [%s/%s]\", n_count_futures, n_total_futures)\n logger.info(\"Shutdown executor %s\", executor)\n executor.shutdown()\n logger.info(\"Done saving all records!!\")\n\n def add(self, *args):\n logger.warning (\"Datastore.add() has been depreciated\")\n def saveAll(self, *args):\n logger.warning (\"Datastore.saveAll() has been depreciated\")\n","sub_path":"luna_core/common/DataStore.py","file_name":"DataStore.py","file_ext":"py","file_size_in_byte":22344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564391271","text":"#! 
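The upload loop in put() above is a standard fan-out/fan-in pattern: submit one fput_object call per pending object to a small thread pool, then drain the futures with as_completed while counting successes. A generic sketch, with a hypothetical upload_one callable standing in for the MinIO client call:

from concurrent.futures import ThreadPoolExecutor, as_completed
import logging

logger = logging.getLogger(__name__)

def upload_all(paths, upload_one, max_workers=4):
    """Upload every path concurrently; upload_one(path) is any blocking upload callable."""
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(upload_one, p) for p in paths]
        done = 0
        for future in as_completed(futures):
            try:
                future.result()
            except Exception:
                logger.exception("Bad upload: generated an exception:")
            else:
                done += 1
    logger.info("Uploaded [%s/%s]", done, len(futures))
    return done

# e.g. upload_all(["a.dat", "b.dat"], lambda p: print("uploading", p))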
/usr/bin/env python\n# -*- coding: utf-8 -*-\n#======================================================================\n#\n# wordkit.py - \n#\n# Created by skywind on 2018/07/20\n# Last Modified: 2018/07/20 16:18:34\n#\n#======================================================================\nfrom __future__ import print_function, unicode_literals\nimport sys\nimport time\nimport os\nimport codecs\n\n\n#----------------------------------------------------------------------\n# python3 compatible\n#----------------------------------------------------------------------\nif sys.version_info[0] >= 3:\n unicode = str\n long = int\n xrange = range\n\n\n#----------------------------------------------------------------------\n# WordBook\n#----------------------------------------------------------------------\nclass WordBook (object):\n\n def __init__ (self, filename = None):\n self._words = []\n self._lookup = {}\n self._info = {}\n self._title = None\n self._count = 0\n self.load(filename)\n\n def reset (self):\n self._words = []\n self._lookup = {}\n self._title = None\n self._count = 0\n\n def push (self, word, info = None):\n word = word.strip('\\r\\n\\t ')\n if not word:\n return False\n key = word.lower()\n if key in self._lookup:\n return False\n self._lookup[key] = len(self._words)\n self._words.append(word)\n self._info[key] = info\n self._count = len(self._words)\n return True\n \n def load (self, filename):\n if filename is None:\n return False\n if isinstance(filename, str) or isinstance(filename, unicode):\n fp = open(filename, 'r')\n else:\n fp = filename\n for line in fp:\n line = line.strip('\\r\\n\\t ')\n if not line: \n continue\n if line.startswith('#') or line.startswith(';'):\n line = line[1:].lstrip('\\r\\n\\t ')\n if line and self._title is None:\n self._title = line\n continue\n word, _, info = line.partition(',')\n word = word.strip()\n if not word:\n continue\n info = info.strip()\n if info:\n info = info.split(',')\n else:\n info = None\n self.push(word, info)\n fp.close()\n return True\n\n def check (self, key):\n return self._lookup.__contains__(key.lower())\n \n def __contains__ (self, key):\n return self._lookup.__contains__(key.lower())\n\n def __len__ (self):\n return self._count\n\n def __getitem__ (self, key, default = None):\n if isinstance(key, unicode) or isinstance(key, str):\n return self._lookup.get(key.lower(), default)\n if key < 0 or key >= self._count:\n return default\n return self._words[key]\n\n def __iter__ (self):\n return self._words.__iter__()\n\n def info (self, key):\n return self._info.get(key.lower())\n\n def minus (self, wb):\n result = []\n for word in self._words:\n if word not in wb:\n result.append(word)\n return result\n\n def dumps (self):\n return [ n for n in self._words ]\n\n\n#----------------------------------------------------------------------\n# testing case\n#----------------------------------------------------------------------\nif __name__ == '__main__':\n def test1():\n wb = WordBook('e:/english/english/skip/simple-1.txt')\n wb.load('e:/english/english/skip/simple-2.txt')\n print(len(wb))\n return 0\n test1()\n\n\n\n","sub_path":"lib/wordkit.py","file_name":"wordkit.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91538203","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on July 2017\n\n@author: JulienWuthrich\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.decomposition 
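A quick usage sketch for the WordBook class in lib/wordkit.py above. The word-list file paths in its test case are machine-specific, so this builds the books in memory with push() instead of load(), assuming the module is importable:

from wordkit import WordBook   # lib/wordkit.py above; assumed on the path

wb1 = WordBook()
for w in ("apple", "banana", "cherry"):
    wb1.push(w)

wb2 = WordBook()
wb2.push("banana")

print(len(wb1))          # 3
print("APPLE" in wb1)    # True: lookups are case-insensitive
print(wb1.minus(wb2))    # ['apple', 'cherry']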
import PCA\n\n\ndef optimalPcaComponents(pca_ratio):\n \"\"\"Compute the optimal number of components for the pca.\n\n Arg:\n ----\n pca_ration (pca.explained_variance_ratio_): var explained\n\n Return:\n -------\n int, of optimal pc components\n \"\"\"\n for i in range(2, len(pca_ratio)):\n pca_explication = sum(pca_ratio[:i])\n if pca_explication > 0.99:\n return i\n return i\n\n\ndef plotPca(pca_ratio):\n \"\"\"Plot the pca curve.\n\n Arg:\n ----\n pca_ration (pca.explained_variance_ratio_): var explained\n \"\"\"\n plt.bar(np.arange(len(pca_ratio)) + 1, pca_ratio)\n plt.title(\"Variance expliquée\")\n plt.show()\n\n\ndef buildPca(df, n_components=100):\n \"\"\"Make a pca decomposition.\n\n Args:\n -----\n df (pandas.DataFrame): datas\n n_components (int): nb of components\n\n Return:\n -------\n pca model\n \"\"\"\n return PCA(n_components=n_components).fit(df)\n\n\ndef nbPca(df):\n \"\"\"Define the number of components.\n\n Arg:\n ----\n df (pandas.DataFrame): datas\n\n Return:\n -------\n int, of optimal number of components\n \"\"\"\n pca = buildPca(df)\n pca_ratio = pca.explained_variance_ratio_\n plotPca(pca_ratio)\n\n return optimalPcaComponents(pca_ratio)\n\n\ndef applyPca(df):\n \"\"\"Build a dataframe based on PCA decomposition.\n\n Arg:\n ----\n df (pandas.DataFrame): datas\n\n Return:\n -------\n pandas.DataFrame with PCA columns\n \"\"\"\n nb_pca = nbPca(df)\n pca = buildPca(df, n_components=100)\n df = pd.DataFrame(pca.transform(df))\n for col in df.columns:\n df.rename(columns={col: \"PCA_\" + str(col)}, inplace=True)\n\n return df\n","sub_path":"mozinor/preprocess/transformer/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169283169","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.signal import hilbert, detrend, find_peaks\nfrom scipy.linalg import solve_toeplitz, toeplitz\nimport scipy.linalg as la\nimport sys\nimport time \nfrom swellex.audio.ts_comp import get_nb_fname\n\n'''\nDescription:\nRoutines for implementing an autoregressive estimator for the PSD of the data\non each sensor in the array. 
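optimalPcaComponents() above returns the smallest component count whose cumulative explained-variance ratio exceeds 0.99. Note that applyPca() then fits the final PCA with n_components=100 rather than the nb_pca it just computed, which looks unintended. A minimal end-to-end sketch that reuses the selected count:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 20)

ratio = PCA(n_components=20).fit(X).explained_variance_ratio_
# Smallest i whose cumulative explained variance clears the 0.99 threshold
n = next(i for i in range(2, len(ratio) + 1) if ratio[:i].sum() > 0.99)

X_pca = PCA(n_components=n).fit_transform(X)
print(n, X_pca.shape)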
\n\nThe golden nugget here is run_esprit and the \nesprit implementation\n\nDate: \n07/22/2020\n\nAuthor: Hunter Akins\n'''\n\n\ndef get_chunk(data, min0, min1):\n \"\"\" Get the chunk from min0 to min1 \"\"\"\n ind0, ind1 = int(min0*60*1500), int(min1*60*1500)\n if data.shape[0] == data.size:\n data = data.reshape(1,data.size)\n data = data[:,ind0:ind1]\n return data\n\ndef fourier_psd(data):\n freqs, ft = np.fft.fftfreq(data.size, 1/1500), np.fft.fft(data)\n psd = np.square(abs(ft))\n return freqs, psd\n\ndef retain_relevant(freq, freqs, psd):\n \"\"\"\n Only keep the bins nearby freq \n \"\"\"\n freq_inds = [i for i in range(len(freqs)) if abs(freqs[i] - freq) < .5]\n return freqs[freq_inds], psd[freq_inds]\n\ndef est_autocorr(data, model_order, start_lag=0):\n \"\"\"\n Perform simple autocorrelation estimation for lag up\n to model order m\n Input\n data - np array, possibly 1d\n model_order - int\n number of lags\n start_lag - int\n helpful for recursive implementations, if you've laread\n computed lags up to L, start_lag=L+1 will only work onwards\n \"\"\"\n if data.shape[0] == data.size: \n data = data.reshape(1, data.size)\n m = model_order\n rxx = np.zeros(m, dtype=np.complex128)\n N = data.shape[1]\n \"\"\" FOr each lag \"\"\"\n for m in range(start_lag, model_order):\n sum_vals= 0\n for n in range(N-m):\n sum_vals += np.sum(data[:,n+m]*(data[:,n].conj()))\n rxx[m] = sum_vals/(N-m)\n return rxx\n\ndef update_autocorr(data, rxx, new_order):\n \"\"\"\n Given rxx, which contains estimates of the lag up to\n l = rxx.size-1, expand it to contain estimates to include\n lags up to new_order-1\n \"\"\"\n start_lag = rxx.size\n if new_order < start_lag:\n raise ValueError('New lag is less than old lkag')\n new_rxx = est_autocorr(data, new_order, start_lag = start_lag)\n full_rxx = np.zeros(new_order,dtype=np.complex128)\n full_rxx[:start_lag] = rxx\n full_rxx[start_lag:] = new_rxx\n return full_rxx\n\ndef make_data_mat(data, model_order):\n num_rows = data.size - model_order\n num_cols = model_order\n mat = np.zeros((num_rows, num_cols), dtype=np.complex128)\n h = np.zeros(num_rows, dtype=np.complex128)\n for i in range(num_rows):\n mat[i,:] = -data[i:i+model_order:][::-1]\n h[i] = data[i+model_order]\n mat = np.matrix(mat)\n return mat, h\n \ndef solve_yw(data, model_order):\n \"\"\"\n Solve yule-walker equations for data\n Input\n data- array like\n ...\n model_order - int\n number of AR coeffs\n output\n an - array\n var - float\n estimated noise variance\n \"\"\"\n mat,h = make_data_mat(data, model_order)\n print(mat.shape)\n inv = np.linalg.inv(mat.H@mat)@mat.H\n an = inv@h\n an = an.T\n #an = solve_toeplitz(rxx[:-1], rxx[1:])\n #var = rxx[0] + np.sum(an*(rxx[1:].conj()))\n var =1\n return an, var\n\ndef get_psd(f_grid, delta_t, an, var):\n \"\"\"\n Get power spectral density from estimated\n autoregressive parameters\n \"\"\"\n numer =delta_t*abs(var)\n psd = np.zeros(f_grid.size)\n t = delta_t * np.linspace(1, len(an), len(an))\n an =an.reshape(1, an.size)\n for i, f in enumerate(f_grid):\n fvals = np.exp(-complex(0,1)*2*np.pi*f*t)\n fvals.reshape(fvals.size, 1)\n denom = 1 + (an@fvals)[0]\n psd[i] = numer/np.square(abs(denom))\n psd = psd / np.max(psd)\n return psd\n\ndef get_naive_psd(f_grid, data, delta_t = 1/1500):\n psd = np.zeros(f_grid.size)\n t = delta_t * np.linspace(0, data.size-1, data.size)\n print(t[1]-t[0], delta_t)\n for i, f in enumerate(f_grid):\n comb = np.exp(-complex(0,1)*2*np.pi*f*t)\n psd[i] = np.square((abs(comb@data)))\n psd = psd / np.max(psd)\n return 
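est_autocorr() above averages lag products over sensors with a 1/(N-m) normalization, while solve_yw() sidesteps the imported solve_toeplitz in favor of an explicit least-squares matrix. For reference, the classic Yule-Walker route on a synthetic AR(1), using the same lag normalization and the solve_toeplitz import the file already has:

import numpy as np
from scipy.linalg import solve_toeplitz

rng = np.random.RandomState(0)
a_true = 0.8                      # x[n] = a_true * x[n-1] + noise
x = np.zeros(5000)
for n in range(1, x.size):
    x[n] = a_true * x[n - 1] + rng.randn()

order = 2
# Lag-m autocorrelation estimates r[0..order], normalized by (N - m)
r = np.array([np.mean(x[m:] * x[:x.size - m]) for m in range(order + 1)])
# Yule-Walker: solve Toeplitz(r[0..order-1]) @ a = r[1..order]
a_hat = solve_toeplitz(r[:-1], r[1:])
print(a_hat)                      # first coefficient close to 0.8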
psd\n\ndef pseudo_spec(v, df=.1, fs=1500):\n \"\"\"\n Input\n v - numpy matrix\n columns are the noise eigenvectors\n df - float\n frequency spacing (fs = 1)\n Output \n psd - numpy array\n \"\"\"\n T = 1/df\n N = T*fs\n N = np.power(2, int(np.log2(N))+1)\n fft_vals = np.fft.fft(v, n=N, axis=0)\n freqs = np.fft.fftfreq(N, 1/1500)\n power = np.square(abs(fft_vals))\n power_sum = np.sum(power, axis=1)\n psd = 1/power_sum\n return freqs, psd\n \ndef music(data, num_freqs, M, df=.1, fs=1500):\n \"\"\"\n Compute the pseudospectrum for the data\n looking for num_freqs using M eigenvectors\n Input -\n data - np 1d array\n time series with harmonic content\n num_freqs - int\n number of harmonic elements\n M - int\n lag order to compute\n df - float\n pseudospectrum spacing\n fs - float\n sampling rate (Hz)\n \"\"\"\n rxx = est_autocorr(data, M)\n mat = toeplitz(rxx)\n lam, v = la.eigh(mat)\n #plt.figure()\n #plt.scatter(np.linspace(0, lam.size-1, lam.size), lam)\n #plt.ylim(0, np.max(lam)*1.1)\n snr_proxy = lam[-1]\n noise_dim = M - num_freqs\n noise_v = v[:,:noise_dim]\n freqs, psd = pseudo_spec(noise_v, df, fs)\n df = freqs[1]-freqs[0]\n return freqs, psd, snr_proxy\n \ndef esprit_make_mat(data, M):\n \"\"\"\n Make the data matrix for ESPRIT \n if data is multidimensional, stack the data\n Rows are expected to be sensors\n Input \n data -numpy 1d or nd array\n M - int\n \"\"\"\n num_rows = data.size-M\n if len(data.shape) > 1:\n data = data.reshape(data.size)\n X = np.zeros((num_rows, M), dtype=data.dtype) \n for i in range(M):\n X[:,i] = data[i:i+num_rows]\n return X\n \ndef esprit(data, p, M,timer=False):\n \"\"\"\n Given the data, implement Esprit\n Looking for p sinusoids using data vectors of length M\n Input - \n data - numpy ndarray\n potentially a higher dimensional array,\n in which case there are two ways to approach it\n p - int\n number of complex exponentials hiding in the data\n M - length of the data records\n Output -\n \"\"\"\n dt = 1/1500\n if timer == True:\n now = time.time()\n X = esprit_make_mat(data,M)\n if timer == True:\n print('making data mat esp', time.time()-now)\n now = time.time()\n U, s, VH = la.svd(X,full_matrices=False)\n if timer == True:\n print('svd 1', time.time() - now)\n now = time.time()\n VH = np.matrix(VH)\n VHs = VH[:p,:]\n VH1 = VHs[:, 1:]\n VH2 = VHs[:, :-1]\n \"\"\" DO some TLS\"\"\"\n X_tls = np.zeros((M-1, 2*p), dtype=VH1[0,0].dtype)\n X_tls[:,:p] = (VH1.H)\n X_tls[:,p:] = (VH2.H)\n if timer == True:\n print('tls stuff ', time.time() - now)\n now = time.time()\n U,s,VH_tls = la.svd(X_tls)\n if timer == True:\n print('svd 2', time.time()-now)\n now = time.time()\n U_tild = np.matrix(VH_tls).H\n U12_tild = U_tild[:p,p:]\n U22_tild = U_tild[p:,p:]\n Psi_tls = -U12_tild@la.inv(U22_tild)\n Psi = 1/(VH1@(VH1.H))*VH1@(VH2.H)\n eigs = la.eigvals(Psi)\n f = np.angle(eigs)/2/np.pi/dt\n eigs = la.eigvals(Psi_tls)\n f = np.angle(eigs)/2/np.pi/dt\n if timer == True:\n print('final calcs', time.time()-now)\n return f\n\ndef check_ts(chunk,freq):\n \"\"\"\n The data is a chunk of the time series\n Determine if the chunk is good by looking for nulls\n To do so, look at variation of the amplitude, estimated\n for every ten cycles\n \"\"\"\n chunk_len = chunk.size\n dt = 1/1500\n T = 20/freq\n N = int(T/dt)\n num_subs = chunk_len // N\n masks = np.zeros((num_subs, chunk_len))\n for i in range(num_subs):\n masks[i, N*i:N*i+N] = 1\n amps = masks@abs(chunk)\n amp_range = np.max(amps)/np.min(amps)\n return amp_range\n\ndef get_top(dats, freq, num):\n \"\"\"\n Use 
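A compact sanity check of the ESPRIT idea used above (sliding-window data matrix, signal subspace from the SVD, rotational invariance between the subspace and its one-sample shift): recover a single complex tone at fs = 1500 Hz. This is the plain least-squares variant rather than the TLS variant in esprit():

import numpy as np

fs, f_true, N, M, p = 1500.0, 127.2, 3000, 40, 1
t = np.arange(N) / fs
x = np.exp(2j * np.pi * f_true * t)            # noiseless complex tone

# Data matrix whose rows are length-M sliding windows of x
X = np.column_stack([x[i:i + N - M] for i in range(M)])
U, s, VH = np.linalg.svd(X, full_matrices=False)
S = VH[:p].T                                   # signal subspace, spans the steering vector

# The subspace shifted by one sample differs by a rotation exp(j*2*pi*f/fs)
Psi, *_ = np.linalg.lstsq(S[:-1], S[1:], rcond=None)
f_hat = np.angle(np.linalg.eigvals(Psi)) * fs / (2 * np.pi)
print(f_hat)                                   # ~[127.2]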
check_ts as template\n to measure amplitude variation of all\n the 63 chunks\n Then keep the ...you guessed it...TOP TEN WINNERSSS!!!\n \"\"\"\n chunk_len = dats.shape[1]\n dt = 1/1500\n T = 20/freq\n N = int(T/dt)\n num_subs = chunk_len // N\n masks = np.zeros((num_subs, chunk_len))\n for i in range(num_subs):\n masks[i, N*i:N*i+N] = 1\n amps = masks@abs(dats.T)\n max_amp, min_amp = np.max(amps, axis=0), np.min(amps, axis=0)\n diffs = max_amp - min_amp\n top_ten = np.argsort(diffs)[:num]\n return dats[top_ten,:]\n \ndef test_music(freq, p, model_order, T):\n varrs = []\n lfreq = freq-1\n rfreq = freq+1\n for t_start in np.arange(0, 10, 1):\n fs = 1500\n data = np.load('npy_files/'+str(freq) + '_short.npy') #this is data from 35 to 40 minutes into S5\n snrs = []\n x1 = get_chunk(data, t_start/60, t_start/60+T/60)\n x1 = hilbert(x1)\n x1 = x1[0,:]\n x1 = x1.reshape(1, x1.size)\n\n #t = np.linspace(t_start, t_start + T- 1/fs, T*fs)\n #print(t[1]-t[0])\n #dats = np.cos(2*np.pi*127.2*t)\n #x2, y= fourier_psd(dats)\n #dats = hilbert(dats)\n #x2 = dats\n print('N = ', x1.shape[1])\n freqs, psd, snr_proxy = music(x1, p, model_order, df=.001)\n snrs.append(np.var(x1))\n llim, rlim = np.argmin([abs(lfreq-x) for x in freqs]), np.argmin([abs(rfreq - x) for x in freqs])\n freqs = freqs[llim:rlim]\n psd = psd[llim:rlim]\n psd /= np.max(psd)\n #plt.plot(freqs, psd)\n\n inds = find_peaks(psd)\n peak = freqs[inds[0]]\n print(peak)\n if len(peak) != 0:\n plt.scatter(t_start, peak)\n #plt.show()\n plt.suptitle('T='+str(T)+', f=' + str(freq) + ' music')\n\ndef test_esprit(freq, p, model_order, T):\n varrs = []\n lfreq = freq-1\n rfreq = freq+1\n for t_start in np.arange(0, 5, 1):\n guesses=[]\n for i in range(63):\n fs = 1500\n data = np.load('npy_files/'+str(freq) + '_short.npy') #this is data from 35 to 40 minutes into S5\n snrs = []\n x1 = get_chunk(data, t_start/60, t_start/60+T/60)\n x1 = hilbert(x1)\n x1 = x1[i,:]\n #x1 = x1[:10,:]\n #plt.plot(x1.real)\n #plt.show()\n\n t = np.linspace(t_start, t_start + T- 1/fs, T*fs)\n dats = np.cos(2*np.pi*freq*t)\n x2, y= fourier_psd(dats)\n dats = hilbert(dats)\n x2 = dats\n f= esprit(x1, p, model_order)\n plt.scatter(t_start, (f-freq)/freq*1500,color='b')\n guesses.append(f)\n guesses=np.array(guesses)\n plt.scatter(t_start, (np.median(guesses)-freq)/freq*1500,color='r')\n\n\n plt.suptitle('T='+str(T)+', f=' + str(freq) + ' Esprit')\n\ndef run_esprit(freq, p, model_order, N, delta_n, alt=False):\n \"\"\"\n Run ESPRIT on the tscc supercomputer for frequency freq\n on sliding windows of length N and spcing delta_n\n Input \n freq - int\n source frequency under consideration\n p - int\n number of freqs to estimate\n model_order - int\n number of elements used in the data matrix formation\n N - int\n num_samps of windows\n delta_t - float\n spacing of windows\n \"\"\"\n now = time.time()\n x = np.load(get_nb_fname(freq, alt=alt))\n print(get_nb_fname(freq, alt=alt))\n #x = np.load('npy_files/'+str(freq) + '_short.npy') #this is data from 35 to 40 minutes into S5\n #x = np.load('npy_files/'+str(freq) + '_short_hilb.npy') #this is data from 35 to 40 minutes into S5\n #x = x[:, :15000]\n x = hilbert(x)\n now = time.time()\n \n num_to_keep = 5\n num_samps = x.shape[1]\n num_ests = (num_samps- N)//delta_n\n f_hat = np.zeros((num_to_keep, num_ests))\n err = np.zeros((num_to_keep, num_ests))\n amp = np.zeros((num_to_keep, num_ests))\n dt = 1/1500\n t = np.arange(0, N*dt, dt)\n print('Running esprit on ', freq, ' band. 
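check_ts() and get_top() above both score a chunk by how much its envelope swings across sub-blocks of roughly twenty cycles, using a 0/1 mask matrix so that masks @ abs(x) yields all per-block amplitude sums in one product. The trick in isolation:

import numpy as np

x = np.abs(np.random.RandomState(0).randn(1000))
N = 250                                # samples per sub-block
num_subs = x.size // N

masks = np.zeros((num_subs, x.size))
for i in range(num_subs):
    masks[i, N * i:N * i + N] = 1      # one row of ones per block

block_sums = masks @ x                 # per-block amplitude sums
amp_range = np.max(block_sums) / np.min(block_sums)
print(block_sums, amp_range)           # same as x.reshape(num_subs, N).sum(axis=1) when N divides the length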
Window size is ', N, ' spaced by ', delta_n)\n min_amp_f = []\n min_err_f = []\n print('initializing variables')\n print(time.time() - now)\n for i in range(num_ests):\n lind, rind = i*delta_n, i*delta_n + N\n dats = np.copy(x[:,lind:rind])\n dats /= np.std(dats, axis=1).reshape(63,1)\n dats =get_top(dats, freq, 5)\n for j in range(num_to_keep):\n tmp = dats[j,:]\n #tmp /= np.std(tmp) # std is sqrt(A) / 2 for pure sine wave\n peak = np.max(tmp[:100])\n p0 = np.angle(tmp[0]/peak)\n f= esprit(tmp, p, model_order)\n dat_hat = np.sqrt(2)*np.exp(complex(0,1)*2*np.pi*f*t)\n err_var = np.sqrt(np.sum(abs(dat_hat - tmp)))\n amp_var = check_ts(tmp,freq)\n f_hat[j,i] = f\n err[j,i] = err_var\n amp[j,i] = amp_var\n np.save('/oasis/tscc/scratch/fakins/fests/'+str(freq) + '_' + str(N) + '_' + str(delta_n) + '_fhat.npy', f_hat)\n np.save('/oasis/tscc/scratch/fakins/fests/'+str(freq) + '_' + str(N) + '_' + str(delta_n) + '_fhat_err.npy', np.array(err))\n np.save('/oasis/tscc/scratch/fakins/fests/'+str(freq) + '_' + str(N) + '_' + str(delta_n) + '_fhat_amp.npy', np.array(amp))\n \n #fig, axes = plt.subplots(3,1)\n #axes[0].plot(f_hat[:,i])\n #minaf = f_hat[np.argmin(amp[:,i]),i]\n #min_amp_f.append(minaf)\n #minerrf = f_hat[np.argmin(err[:,i]),i]\n #min_err_f.append(minerrf)\n #axes[0].scatter(np.argmin(amp[:,i]), minaf, color='r')\n #axes[0].scatter(np.argmin(err[:,i]), minerrf, color='g')\n #axes[1].plot(err[:,i])\n #axes[2].plot(amp[:,i])\n #plt.show()\n #min_amp_f = np.array(min_amp_f)\n #min_err_f = np.array(min_err_f)\n #print(min_amp_f - min_err_f)\n #plt.plot(min_amp_f, color='r')\n #plt.plot(min_err_f,color='g')\n #plt.show()\n \n np.save('/oasis/tscc/scratch/fakins/fests/'+str(freq) + '_' + str(N) + '_' + str(delta_n) + '_fhat.npy', f_hat)\n np.save('/oasis/tscc/scratch/fakins/fests/'+str(freq) + '_' + str(N) + '_' + str(delta_n) + '_fhat_err.npy', np.array(err))\n\ndef rm_outliers(fests, num_std):\n \"\"\"\n Trim outliers from the set\n Return the median of the trimmed series\n Input \n fests - np array\n each row is a vector of msmts froma sensor\n num_std - float\n number of standard deviations to clip\n Output\n medians - np array\n \"\"\"\n var = np.var(fests, axis=0)\n means = np.mean(fests, axis=0)\n diffs= abs(fests-means)\n medians = np.zeros(var.size)\n for i in range(fests.shape[1]):\n inds = np.where(diffs[:,i] < num_std*np.sqrt(var[i]))\n medians[i] = np.median(fests[:,i][inds])\n #medians[i] = np.median())\n return medians\n\ndef load_fest(freq, N=3000, delta_n=1500,tscc=False):\n \"\"\" \n Load the frequency estimate for freq for the given interval and spacing N and delta_n\n \"\"\"\n if tscc == True:\n root = '/oasis/tscc/scratch/fakins/fests/'+str(freq) + '_' + str(N) + '_' + str(delta_n) + '_fhat' \n else:\n root = 'npy_files/fests/'+str(freq) + '_' + str(N) + '_' + str(delta_n) + '_fhat' \n fhat = np.load(root+'.npy')\n err = np.load(root+'_err.npy')\n amp = np.load(root+'_amp.npy')\n return fhat, err, amp\n\ndef check_fest(freq, N, delta_n):\n \"\"\" Compare to walker? 
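rm_outliers() above discards, column by column, the measurements lying more than num_std standard deviations from the column mean, then takes the median of the survivors. A small numeric check with one wild sensor:

import numpy as np

fests = np.array([[127.0, 127.1],
                  [127.1, 127.0],
                  [127.2, 127.2],
                  [140.0, 127.1]])     # last row is an outlier in column 0

num_std = 1.0
var = np.var(fests, axis=0)
means = np.mean(fests, axis=0)
diffs = np.abs(fests - means)

medians = np.zeros(var.size)
for i in range(fests.shape[1]):
    keep = np.where(diffs[:, i] < num_std * np.sqrt(var[i]))
    medians[i] = np.median(fests[:, i][keep])
print(medians)                         # [127.1 127.1]; the 140.0 is clipped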
\"\"\"\n #i1 = int(15*60*1500 / delta_n)\n #i2 = int(40*60*1500/delta_n)\n i1, i2 = 0, -1\n x,y,z = load_fest(freq, N, delta_n)\n x = x[:,i1:i2]\n y = y[:,i1:i2]\n z = z[:,i1:i2]\n best_inds_err = np.argmin(y, axis=0)\n best_inds_amp = np.argmin(z, axis=0)\n i = np.linspace(0, best_inds_err.size-1, best_inds_err.size,dtype=int)\n fig, axis = plt.subplots(1,1)\n axes = [axis]\n t = i*delta_n/1500 / 60\n axes[0].plot(t, x[best_inds_err,i],color='b')\n #axes[0].plot(t, x[best_inds_amp,i],color='g')\n fig.suptitle(str(freq))\n #fig1, axes = plt.subplots(2,1)\n #axes[0].acorr(x[best_inds_amp, i], detrend=detrend)\n #axes[1].acorr(x[best_inds_err, i], detrend=detrend)\n #fig1.suptitle(str(freq))\n return fig, axes, t\n \ndef comp_fest(freqs, N, delta_n):\n \"\"\" Compare to walker? \"\"\"\n i1 = int(15*60*1500 / delta_n)\n i2 = int(40*60*1500/delta_n)\n #i1, i2 = 0, -1\n fig, axes = plt.subplots(1,1)\n fig1, axes1 = plt.subplots(1,1)\n check = 0\n for freq in freqs:\n x,y,z = load_fest(freq, N, delta_n)\n x = x[:,i1:i2]\n y = y[:,i1:i2]\n z = z[:,i1:i2]\n best_inds_err = np.argmin(y, axis=0)\n best_inds_amp = np.argmin(z, axis=0)\n i = np.linspace(0, best_inds_err.size-1, best_inds_err.size,dtype=int)\n if check == 0:\n ref_x = x[best_inds_err,i]\n ratio = 1\n check = 1\n else:\n curr_x = x[best_inds_err, i]\n ratio = curr_x@ref_x.T / (np.square(np.linalg.norm(curr_x)))\n print(ratio)\n axes.plot(i, (x[best_inds_err,i]*ratio))\n #axes[1].plot(i, z[best_inds_amp, i], color='b')\n #axes[2].plot(i, y[best_inds_amp, i], color='g')\n axes1.acorr(x[best_inds_amp, i], detrend=detrend)\n\ndef comp_two_fest(freqs, N, delta_n):\n \"\"\" Compare to walker? \"\"\"\n i1 = int(15*60*1500 / delta_n)\n i2 = int(40*60*1500/delta_n)\n #i1, i2 = 0, -1\n fig, axes = plt.subplots(1,1)\n fig1, axes1 = plt.subplots(1,1)\n check = 0\n x,y,z = load_fest(freqs[0], N, delta_n)\n x = x[:,i1:i2]\n y = y[:,i1:i2]\n z = z[:,i1:i2]\n best_inds_err = np.argmin(y, axis=0)\n i = np.linspace(0, best_inds_err.size-1, best_inds_err.size,dtype=int)\n ref_x = x[best_inds_err,i]\n ratio = 1\n check = 1\n\n \n x,y,z = load_fest(freqs[1], N, delta_n)\n x = x[:,i1:i2]\n y = y[:,i1:i2]\n z = z[:,i1:i2]\n curr_x = x[best_inds_err, i]\n ratio = curr_x@ref_x.T / (np.square(np.linalg.norm(curr_x)))\n axes.plot(i, (curr_x*ratio))\n axes.plot(i, ref_x)\n #axes[1].plot(i, z[best_inds_amp, i], color='b')\n #axes[2].plot(i, y[best_inds_amp, i], color='g')\n #axes1.acorr(x[best_inds_amp, i], detrend=detrend)\n axes1.plot(i, ref_x - curr_x*ratio)\n\ndef comp_filter():\n \"\"\"\n I ran ESPRIT on 385 with .2 Hz bandwidth\n and .1 \n Compare the results \n \"\"\"\n freq = 385\n i1 = int(6.5*60*1500/750)\n i2 = int(60*60*1500/750)\n narr_x, y, z = load_fest(freq, N=1500, delta_n=750)\n best_inds_err = np.argmin(y, axis=0)\n i = np.linspace(0, best_inds_err.size-1, best_inds_err.size, dtype=int)\n narr_x = narr_x[best_inds_err,i]\n root = 'npy_files/fests/385_1500_750_fhat' \n x = np.load(root+'_orig.npy')\n y = np.load(root+'_err_orig.npy')\n z = np.load(root+'_amp_orig.npy')\n best_inds_err = np.argmin(y, axis=0)\n x = x[best_inds_err,i]\n plt.plot(x[i1:i2])\n plt.plot(narr_x[i1:i2])\n plt.show()\n\nif __name__ == '__main__':\n freq = int(sys.argv[1])\n N = int(sys.argv[2])\n delta_n = int(sys.argv[3])\n T = 1\n p = 1 \n M = 40\n\n run_esprit(freq, p, M, N, delta_n, alt=False)\n\n#plt.figure()\n#test_esprit(freq,p,M,T)\n#freq = 
335\n#plt.figure()\n#test_esprit(freq,p,M,T)\n#plt.show()\n","sub_path":"audio/autoregressive.py","file_name":"autoregressive.py","file_ext":"py","file_size_in_byte":19238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601506192","text":"# Copyright (c) 2021 OpenKS Authors, DCD Research Lab, Zhejiang University. \n# All Rights Reserved.\n\nimport logging\nimport argparse\nimport os\nimport paddle.fluid as fluid\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom ..model import KGLearnModel\nfrom ...distributed.openks_distributed import KSDistributedFactory\nfrom ...distributed.openks_distributed.base import RoleMaker\nfrom ...distributed.openks_strategy.cpu import CPUStrategy, SyncModeConfig\n\nlogger = logging.getLogger(__name__)\n\n\n@KGLearnModel.register(\"KGLearn\", \"Paddle\")\nclass KGLearnPaddle(KGLearnModel):\n\tdef __init__(self, name='paddle-default', graph=None, model=None, args=None):\n\t\tself.name = name\n\t\tself.graph = graph\n\t\tself.args = args\n\t\tself.model = model\n\n\tdef triples_reader(self, ratio=0.01):\n\t\t\"\"\"read from triple data files to id triples\"\"\"\n\t\trel2id = self.graph.relation_to_id()\n\t\ttrain_triples, test_triples = train_test_split(self.graph.triples, test_size=ratio)\n\t\ttrain_triples = [(triple[0][0], rel2id[triple[0][1]], triple[0][2]) for triple in train_triples]\n\t\ttest_triples = [(triple[0][0], rel2id[triple[0][1]], triple[0][2]) for triple in test_triples]\n\t\treturn np.array(train_triples), np.array(test_triples), np.array(test_triples)\n\n\tdef triples_generator(self, train_triples, batch_size):\n\t\ttrain_triple_positive_batches = []\n\t\tn = len(train_triples)\n\t\trand_idx = np.random.permutation(n)\n\t\tn_triple = len(rand_idx)\n\t\tstart = 0\n\t\twhile start < n_triple:\n\t\t\tend = min(start + batch_size, n_triple)\n\t\t\ttrain_triple_positive = train_triples[rand_idx[start:end]]\n\t\t\tstart = end\n\t\t\ttrain_triple_positive_batches.append(train_triple_positive)\n\n\t\tdef triple_constructor(train_triple_positive):\n\t\t\t\"\"\" training triples generator \"\"\"\n\t\t\tsize = len(train_triple_positive) # neg_times = 1\n\t\t\ttrain_triple_negative = train_triple_positive.repeat(1, axis=0)\n\t\t\treplace_head_probability = 0.5 * np.ones(size)\n\t\t\treplace_entity_id = np.random.randint(size, size=size)\n\t\t\trandom_num = np.random.random(size=size)\n\t\t\tindex_t = (random_num < replace_head_probability) * 1\n\t\t\ttrain_triple_negative[:, 0] = train_triple_negative[:, 0] + (replace_entity_id - train_triple_negative[:, 0]) * index_t\n\t\t\ttrain_triple_negative[:, 2] = replace_entity_id + (train_triple_negative[:, 2] - replace_entity_id) * index_t\n\t\t\ttrain_triple_positive = np.expand_dims(train_triple_positive, axis=2)\n\t\t\ttrain_triple_negative = np.expand_dims(train_triple_negative, axis=2)\n\t\t\treturn train_triple_positive, train_triple_negative\n\n\t\tdef triple_loader():\n\t\t\tfor batch_data in train_triple_positive_batches:\n\t\t\t\tyield triple_constructor(batch_data)\n\t\t\n\t\treturn triple_loader\n\n\tdef evaluate(self, exe, program, test_triples, test_feed_list, fetch_list):\n\t\tall_rank = []\n\t\tcount = 0\n\t\tfor triple in test_triples:\n\t\t\tdata = np.array(triple)\n\t\t\tdata = data.reshape((-1))\n\t\t\tfeed_dict = {}\n\t\t\tfor k, v in zip(test_feed_list, [data]):\n\t\t\t\tfeed_dict[k] = v\n\t\t\ttail_score, head_score = exe.run(program=program, fetch_list=fetch_list, feed=feed_dict)\n\n\t\t\thead, relation, 
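triple_constructor() above corrupts each positive (head, relation, tail) by swapping in a random entity for the head or the tail, each with probability 0.5, via the index_t mask arithmetic. Note it draws replacement ids from range(len(batch)) rather than the total entity count, which looks unintended. The same sampling written plainly:

import numpy as np

rng = np.random.RandomState(0)
positives = np.array([[0, 1, 2],
                      [3, 0, 4]])              # (head, relation, tail) rows
num_entities = 5

negatives = positives.copy()
replace_ids = rng.randint(num_entities, size=len(negatives))
corrupt_head = rng.random_sample(len(negatives)) < 0.5
negatives[corrupt_head, 0] = replace_ids[corrupt_head]     # corrupt the head...
negatives[~corrupt_head, 2] = replace_ids[~corrupt_head]   # ...or else the tail
print(negatives)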
tail = feed_dict[\"test_triple\"][0], feed_dict[\"test_triple\"][1], feed_dict[\"test_triple\"][2]\n\t\t\thead_order = np.argsort(head_score)\n\t\t\ttail_order = np.argsort(tail_score)\n\t\t\thead_rank_raw = 1\n\t\t\ttail_rank_raw = 1\n\t\t\tfor candidate in head_order:\n\t\t\t\tif candidate == head:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\thead_rank_raw += 1\n\t\t\tfor candidate in tail_order:\n\t\t\t\tif candidate == tail:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\ttail_rank_raw += 1\n\t\t\tall_rank.extend([head_rank_raw, tail_rank_raw])\n\t\t\tif count % 500 == 0:\n\t\t\t\tprint(\"=================\")\n\t\t\t\tprint((np.array(all_rank) <= 1).sum(), (np.array(all_rank) <= 3).sum(), (np.array(all_rank) <= 10).sum())\n\t\t\t\tprint(\"=================\")\n\t\t\tcount += 1\n\t\traw_rank = np.array(all_rank)\n\t\treturn (raw_rank <= 1).mean(), (raw_rank <= 3).mean(), (raw_rank <= 10).mean(), (1 / raw_rank).mean()\n\n\tdef run(self, dist=False):\n\t\tprogram = None\n\t\tdist_algorithm = None\n\n\t\ttrain_triples, valid_triples, test_triples = self.triples_reader(ratio=0.01)\n\n\t\tdevice = fluid.cuda_places() if self.args['gpu'] else fluid.cpu_places()\n\n\t\tif dist:\n\t\t\tdist_algorithm = KSDistributedFactory.instantiation(flag=0)\n\t\t\trole = RoleMaker.PaddleCloudRoleMaker()\n\t\t\tdist_algorithm.init(role)\n\n\t\tmodel = self.model(\n\t\t\tnum_entity=self.graph.get_entity_num(),\n\t\t\tnum_relation=self.graph.get_relation_num(),\n\t\t\thidden_size=self.args['hidden_size'],\n\t\t\tmargin=self.args['margin'],\n\t\t\tlr=self.args['learning_rate'],\n\t\t\topt=self.args['optimizer'],\n\t\t\tdist=dist_algorithm)\n\n\t\tif dist:\n\t\t\tif dist_algorithm.is_server():\n\t\t\t\tdist_algorithm.init_server()\n\t\t\t\tdist_algorithm.run_server()\n\t\t\telif dist_algorithm.is_worker():\n\t\t\t\tdist_algorithm.init_worker()\n\t\t\t\tprogram = dist_algorithm.main_program\n\t\telse:\n\t\t\tprogram = fluid.CompiledProgram(model.train_program).with_data_parallel(loss_name=model.train_fetch_vars[0].name)\n\n\t\ttrain_loader = fluid.io.DataLoader.from_generator(feed_list=model.train_feed_vars, capacity=20, iterable=True)\n\t\ttrain_loader.set_batch_generator(self.triples_generator(train_triples, batch_size=self.args['batch_size']), places=device)\n\n\t\texe = fluid.Executor(device[0])\n\t\texe.run(model.startup_program)\n\t\texe.run(fluid.default_startup_program())\n\n\t\tbest_score = 0.0\n\t\tfor epoch in range(1, self.args['epoch'] + 1):\n\t\t\tprint(\"Starting epoch: \", epoch)\n\t\t\tloss = 0\n\t\t\t# train in a batch\n\t\t\tfor batch_feed_dict in train_loader():\n\t\t\t\tbatch_fetch = exe.run(program, fetch_list=model.train_fetch_vars, feed=batch_feed_dict)\n\t\t\t\tloss += batch_fetch[0]\n\t\t\tprint(\"Loss: \" + str(loss))\n\n\t\t\t# evaluation periodically\n\t\t\tif epoch % self.args['eval_freq'] == 0:\n\t\t\t\tprint(\"Starting validation...\")\n\t\t\t\t_, _, hits_at_10, _ = self.evaluate(exe, model.test_program, valid_triples, model.test_feed_list, model.test_fetch_vars)\n\t\t\t\tscore = hits_at_10\n\t\t\t\tprint(\"HIT@10: \" + str(score))\n\t\t\t\tif score > best_score:\n\t\t\t\t\tbest_score = score\n\t\t\t\t\tfluid.io.save_params(exe, dirname=self.args['model_dir'], main_program=model.train_program)\n\t\tif dist:\n\t\t\tdist_algorithm.stop_worker()\n\n\t\t# load saved model and test\n\t\tfluid.io.load_params(exe, dirname=self.args['model_dir'], main_program=model.train_program)\n\t\tscores = self.evaluate(exe, program, test_triples, model.test_feed_list, model.test_fetch_vars)\n\t\tprint(\"Test 
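The four numbers returned by evaluate() above are Hits@1, Hits@3, Hits@10 and the mean reciprocal rank over the collected head/tail ranks. From a vector of raw ranks:

import numpy as np

ranks = np.array([1, 4, 2, 15, 1, 7])
hits_at_1 = (ranks <= 1).mean()    # 0.333...
hits_at_3 = (ranks <= 3).mean()    # 0.5
hits_at_10 = (ranks <= 10).mean()  # 0.833...
mrr = (1.0 / ranks).mean()         # mean reciprocal rank
print(hits_at_1, hits_at_3, hits_at_10, mrr)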
scores: \", scores)\n","sub_path":"openks/models/paddle/kg_learn.py","file_name":"kg_learn.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76256669","text":"from datetime import datetime\nimport logging\nimport random\n\nfrom app import appbuilder, db, create_app\nfrom app.models import ContactGroup, Gender, Contact, Company\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_random_name(names_list, size=1):\n name_lst = [\n names_list[random.randrange(0, len(names_list))].decode(\"utf-8\").capitalize()\n for i in range(0, size)\n ]\n return \" \".join(name_lst)\n\n\napp = create_app(\"config\")\napp.app_context().push()\n\ncompany1 = Company(name=\"Company 1\")\ncompany2 = Company(name=\"Company 2\")\ntry:\n db.session.add(company1)\n db.session.add(company2)\n db.session.commit()\nexcept Exception as e:\n log.error(\"Group creation error: %s\", e)\n db.session.rollback()\n exit(1)\n\n\nrole_admin = appbuilder.sm.find_role(appbuilder.sm.auth_role_admin)\n\nuser1 = appbuilder.sm.add_user(\n \"user1_company1\", \"user1\", \"test\", \"user1@company1.com\", role_admin, \"password\"\n)\nuser2 = appbuilder.sm.add_user(\n \"user1_company2\", \"user1\", \"test\", \"user1@company2.com\", role_admin, \"password\"\n)\nuser3 = appbuilder.sm.add_user(\n \"user2_company2\", \"user2\", \"test\", \"user2@company2.com\", role_admin, \"password\"\n)\nuser1.company = company1\nuser2.company = company2\nuser3.company = company2\ndb.session.merge(user1)\ndb.session.merge(user2)\ndb.session.merge(user3)\ndb.session.commit()\n\ntry:\n db.session.add(ContactGroup(name=\"Friends\"))\n db.session.add(ContactGroup(name=\"Family\"))\n db.session.add(ContactGroup(name=\"Work\"))\n db.session.commit()\nexcept Exception as e:\n log.error(\"Group creation error: %s\", e)\n db.session.rollback()\n exit(1)\n\ntry:\n db.session.add(Gender(name=\"Male\"))\n db.session.add(Gender(name=\"Female\"))\n db.session.commit()\nexcept Exception as e:\n log.error(\"Gender creation error: %s\", e)\n db.session.rollback()\n exit(1)\n\nf = open(\"NAMES.DIC\", \"rb\")\nnames_list = [x.strip() for x in f.readlines()]\n\nf.close()\n\nj = 1\nfor i in range(1, 100):\n c = Contact()\n c.name = get_random_name(names_list, random.randrange(2, 6))\n c.address = \"Street \" + names_list[random.randrange(0, len(names_list))].decode(\n \"utf-8\"\n )\n c.personal_phone = random.randrange(1111111, 9999999)\n c.personal_celphone = random.randrange(1111111, 9999999)\n c.contact_group_id = random.randrange(1, 4)\n c.gender_id = random.randrange(1, 3)\n year = random.choice(range(1900, 2012))\n month = random.choice(range(1, 12))\n day = random.choice(range(1, 28))\n c.birthday = datetime(year, month, day)\n c.changed_on = datetime.now()\n c.created_on = datetime.now()\n if j == 1:\n j += 1\n _user = user1\n elif j == 2:\n j += 1\n _user = user2\n else:\n j = 1\n _user = user3\n c.created_by = _user\n c.changed_by = _user\n\n db.session.add(c)\n try:\n db.session.commit()\n print(\"inserted\", c)\n except Exception as e:\n log.error(\"Contact creation error: %s\", e)\n db.session.rollback()\n","sub_path":"examples/extendsecurity/testdata.py","file_name":"testdata.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"355768361","text":"# -*- coding: utf-8 -*-\nimport sys, pyexcel, re,json\n# Leer el documento de excel\ndef read_excel_sheets(sheet_name=None, 
file_url=None, all_sheets=False):\n #if True:\n try:\n book_dict = pyexcel.get_book_dict(file_name=file_url)\n if all_sheets:\n dict_all_sheets = {}\n for name_sheet in book_dict:\n records = book_dict[name_sheet]\n if records:\n header = records.pop(0)\n else:\n header = []\n try:\n header = [str(col).lower().replace(u'\\xa0',u' ').strip().replace(' ', '_') for col in header]\n except UnicodeEncodeError:\n header = [col.lower().replace(u'\\xa0',u' ').strip().replace(' ', '_') for col in header]\n dict_all_sheets[ name_sheet.lower().replace(' ', '_') ] = {\n 'header': header,\n 'records': records\n }\n return dict_all_sheets\n if book_dict.get(sheet_name):\n records = book_dict[sheet_name]\n header = records.pop(0)\n try:\n header = [str(col).lower().replace(u'\\xa0',u' ').strip().replace(' ', '_') for col in header]\n except UnicodeEncodeError:\n header = [col.lower().replace(u'\\xa0',u' ').strip().replace(' ', '_') for col in header]\n return header, records\n except Exception as e:\n print( json.dumps({'error': 'Ocurrio un error al leer el documento Excel', 'msg': str(e)}) )\n\ndef get_month_number( m ):\n dict_months = {\n \"ene\": \"01\",\n \"feb\": \"02\",\n \"marz\": \"03\",\n \"abr\": \"04\",\n \"may\": \"05\",\n \"jun\": \"06\",\n \"jul\": \"07\",\n \"ago\": \"08\",\n \"sep\": \"09\",\n \"oct\": \"10\",\n \"nov\": \"11\",\n \"dic\": \"12\"\n }\n return dict_months.get( m, '0' )\n\ndef get_full_date( text_with_date ):\n try:\n text_fecha = text_with_date.split(')')[1]\n text_fecha = text_fecha.lower()\n\n # 'text_fecha=',text_fecha\n \n day = re.search(r\"[0-9]{1,2}\", text_fecha, re.IGNORECASE).group()\n int_day = int(day)\n day = \"0\"+str(int_day) if int_day < 10 else day\n # 'day=',day\n \n month = re.search(r\"ene|feb|marz|abr|may|jun|jul|ago|sep|oct|nov|dic\", text_fecha, re.IGNORECASE).group()\n # 'month=',month\n n_month = get_month_number(month)\n \n year = re.search(r\"202[0-9]\", text_fecha, re.IGNORECASE).group()\n # 'year=',year\n full_date = year + '-' + n_month + '-' + day\n # 'full_date=',full_date\n return full_date\n except Exception as e:\n return {'error': 'No se pudo encontrar la fecha en la hoja diesel', 'msg': 'text_with_date= {} error= {}'.format(text_with_date, e)}\n\n\"\"\"\n# Procesa la pagina de Diesel para obtener los totales de las ventas y la fecha de la venta\n\"\"\"\ndef process_diesel_sheet( content_sheet ):\n # Obtengo el texto donde está la fecha de corte\n # '====== Buscando la fecha ======'\n text_with_date = ''\n for r in content_sheet:\n for c in r:\n if re.match(r\"(TEOTI|teoti|Teoti).*(\\s+)?\\((\\s+)?5787(\\s+)?\\)\", str(c)):\n text_with_date = c\n break\n if text_with_date:\n break\n # 'text_with_date=',text_with_date\n \n # Proceso el texto encontrado para formatear la fecha\n full_date = ''\n if text_with_date:\n full_date = get_full_date( text_with_date )\n\n # Recorro de nuevo la información de la hoja para buscar los totales\n # '====== Buscando los totales ======'\n pos_row = None\n pos_col = None\n for cont_r, r in enumerate(content_sheet):\n for cont_c, c in enumerate(r):\n if str(c).lower().strip() == \"r1\":\n pos_row = cont_r\n pos_col = cont_c\n break\n if pos_row and pos_col:\n break\n # 'pos_row=',pos_row\n # 'pos_col=',pos_col\n if not pos_row or not pos_col:\n dict_totales = {'error': 'No se encontraron las posiciones para obtener los totales', 'msg': 'pos_row= {} pos_col= {}'.format(pos_row, pos_col)}\n else:\n dict_totales = {}\n for r in content_sheet[pos_row:]:\n tipo_bomba = r[ pos_col ]\n if not tipo_bomba:\n 
break\n dict_totales.update({\n tipo_bomba: round(r[ pos_col + 1 ], 2)\n })\n # 'dict_totales=',dict_totales\n return dict_totales, full_date\n\n\"\"\"\n# Procesa la pagina de Notas para obtener las compras de los Clientes\n\"\"\"\ndef get_client_name( full_name ):\n listEval = ['vales', 'notas', 'vale', 'nota']\n for strEval in listEval:\n lName = full_name.split( strEval )\n if len(lName) > 1:\n return lName[1].strip()\n return ''\n\ndef process_notas_sheet( content_sheet ):\n # '====== Buscando las ventas a los clientes ======'\n dict_ventas_clientes = {}\n for r in content_sheet:\n for cont_c, c in enumerate(r):\n try:\n if re.match(r\"^[0-9]{1,3}\\s+?(vale|nota)\", c.lower().strip()):\n monto_vendido = 0\n for cc in r[ cont_c + 1: ]:\n try:\n monto_vendido = round( float( cc ), 2 )\n break\n except:\n continue\n nameClient = get_client_name( c.lower() )\n dict_ventas_clientes.update({\n nameClient.lower().replace('.', ''): monto_vendido\n })\n except:\n continue\n # 'dict_ventas_clientes=',dict_ventas_clientes\n return dict_ventas_clientes\n\nif __name__ == \"__main__\":\n # Dictionary ={1:'Welcome', 2:'to',3:'Geeks', 4:'for',5:'Geeks'}\n # print(json.dumps(Dictionary))\n\n name_file = sys.argv[1]\n \n # Dirección donde está guardado el Excel\n # Windows\n file_url = 'C:/xampp/htdocs/sistemaponchov2/excel_files/{}.xlsx'.format(name_file)\n # Ubuntu\n #file_url = '/var/www/html/sistemaponchov2/excel_files/{}.xlsx'.format(name_file)\n \n # Obtengo un diccionario con las páginas y el contenido de cada una\n dict_all_sheets_excel = read_excel_sheets(file_url=file_url, all_sheets=True)\n dict_totales_found, full_date = process_diesel_sheet( dict_all_sheets_excel.get('diesel', {}).get('records', []) )\n dict_ventas_clientes = process_notas_sheet( dict_all_sheets_excel.get('notas', {}).get('records', []) )\n print( json.dumps( {\n 'full_date': full_date,\n 'totales': dict_totales_found,\n 'clientes': dict_ventas_clientes\n } ) )","sub_path":"lee_excel.py","file_name":"lee_excel.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"218728726","text":"\"\"\"users table\n\nRevision ID: f4f59b4e4455\nRevises: 512c0326da18\nCreate Date: 2019-04-23 09:34:00.943442\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f4f59b4e4455'\ndown_revision = '512c0326da18'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
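get_full_date() in lee_excel.py above scrapes a day, an abbreviated Spanish month and a year out of free text with three re.search calls and maps the month through get_month_number(). A worked example; since re.search returns the first match, the day must precede any other digits in the text after the ')' split:

import re

dict_months = {"ene": "01", "feb": "02", "marz": "03", "abr": "04",
               "may": "05", "jun": "06", "jul": "07", "ago": "08",
               "sep": "09", "oct": "10", "nov": "11", "dic": "12"}

text = "TEOTIHUACAN (5787) corte al 5 ago 2021"
texto = text.split(")")[1].lower()

day = re.search(r"[0-9]{1,2}", texto).group().zfill(2)
month = dict_months[re.search(r"ene|feb|marz|abr|may|jun|jul|ago|sep|oct|nov|dic", texto).group()]
year = re.search(r"202[0-9]", texto).group()
print(year + "-" + month + "-" + day)   # 2021-08-05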
###\n op.create_table('irinfo',\n sa.Column('cas', sa.String(length=12), nullable=False),\n sa.Column('en_number', sa.String(length=50), nullable=True),\n sa.Column('zh_number', sa.String(length=50), nullable=True),\n sa.Column('formula', sa.String(length=50), nullable=True),\n sa.Column('Spectra', sa.String(length=50), nullable=True),\n sa.PrimaryKeyConstraint('cas')\n )\n op.create_index(op.f('ix_irinfo_en_number'), 'irinfo', ['en_number'], unique=False)\n op.create_index(op.f('ix_irinfo_zh_number'), 'irinfo', ['zh_number'], unique=False)\n op.create_table('post',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('body', sa.String(length=140), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_post_timestamp'), table_name='post')\n op.drop_table('post')\n op.drop_index(op.f('ix_irinfo_zh_number'), table_name='irinfo')\n op.drop_index(op.f('ix_irinfo_en_number'), table_name='irinfo')\n op.drop_table('irinfo')\n # ### end Alembic commands ###\n","sub_path":"ir_server/migrations/versions/f4f59b4e4455_users_table.py","file_name":"f4f59b4e4455_users_table.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126914926","text":"if __name__ == \"__main__\":\n from bs4 import BeautifulSoup\n\nclass banner():\n def __init__(self):\n f = open('../index.html',encoding='utf8',mode='r')\n self.soup = BeautifulSoup(f.read(),features='lxml')\n f.close()\n def to_html(self,change):\n f = open('../index_newbanner.html',encoding='utf8',mode='w')\n f.write(change)\n f.close()\n def change_text(self,slide,line,text):\n # slide: điền trang muốn thay đổi; 1 hoặc 2\n # line: điền dòng muốn thay đổi; 1, 2 hoặc 3\n # text: điền nội dung thay đổi\n self.soup.find(id=[['bannertext1','bannertitle1','bannertyping1'],\n ['bannertext2','bannertitle2','bannertyping2']][slide-1][line-1]).string.replace_with(text)\n self.to_html(str(self.soup.prettify()))\n\n# ví dụ\nb = banner()\nb.change_text(slide=1,line=2,text='fresh porto coffee')\n","sub_path":"python/banner.py","file_name":"banner.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559263551","text":"# get overlap of 2 kmer lists\n\nimport os, sys\n\ndir1= sys.argv[1]\n#kmers= open(sys.argv[2],\"r\")\nout= open(\"all_clusters_cre_rank.txt\",\"w\")\n\n# def get_kmers(kmers, lista):\n# for line in kmers:\n# L=line.strip().split('\\t')\n# k=L[0]\n# lista.append(k)\n# return lista\n\ndef add_to_dict(inp, D, curr_list, count):\n header= inp.readline()\n for line in inp:\n L=line.strip().split('\\t')\n k2=L[0]\n rank=L[1]\n if count != 0:\n if k2 not in curr_list:\n newlist=[]\n curr_list.append(k2)\n for i in range(count):\n newlist.append(\"NA\")\n if k2 not in D:\n D[k2]=newlist+[str(rank)]\n else:\n D[k2].append(str(rank))\n else:\n D[k2].append(str(rank))\n else:\n if k2 not in D:\n D[k2]=[str(rank)]\n\n \n \n return D, curr_list\n\ndef add_to_dict2(inp, D, curr_list, count):\n header= inp.readline()\n for line in inp:\n L=line.strip().split('\\t')\n k2=L[0]\n rank=L[6]\n if count 
!= 0:\n if k2 not in curr_list:\n newlist=[]\n curr_list.append(k2)\n for i in range(count):\n newlist.append(\"NA\")\n if k2 not in D:\n D[k2]=newlist+[str(rank)]\n else:\n D[k2].append(str(rank))\n else:\n D[k2].append(str(rank))\n else:\n if k2 not in D:\n D[k2]=[str(rank)]\n\n \n \n return D, curr_list\n\nkmer_list=[]\n#final_list= get_kmers(kmers, kmer_list)\n#kmers.close()\nkdict={}\ntitle_list=[]\ncurrent_k_list=[]\ncount=0\nfor file in os.listdir(dir1):\n if file.endswith(\"_imp_avgrank_RF.txt\") or file.endswith(\"_imp\"):\n name = file.strip().split(\"_imp\")[0]\n title_list.append(name)\n inp = open(dir1 + \"/\" + file)\n kdict, current_k_list= add_to_dict(inp, kdict, current_k_list, count)\n inp.close()\n count= count+1\n if file.endswith(\"_imp_scaled.txt\"):\n name = file.strip().split(\"_imp\")[0]\n title_list.append(name)\n inp = open(dir1 + \"/\" + file)\n kdict, current_k_list= add_to_dict2(inp, kdict, current_k_list, count)\n inp.close()\n count= count+1\n\nprint(kdict)\ntitlestr= \"\\t\".join(title_list)\nout.write(\"kmer\\t%s\\n\" % (titlestr))\nfor key in kdict:\n data = kdict[key]\n if(len(set(data))==1):\n pass\n else:\n out.write(\"%s\\t\" % (key))\n for d in data:\n out.write(\"%s\\t\" % (d))\n out.write(\"\\n\")\nout.close()","sub_path":"get_same_kmers.py","file_name":"get_same_kmers.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141968015","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:Gao Xiang\n\nimport os\nimport sys\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(base_dir)\nfrom manager import manager\n\nclass Manager(object):\n\n def run(self):\n while True:\n view = '''\n 1,学生视图\n 2,教师视图\n 3,学校视图\n 4,退出系统\n '''\n print(view)\n choice = input(\"选择视图:\")\n if choice == '1':\n a = manager.Student_view()\n a.run()\n if choice == '2':\n a = manager.Teacher_view()\n a.run()\n if choice == '3':\n a = manager.Manager_view()\n a.manager_teacher()\n if choice == '4':\n exit()\n if not choice.isdigit() or choice >'4':\n print(\"无效的输入\")\n\nif __name__ == '__main__':\n select_course = Manager()\n select_course.run()","sub_path":"bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441905461","text":"import cv2\nimport numpy as np\nfrom glob import glob\nimport os, shutil\nimport matplotlib.pyplot as plt\n\n\noutpath = 'outFrames_fixed/'\nimg_paths = glob('outFrames/*.png')\n\nshutil.rmtree(outpath, False)\nos.mkdir(outpath)\n\npurple = np.array([172, 125, 214], dtype=np.uint8)\ntofind = np.zeros((8, 3*4, 3), dtype=np.uint8) # pause sign\ntofind[:, :4] = tofind[:, 8:] = purple\ndesired_y_pos = 20 # has to be above (less than) of all the possible real values\ndesired_height = 175\n\nfor img_path in img_paths:\n img = cv2.imread(img_path)\n \n match = cv2.matchTemplate(img, tofind, cv2.TM_SQDIFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)\n top_left = min_loc\n img = img[top_left[1] - desired_y_pos:top_left[1]+desired_height, :, :]\n \n cv2.imwrite(outpath + os.path.basename(img_path), img)\n\nprint('done')\n","sub_path":"sim2600/stab_frames.py","file_name":"stab_frames.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"92010886","text":"# -*- coding: utff-8 -*-\n# 
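add_to_dict() in get_same_kmers.py above merges per-file kmer ranks into one table, back-filling "NA" for kmers absent from earlier files so each row stays aligned with the file count; kmers that later disappear are left short, a latent bug. The padding logic in miniature, with trailing gaps padded as well:

def merge_ranks(per_file_ranks):
    """per_file_ranks: list of {kmer: rank} dicts, one per input file."""
    table = {}
    for count, ranks in enumerate(per_file_ranks):
        for kmer, rank in ranks.items():
            # Kmers first seen now get leading "NA"s so column `count` lines up
            table.setdefault(kmer, ["NA"] * count).append(str(rank))
        for kmer in table:
            if len(table[kmer]) < count + 1:   # absent from this file
                table[kmer].append("NA")
    return table

print(merge_ranks([{"AAT": 1, "GGC": 2}, {"GGC": 1, "TTA": 3}]))
# {'AAT': ['1', 'NA'], 'GGC': ['2', '1'], 'TTA': ['NA', '3']}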
将二叉树的左右子树反转。两种方法:1.递归。2.遍历,用stack 存储节点。\n\ndef invertTree(root):\n #recursion\n# if not root:\n# return\n# root.left, root.right = root.right,root.left\n# \n# map(invertTree,(root.left,root.right))\n\n #Iteration\n \n if not root:\n return \n stack = [root]\n while len(stack):\n node = stack.pop()\n if node:\n node.left, node.right = node.right,node.left\n stack.append(node.left)\n stack,append(node.right)\n \n return root","sub_path":"workspace/新建文件夹/Leetcode/review/二叉树反转.py","file_name":"二叉树反转.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"137946713","text":"\r\n'''\r\n WAP to open a file, read data and store data in list.\r\n'''\r\n\r\nd=[]\r\n\r\nfile=open(\"colors.txt\")\r\n#d.extend(file.readlines())\r\nd=file.readlines()\r\n\r\nprint(d)\r\nfor i in d:\r\n print(i,end=\"\")\r\n","sub_path":"29.09.2020 Python/read_demo2.py","file_name":"read_demo2.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"569713386","text":"class SavingsAccount(object):\n \"\"\"This class represents a savings account\n with the owner's name, PIN, and balance. \"\"\"\n\n def __init__(self, name, pin, balance = 0.0):\n self._name = name;\n self._pin = pin;\n self._balance = balance\n\n def __lt__(self, other):\n return self._name < other._name\n\n\nif __name__ == \"__main__\":\n s1 = SavingsAccount(\"Ken\", \"1000\", 0)\n s2 = SavingsAccount(\"Bill\", \"1001\", 30)\n print (s1 < s2)\n print (s1 > s2)\n print (s2 > s1)\n print (s2 == s1)\n","sub_path":"ch03/SavingsAccount.py","file_name":"SavingsAccount.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17866990","text":"\"\"\"\nCopyright 2011 Mozes, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport socket\nimport select\nimport stomper\nimport logging\n\nfrom stompest.parser import StompFrameLineParser\nfrom stompest.error import StompProtocolError\n\nLOG_CATEGORY=\"stompest.simple\"\n\nclass Stomp(object):\n \"\"\"A simple implementation of a STOMP client\"\"\"\n \n def __init__(self, host, port):\n self.log = logging.getLogger(LOG_CATEGORY)\n self.host = host\n self.port = port\n self.socket = None\n self.sfile = None\n \n def connect(self, login='', passcode=''):\n self._socketConnect()\n self._write(stomper.connect(login, passcode))\n frame = self.receiveFrame()\n if frame['cmd'] == 'CONNECTED':\n return frame\n raise StompProtocolError('Unexpected frame received: %s' % frame)\n \n def disconnect(self):\n self._write(stomper.disconnect())\n self._socketDisconnect()\n\n def canRead(self, timeout=None):\n self._checkConnected()\n if timeout is None:\n readList, junk, junk = select.select([self.socket], [], [])\n else:\n readList, junk, junk = select.select([self.socket], [], [], timeout)\n return len(readList) > 0\n \n def send(self, dest, msg, 
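The iterative branch of invertTree() above has a typo (stack,append should be stack.append) and its commented-out recursive branch leans on map(), which is lazy in Python 3 and so never actually recurses. A corrected, self-contained version of both methods:

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def invert_recursive(root):
    if root:
        root.left, root.right = root.right, root.left
        invert_recursive(root.left)
        invert_recursive(root.right)
    return root

def invert_iterative(root):
    stack = [root]
    while stack:
        node = stack.pop()
        if node:
            node.left, node.right = node.right, node.left
            stack.append(node.left)
            stack.append(node.right)   # dot, not comma
    return root

t = invert_iterative(TreeNode(1, TreeNode(2), TreeNode(3)))
print(t.left.val, t.right.val)         # 3 2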
headers={}):\n frame = {'cmd': 'SEND', 'headers': headers, 'body': msg}\n frame['headers']['destination'] = dest\n self.sendFrame(frame)\n \n def subscribe(self, dest, headers={}):\n if not 'ack' in headers:\n headers['ack'] = 'auto'\n if not 'activemq.prefetchSize' in headers:\n headers['activemq.prefetchSize'] = 1\n headers['destination'] = dest\n self.sendFrame({'cmd': 'SUBSCRIBE', 'headers': headers, 'body': ''})\n \n def ack(self, frame):\n messageId = frame['headers']['message-id']\n self.sendFrame({'cmd': 'ACK', 'headers': {'message-id': messageId}, 'body': ''})\n \n def sendFrame(self, frame):\n self._write(self.packFrame(frame))\n \n def receiveFrame(self):\n self._checkConnected()\n parser = StompFrameLineParser()\n while (not parser.isDone()):\n parser.processLine(self.sfile.readline()[:-1])\n\n return parser.getMessage()\n\n def packFrame(self, frame):\n sFrame = stomper.Frame()\n sFrame.cmd = frame['cmd']\n sFrame.headers = frame['headers']\n sFrame.body = frame['body']\n return sFrame.pack()\n \n def _socketConnect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sfile = self.socket.makefile()\n self.socket.connect((self.host, self.port))\n \n def _socketDisconnect(self):\n self.sfile.close()\n self.sfile = None\n self.socket.close()\n self.socket = None\n \n def _connected(self):\n return self.socket is not None\n \n def _checkConnected(self):\n if not self._connected():\n raise Exception('Not connected')\n \n def _write(self, data):\n self._checkConnected()\n self.socket.sendall(data)\n","sub_path":"stompest/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"72154458","text":"from itertools import product\n\nimport numpy as np\nimport pandas as pd\nfrom numpy import ndarray\nfrom pandas import DataFrame, Panel, Series\n\nfrom linearmodels.compat.pandas import (is_categorical,\n is_datetime64_any_dtype,\n is_numeric_dtype, is_string_dtype,\n is_string_like)\nfrom linearmodels.utility import ensure_unique_column\n\n__all__ = ['PanelData']\n\n\nclass _Panel(object):\n \"\"\"\n Convert a MI DataFrame to a 3-d structure where columns are items\n\n Parameters\n ----------\n df : DataFrame\n Multiindex DataFrame containing floats\n\n Notes\n -----\n Contains the logic needed to transform a MI DataFrame with 2 levels\n into a minimal pandas Panel-like object\n \"\"\"\n\n def __init__(self, df):\n self._items = df.columns\n index = df.index\n self._major_axis = pd.Series(index.levels[1][index.labels[1]]).unique()\n self._minor_axis = pd.Series(index.levels[0][index.labels[0]]).unique()\n full_index = list(product(self._minor_axis, self._major_axis))\n self._full_index = pd.MultiIndex.from_tuples(full_index)\n new_df = df.copy().loc[self._full_index]\n self._frame = new_df\n i, j, k = len(self._items), len(self._major_axis), len(self.minor_axis)\n self._shape = (i, j, k)\n self._values = np.swapaxes(np.reshape(new_df.values.copy().T, (i, k, j)), 1, 2)\n\n @classmethod\n def from_array(cls, values, items, major_axis, minor_axis):\n index = list(product(minor_axis, major_axis))\n index = pd.MultiIndex.from_tuples(index)\n i, j, k = len(items), len(major_axis), len(minor_axis)\n values = np.swapaxes(values.copy(), 0, 2).ravel()\n values = np.reshape(values, ((j * k), i))\n\n df = pd.DataFrame(values, index=index, columns=items)\n return cls(df)\n\n @property\n def shape(self):\n return self._shape\n\n @property\n def 
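Typical client flow for the Stomp class above: connect, subscribe with client acks, drain frames while canRead() reports pending data, ack each message, disconnect. The broker host/port and destination are placeholders for a running STOMP broker:

from stompest.simple import Stomp

stomp = Stomp('localhost', 61613)       # assumed broker location
stomp.connect()
stomp.subscribe('/queue/testing', {'ack': 'client'})

while stomp.canRead(timeout=5):         # 5 s select() timeout
    frame = stomp.receiveFrame()
    print(frame['headers'], frame['body'])
    stomp.ack(frame)                    # client-ack mode: acknowledge explicitly

stomp.disconnect()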
items(self):\n return self._items\n\n @property\n def major_axis(self):\n return self._major_axis\n\n @property\n def minor_axis(self):\n return self._minor_axis\n\n @property\n def values(self):\n return self._values\n\n def to_frame(self):\n return self._frame\n\n\ndef convert_columns(s, drop_first):\n if is_string_dtype(s.dtype) and s.map(lambda v: is_string_like(v)).all():\n s = s.astype('category')\n\n if is_categorical(s):\n out = pd.get_dummies(s, drop_first=drop_first)\n out.columns = [str(s.name) + '.' + str(c) for c in out]\n return out\n return s\n\n\ndef expand_categoricals(x, drop_first):\n return pd.concat([convert_columns(x[c], drop_first) for c in x.columns], axis=1)\n\n\nclass PanelData(object):\n \"\"\"\n Abstraction to handle alternative formats for panel data\n\n Parameters\n ----------\n x : {ndarray, Series, DataFrame, Panel, DataArray}\n Input data\n var_name : str, optional\n Variable name to use when naming variables in NumPy arrays or\n xarray DataArrays\n convert_dummies : bool, optional\n Flat indicating whether pandas categoricals or string input data\n should be converted to dummy variables\n drop_first : bool, optional\n Flag indicating to drop first dummy category when converting\n\n Notes\n -----\n Data can be either 2- or 3-dimensional. The three key dimensions are\n\n * nvar - number of variables\n * nobs - number of time periods\n * nentity - number of entities\n\n All 3-d inputs should be in the form (nvar, nobs, nentity). With one\n exception, 2-d inputs are treated as (nobs, nentity) so that the input\n can be treated as-if being (1, nobs, nentity).\n\n If the 2-d input is a pandas DataFrame with a 2-level MultiIndex then the\n input is treated differently. Index level 0 is assumed ot be entity.\n Index level 1 is time. The columns are the variables. This is the most\n precise format to use since pandas Panels do not preserve all variable\n type information across transformations between Panel and MultiIndex\n DataFrame. 
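_Panel above keeps a (nitems, nmajor, nminor) ndarray view of a 2-level MultiIndex frame: index level 0 is the minor (entity) axis, level 1 the major (time) axis, and the columns are the items. A shape check of the reshape/swapaxes step on a tiny frame (the class reads index.labels, so it targets an older pandas; on current pandas the attribute is index.codes):

import numpy as np
import pandas as pd
from itertools import product

entities, times, items = ["a", "b"], [0, 1, 2], ["x", "y"]
index = pd.MultiIndex.from_tuples(list(product(entities, times)))
df = pd.DataFrame(np.arange(12.0).reshape(6, 2), index=index, columns=items)

# df.values.T is (items, entity*time); reshape to (items, entity, time),
# then swap the last two axes to get (items, time, entity)
values = np.swapaxes(np.reshape(df.values.T, (2, 2, 3)), 1, 2)
print(values.shape)        # (2, 3, 2)
print(values[0, :, 0])     # item 'x' for entity 'a' across time: [0. 2. 4.]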
MultiIndex Series are also accepted and treated as single\n column MultiIndex DataFrames.\n\n Raises\n ------\n TypeError\n If the input type is not supported\n ValueError\n If the input has the wrong number of dimensions or a MultiIndex\n DataFrame does not have 2 levels\n \"\"\"\n\n def __init__(self, x, var_name='x', convert_dummies=True, drop_first=True):\n self._var_name = var_name\n self._convert_dummies = convert_dummies\n self._drop_first = drop_first\n if isinstance(x, PanelData):\n x = x.dataframe\n self._original = x\n\n if not isinstance(x, (Series, DataFrame, Panel, ndarray)):\n try:\n from xarray import DataArray\n if isinstance(x, DataArray):\n if x.ndim not in (2, 3):\n raise ValueError('Only 2-d or 3-d DataArrays are supported')\n x = x.to_pandas()\n except ImportError:\n pass\n\n if isinstance(x, Series) and isinstance(x.index, pd.MultiIndex):\n x = DataFrame(x)\n elif isinstance(x, Series):\n raise ValueError('Series can only be used with a 2-level MultiIndex')\n\n if isinstance(x, (Panel, DataFrame)):\n if isinstance(x, DataFrame):\n if isinstance(x.index, pd.MultiIndex):\n if len(x.index.levels) != 2:\n raise ValueError('DataFrame input must have a '\n 'MultiIndex with 2 levels')\n self._frame = x.copy()\n else:\n self._frame = DataFrame({var_name: x.T.stack(dropna=False)})\n else:\n self._frame = x.swapaxes(1, 2).to_frame(filter_observations=False)\n elif isinstance(x, ndarray):\n if x.ndim not in (2, 3):\n raise ValueError('2 or 3-d array required for numpy input')\n if x.ndim == 2:\n x = x[None, :, :]\n\n k, t, n = x.shape\n var_str = var_name + '.{0:0>' + str(int(np.log10(k) + .01)) + '}'\n variables = [var_name] if k == 1 else [var_str.format(i) for i in range(k)]\n entity_str = 'entity.{0:0>' + str(int(np.log10(n) + .01)) + '}'\n entities = [entity_str.format(i) for i in range(n)]\n time = list(range(t))\n x = x.astype(np.float64)\n panel = _Panel.from_array(x, items=variables, major_axis=time,\n minor_axis=entities)\n self._fake_panel = panel\n self._frame = panel.to_frame()\n else:\n raise TypeError('Only ndarrays, DataFrames, Panels or DataArrays '\n 'are supported')\n if convert_dummies:\n self._frame = expand_categoricals(self._frame, drop_first)\n self._frame = self._frame.astype(np.float64)\n\n time_index = Series(self._frame.index.levels[1])\n if not (is_numeric_dtype(time_index.dtype) or\n is_datetime64_any_dtype(time_index.dtype)):\n raise ValueError('The index on the time dimension must be either '\n 'numeric or date-like')\n self._k, self._t, self._n = self.panel.shape\n self._frame.index.levels[0].name = 'entity'\n self._frame.index.levels[1].name = 'time'\n\n @property\n def panel(self):\n \"\"\"pandas Panel view of data\"\"\"\n return _Panel(self._frame)\n\n @property\n def dataframe(self):\n \"\"\"pandas DataFrame view of data\"\"\"\n return self._frame\n\n @property\n def values2d(self):\n \"\"\"NumPy ndarray view of dataframe\"\"\"\n return self._frame.values\n\n @property\n def values3d(self):\n \"\"\"NumPy ndarray view of panel\"\"\"\n return self.panel.values\n\n def drop(self, locs):\n \"\"\"\n Parameters\n ----------\n locs : ndarray\n Boolean array indicating observations to drop with reference to\n the dataframe view of the data\n \"\"\"\n self._frame = self._frame.loc[~locs.ravel()]\n self._frame = self._minimize_multiindex(self._frame)\n self._k, self._t, self._n = self.shape\n\n @property\n def shape(self):\n \"\"\"Shape of panel view of data\"\"\"\n return self.panel.shape\n\n @property\n def ndim(self):\n \"\"\"Number of dimensions 
of panel view of data\"\"\"\n return 3\n\n @property\n def isnull(self):\n \"\"\"Locations with missing observations\"\"\"\n return np.any(self._frame.isnull(), axis=1)\n\n @property\n def nobs(self):\n \"\"\"Number of time observations\"\"\"\n return self._t\n\n @property\n def nvar(self):\n \"\"\"Number of variables\"\"\"\n return self._k\n\n @property\n def nentity(self):\n \"\"\"Number of entities\"\"\"\n return self._n\n\n @property\n def vars(self):\n \"\"\"List of variable names\"\"\"\n return list(self._frame.columns)\n\n @property\n def time(self):\n \"\"\"List of time index names\"\"\"\n index = self._frame.index\n return list(index.levels[1][index.labels[1]].unique())\n\n @property\n def entities(self):\n \"\"\"List of entity index names\"\"\"\n index = self._frame.index\n return list(index.levels[0][index.labels[0]].unique())\n\n @property\n def entity_ids(self):\n \"\"\"\n Get array containing entity group membership information\n\n Returns\n -------\n id : ndarray\n 2d array containing entity ids corresponding dataframe view\n \"\"\"\n return np.asarray(self._frame.index.labels[0])[:, None]\n\n @property\n def time_ids(self):\n \"\"\"\n Get array containing time membership information\n\n Returns\n -------\n id : ndarray\n 2d array containing time ids corresponding dataframe view\n \"\"\"\n return np.asarray(self._frame.index.labels[1])[:, None]\n\n def _demean_both(self, weights):\n \"\"\"\n Entity and time demean\n\n Parameters\n ----------\n weights : PanelData, optional\n Weights to use in demeaning\n \"\"\"\n if self.nentity > self.nobs:\n group = 'entity'\n dummy = 'time'\n else:\n group = 'time'\n dummy = 'entity'\n e = self.demean(group, weights=weights)\n d = self.dummies(dummy, drop_first=True)\n d.index = e.index\n d = PanelData(d).demean(group, weights=weights)\n d = d.values2d\n e = e.values2d\n resid = e - d @ np.linalg.lstsq(d, e)[0]\n resid = DataFrame(resid, index=self._frame.index, columns=self._frame.columns)\n\n return PanelData(resid)\n\n def general_demean(self, groups, weights=None):\n \"\"\"\n Multi-way demeaning using only groupby\n\n Parameters\n ----------\n groups : PanelData\n Arrays with the same size containing group identifiers\n weights : PanelData, optional\n Weights to use in the weighted demeaning\n\n Returns\n -------\n demeaned : PanelData\n Weighted, demeaned data according to groups\n\n Notes\n -----\n Iterates until convergence\n \"\"\"\n if not isinstance(groups, PanelData):\n groups = PanelData(groups)\n if weights is None:\n weights = PanelData(pd.DataFrame(np.ones((self._frame.shape[0], 1)),\n index=self.index,\n columns=['weights']))\n weights = weights.values2d\n groups = groups.values2d.astype(np.int64)\n\n weight_sum = {}\n\n def weighted_group_mean(df, weights, root_w, level):\n num = (root_w * df).groupby(level=level).transform('sum')\n if level in weight_sum:\n denom = weight_sum[level]\n else:\n denom = weights.groupby(level=level).transform('sum')\n weight_sum[level] = denom\n return num.values / denom.values\n\n def demean_pass(frame, weights, root_w):\n levels = groups.shape[1]\n for level in range(levels):\n mu = weighted_group_mean(frame, weights, root_w, level)\n if level == 0:\n frame = frame - root_w * mu\n else:\n frame -= root_w * mu\n\n return frame\n\n # Swap out the index for better performance\n init_index = pd.DataFrame(groups)\n init_index.set_index(list(init_index.columns), inplace=True)\n\n root_w = np.sqrt(weights)\n weights = pd.DataFrame(weights, index=init_index.index)\n wframe = root_w * self._frame\n 
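# the raw group columns now serve as the index, so the groupby(level=...)\n # calls in demean_pass aggregate over each grouping variable directly\n 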
wframe.index = init_index.index\n\n previous = wframe\n current = demean_pass(previous, weights, root_w)\n if groups.shape[1] == 1:\n current.index = self._frame.index\n return PanelData(current)\n\n exclude = np.ptp(self._frame.values, 0) == 0\n max_rmse = np.sqrt(self._frame.values.var(0).max())\n scale = self._frame.std().values\n exclude = exclude | (scale < 1e-14 * max_rmse)\n replacement = np.maximum(scale, 1)\n scale[exclude] = replacement[exclude]\n scale = scale[None, :]\n\n while np.max(np.abs(current.values - previous.values) / scale) > 1e-8:\n previous = current\n current = demean_pass(previous, weights, root_w)\n current.index = self._frame.index\n\n return PanelData(current)\n\n def demean(self, group='entity', weights=None):\n \"\"\"\n Demeans data by either entity or time group\n\n Parameters\n ----------\n group : {'entity', 'time'}\n Group to use in demeaning\n weights : PanelData, optional\n Weights to implement weighted averaging\n\n Returns\n -------\n demeaned : PanelData\n Demeaned data according to type\n\n Notes\n -----\n If weights are provided, the values returned will be scaled by\n sqrt(weights) so that they can be used in WLS estimation.\n \"\"\"\n if group not in ('entity', 'time', 'both'):\n raise ValueError\n if group == 'both':\n return self._demean_both(weights)\n\n level = 0 if group == 'entity' else 1\n if weights is None:\n group_mu = self._frame.groupby(level=level).transform('mean')\n return PanelData(self._frame - group_mu)\n else:\n w = weights.values2d\n frame = self._frame.copy()\n frame = w * frame\n weighted_sum = frame.groupby(level=level).transform('sum')\n frame.iloc[:, :] = w\n sum_weights = frame.groupby(level=level).transform('sum')\n group_mu = weighted_sum / sum_weights\n return PanelData(np.sqrt(w) * (self._frame - group_mu))\n\n def __str__(self):\n return self.__class__.__name__ + '\\n' + str(self._frame)\n\n def __repr__(self):\n return self.__str__() + '\\n' + self.__class__.__name__ + ' object, id: ' + hex(id(self))\n\n def _repr_html_(self):\n return self.__class__.__name__ + '
' + self._frame._repr_html_()\n\n def count(self, group='entity'):\n \"\"\"\n Count number of observations by entity or time\n\n Parameters\n ----------\n group : {'entity', 'time'}\n Group to use in demeaning\n\n Returns\n -------\n count : DataFrame\n Counts according to type. Either (entity by var) or (time by var)\n \"\"\"\n v = self.panel.values\n axis = 1 if group == 'entity' else 2\n count = np.sum(np.isfinite(v), axis=axis)\n\n index = self.panel.minor_axis if group == 'entity' else self.panel.major_axis\n out = DataFrame(count.T, index=index, columns=self.vars)\n reindex = self.entities if group == 'entity' else self.time\n out = out.loc[reindex].astype(np.int64)\n out.index.name = group\n return out\n\n @property\n def index(self):\n \"\"\"Return the index of the multi-index dataframe view\"\"\"\n return self._frame.index\n\n def copy(self):\n \"\"\"Return a deep copy\"\"\"\n return PanelData(self._frame.copy(), var_name=self._var_name,\n convert_dummies=self._convert_dummies, drop_first=self._drop_first)\n\n def mean(self, group='entity', weights=None):\n \"\"\"\n Compute data mean by either entity or time group\n\n Parameters\n ----------\n group : {'entity', 'time'}\n Group to use in demeaning\n weights : PanelData, optional\n Weights to implement weighted averaging\n\n Returns\n -------\n mean : DataFrame\n Data mean according to type. Either (entity by var) or (time by var)\n \"\"\"\n level = 0 if group == 'entity' else 1\n if weights is None:\n mu = self._frame.groupby(level=level).mean()\n else:\n w = weights.values2d\n frame = self._frame.copy()\n frame = w * frame\n weighted_sum = frame.groupby(level=level).sum()\n frame.iloc[:, :] = w\n sum_weights = frame.groupby(level=level).sum()\n mu = weighted_sum / sum_weights\n\n reindex = self.entities if group == 'entity' else self.time\n out = mu.loc[reindex]\n\n return out\n\n def first_difference(self):\n \"\"\"\n Compute first differences of variables\n\n Returns\n -------\n diffs : PanelData\n Differenced values\n \"\"\"\n diffs = self.panel.values\n diffs = diffs[:, 1:] - diffs[:, :-1]\n diffs = Panel(diffs, items=self.panel.items,\n major_axis=self.panel.major_axis[1:],\n minor_axis=self.panel.minor_axis)\n diffs = diffs.swapaxes(1, 2).to_frame(filter_observations=False)\n diffs = diffs.reindex(self._frame.index).dropna(how='any')\n return PanelData(diffs)\n\n @staticmethod\n def _minimize_multiindex(df):\n index_cols = list(df.index.names)\n orig_names = index_cols[:]\n for i, col in enumerate(index_cols):\n col = ensure_unique_column(col, df)\n index_cols[i] = col\n df.index.names = index_cols\n df = df.reset_index()\n df = df.set_index(index_cols)\n df.index.names = orig_names\n return df\n\n def dummies(self, group='entity', drop_first=False):\n \"\"\"\n Generate entity or time dummies\n\n Parameters\n ----------\n group : {'entity', 'time'}, optional\n Type of dummies to generate\n drop_first : bool, optional\n Flag indicating that the dummy column corresponding to the first\n entity or time period should be dropped\n\n Returns\n -------\n dummies : DataFrame\n Dummy variables\n \"\"\"\n if group not in ('entity', 'time'):\n raise ValueError\n axis = 0 if group == 'entity' else 1\n labels = self._frame.index.labels\n levels = self._frame.index.levels\n cat = pd.Categorical(levels[axis][labels[axis]])\n dummies = pd.get_dummies(cat, drop_first=drop_first)\n cols = self.entities if group == 'entity' else self.time\n return dummies[[c for c in cols if c in 
dummies]].astype(np.float64)\n","sub_path":"linearmodels/panel/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":19575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494997944","text":"#!/usr/bin/env python\n\nfrom numpy import *\n\ndef match(arr1, arr2, arr2_sorted=False, arr2_index=None):\n \"\"\"\n For each element in arr1 return the index of the element with the\n same value in arr2, or -1 if there is no element with the same value.\n Setting arr2_sorted=True will save some time if arr2 is already sorted\n into ascending order.\n\n A precomputed sorting index for arr2 can be supplied using the\n arr2_index parameter. This can save time if the routine is called\n repeatedly with the same arr2 but arr2 is not already sorted.\n\n It is assumed that each element in arr1 only occurs once in arr2.\n \"\"\"\n\n # Workaround for a numpy bug (<=1.4): ensure arrays are native endian\n # because searchsorted ignores endian flag\n if not(arr1.dtype.isnative):\n arr1_n = asarray(arr1, dtype=arr1.dtype.newbyteorder(\"=\"))\n else:\n arr1_n = arr1\n if not(arr2.dtype.isnative):\n arr2_n = asarray(arr2, dtype=arr2.dtype.newbyteorder(\"=\"))\n else:\n arr2_n = arr2\n\n # Sort arr2 into ascending order if necessary\n tmp1 = arr1_n\n if arr2_sorted:\n tmp2 = arr2_n\n idx = slice(0,len(arr2_n))\n else:\n if arr2_index is None:\n idx = argsort(arr2_n)\n tmp2 = arr2_n[idx]\n else:\n # Use supplied sorting index\n idx = arr2_index\n tmp2 = arr2_n[arr2_index]\n\n # Find where elements of arr1 are in arr2\n ptr = searchsorted(tmp2, tmp1)\n\n # Make sure all elements in ptr are valid indexes into tmp2\n # (any out of range entries won't match so they'll get set to -1\n # in the next bit)\n ptr[ptr>=len(tmp2)] = 0\n ptr[ptr<0] = 0\n\n # Return -1 where no match is found\n ind = tmp2[ptr] != tmp1\n ptr[ind] = -1\n\n # Put ptr back into original order\n ind = arange(len(arr2_n))[idx]\n ptr = where(ptr>= 0, ind[ptr], -1)\n \n return ptr\n\n\n","sub_path":"python/virgo/util/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222398668","text":"import numpy as np\n\nimport jax\n\nimport pytest\n\nimport scico.numpy as snp\nfrom scico.linop import Convolve, DiagonalStack, Identity, Sum, VerticalStack\nfrom scico.test.linop.test_linop import adjoint_test\n\n\nclass TestVerticalStack:\n def setup_method(self, method):\n self.key = jax.random.PRNGKey(12345)\n\n @pytest.mark.parametrize(\"jit\", [False, True])\n def test_construct(self, jit):\n # requires a list of LinearOperators\n I = Identity((42,))\n with pytest.raises(ValueError):\n H = VerticalStack(I, jit=jit)\n\n # checks input sizes\n A = Identity((3, 2))\n B = Identity((7, 2))\n with pytest.raises(ValueError):\n H = VerticalStack([A, B], jit=jit)\n\n # in general, returns a BlockArray\n A = Convolve(jax.device_put(np.ones((3, 3))), (7, 11))\n B = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n H = VerticalStack([A, B], jit=jit)\n x = np.ones((7, 11))\n y = H @ x\n assert y.shape == ((9, 13), (8, 12))\n\n # ... 
result should be [A@x, B@x]\n assert np.allclose(y[0], A @ x)\n assert np.allclose(y[1], B @ x)\n\n # by default, collapse to jax array when possible\n A = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n B = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n H = VerticalStack([A, B], jit=jit)\n x = np.ones((7, 11))\n y = H @ x\n assert y.shape == (2, 8, 12)\n\n # ... result should be [A@x, B@x]\n assert np.allclose(y[0], A @ x)\n assert np.allclose(y[1], B @ x)\n\n # let user turn off collapsing\n A = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n B = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n H = VerticalStack([A, B], collapse=False, jit=jit)\n x = np.ones((7, 11))\n y = H @ x\n assert y.shape == ((8, 12), (8, 12))\n\n @pytest.mark.parametrize(\"collapse\", [False, True])\n @pytest.mark.parametrize(\"jit\", [False, True])\n def test_adjoint(self, collapse, jit):\n # general case\n A = Convolve(jax.device_put(np.ones((3, 3))), (7, 11))\n B = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n H = VerticalStack([A, B], collapse=collapse, jit=jit)\n adjoint_test(H, self.key)\n\n # collapsable case\n A = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n B = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n H = VerticalStack([A, B], collapse=collapse, jit=jit)\n adjoint_test(H, self.key)\n\n @pytest.mark.parametrize(\"collapse\", [False, True])\n @pytest.mark.parametrize(\"jit\", [False, True])\n def test_algebra(self, collapse, jit):\n # adding\n A = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n B = Convolve(jax.device_put(np.ones((2, 2))), (7, 11))\n H = VerticalStack([A, B], collapse=collapse, jit=jit)\n\n A = Convolve(jax.device_put(np.random.rand(2, 2)), (7, 11))\n B = Convolve(jax.device_put(np.random.rand(2, 2)), (7, 11))\n G = VerticalStack([A, B], collapse=collapse, jit=jit)\n\n x = np.ones((7, 11))\n S = H + G\n\n # test correctness of adding\n assert S.output_shape == H.output_shape\n assert S.input_shape == H.input_shape\n np.testing.assert_allclose((S @ x)[0], (H @ x + G @ x)[0])\n np.testing.assert_allclose((S @ x)[1], (H @ x + G @ x)[1])\n\n # result of adding two conformable stacks should be a stack\n assert isinstance(S, VerticalStack)\n assert isinstance(H - G, VerticalStack)\n\n # scalar multiplication\n assert isinstance(1.0 * H, VerticalStack)\n\n # op scaling\n scalars = [2.0, 3.0]\n y1 = S @ x\n S2 = S.scale_ops(scalars)\n y2 = S2 @ x\n\n np.testing.assert_allclose(scalars[0] * y1[0], y2[0])\n\n\nclass TestBlockDiagonalLinearOperator:\n def test_apply(self):\n S1 = (3, 4)\n S2 = (3, 5)\n S3 = (2, 2)\n A1 = Identity(S1)\n A2 = 2 * Identity(S2)\n A3 = Sum(S3)\n H = DiagonalStack((A1, A2, A3))\n\n x = snp.ones((S1, S2, S3))\n y = H @ x\n y_expected = snp.blockarray((snp.ones(S1), 2 * snp.ones(S2), snp.sum(snp.ones(S3))))\n\n assert y == y_expected\n\n def test_adjoint(self):\n S1 = (3, 4)\n S2 = (3, 5)\n S3 = (2, 2)\n A1 = Identity(S1)\n A2 = 2 * Identity(S2)\n A3 = Sum(S3)\n H = DiagonalStack((A1, A2, A3))\n\n y = snp.ones((S1, S2, ()), dtype=snp.float32)\n x = H.T @ y\n x_expected = snp.blockarray(\n (\n snp.ones(S1),\n snp.ones(S2),\n snp.ones(S3),\n )\n )\n\n assert x == x_expected\n\n def test_input_collapse(self):\n S = (3, 4)\n A1 = Identity(S)\n A2 = Sum(S)\n\n H = DiagonalStack((A1, A2))\n assert H.input_shape == (2, *S)\n\n H = DiagonalStack((A1, A2), allow_input_collapse=False)\n assert H.input_shape == (S, S)\n\n def test_output_collapse(self):\n S1 = (3, 4)\n S2 = (5, 3, 4)\n A1 = Identity(S1)\n A2 = Sum(S2, axis=0)\n\n H = 
DiagonalStack((A1, A2))\n assert H.output_shape == (2, *S1)\n\n H = DiagonalStack((A1, A2), allow_output_collapse=False)\n assert H.output_shape == (S1, S1)\n","sub_path":"scico/test/linop/test_stack.py","file_name":"test_stack.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"111666973","text":"import numpy as np\n\nfrom a0_logistic_regression import logistic_regression, predict\nfrom a0_neural_network import load_data\n\n# this package provides the evaluation report\n\nraw_x, raw_y = load_data('ex3data1.mat')\nprint(raw_x.shape)\nprint(raw_y.shape)\n\n# add intercept=1 for x0\n# insert a first column (all ones)\nx = np.insert(raw_x, 0, values=np.ones(raw_x.shape[0]), axis=1)\nprint(\"x.shape = \", x.shape)\n\n# y have 10 categories here. 1..10, they represent digit 0 as category 10 because matlab index start at 1\n# I'll make digit 0 index 0 again\n# expand 5000*1 to 5000*10\n# e.g. y=10 -> [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]: ndarray\n\ny_matrix = []\n\nfor k in range(1, 11):\n y_matrix.append((raw_y == k).astype(int))\n\n# last one is k==10, it's digit 0, bring it to the first position (the last column, k=10, represents digit 0, so move it to the front)\ny_matrix = [y_matrix[-1]] + y_matrix[:-1]\ny = np.array(y_matrix)\n\nprint(\"y.shape = \", y.shape)\n\n# train 1 model (a one-dimensional model)\nt0 = logistic_regression(x, y[0])\nprint(t0.shape)\ny_pred = predict(x, t0)\nprint('Accuracy={}'.format(np.mean(y[0] == y_pred)))\n","sub_path":"code/ex3-neural network/a2_train_1_model.py","file_name":"a2_train_1_model.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494997944","text":"import json\nfrom os import mkdir\nfrom os.path import expanduser, exists\n\nimport numpy as np\nimport pandas as pd\n\nimport seaborn as sns\n\n\nHOME = expanduser(\"~\") + \"/Dropbox/Twitter/\"\n\nINPUT_PATH = HOME + \"Filtered.Distributions/\"\nOUTPUT_PATH = HOME + \"Merchant.Distributions/\"\n\n\ndef get_freqs(values):\n values = np.array(values).astype(float)\n return values / np.sum(values)\n\n\ndef handle_merchant(_infile, _outfile):\n if not exists(OUTPUT_PATH):\n mkdir(OUTPUT_PATH)\n\n with open(_infile, 'r') as infile, open(_outfile, 'wb') as outfile:\n points = xrange(50)\n outfile.write(\"id,{0:s}\\n\".format(\",\".join([str(x) for x in points])))\n for line in infile.readlines():\n jd = json.loads(line)\n key = jd.keys()[0]\n values = get_freqs(jd[key])\n if len(values) >= 10:\n values = np.round(map(lambda x: values[x] if len(values) > x else 0.0, points), 5).astype(str)\n outfile.write(\"{0:s},{1:s}\\n\".format(key, \",\".join(values)))\n\n\nfor i in ['like', 'mention', 'retweet', 'union']:\n handle_merchant(INPUT_PATH + i + '.mat', OUTPUT_PATH + i + '.csv')\n\n# for i in ['like', 'mention', 'retweet', 'union']:\n# data = pd.read_csv(OUTPUT_PATH + i + '.csv')\n# print data.head(3)\n# sns.set(style=\"ticks\")\n# sns.pairplot(data=data, hue='3')\n#\n# sns.plt.show()\n# break","sub_path":"src/merchant/merchant.py","file_name":"merchant.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"100201680","text":"s = int(input())\n\na = []\na.append(s)\n\ni = 0\nwhile True:\n if a[i] % 2 == 0:\n tmp = a[i]//2\n else:\n tmp = a[i]*3+1\n\n if tmp in a:\n print(i+2)\n break\n else:\n a.append(tmp)\n i += 
1\n","sub_path":"ABC_B/ABC116_B.py","file_name":"ABC116_B.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346994674","text":"# -*- coding: UTF-8 -*-\n\nimport tensorflow as tf\nimport math\nfrom tensorflow.examples.tutorials.mnist import input_data as mnist_data\n\ntf.set_random_seed(0)\n# download the data set\nmnist = mnist_data.read_data_sets(\"data\", one_hot=True, reshape=False, validation_size=0)\n\nX = tf.placeholder(tf.float32, [None, 28, 28, 1])\nY_ = tf.placeholder(tf.float32, [None, 10])\nlr = tf.placeholder(tf.float32)\nK = 4 # first convolutional layer output depth\nL = 8 # second convolutional layer output depth\nM = 12 # third convolutional layer output depth\nN = 200 # fully connected layer\n# truncated_normal: truncated normal distribution\nW1 = tf.Variable(tf.truncated_normal([5, 5, 1, K], stddev=0.1)) # 5x5 patch, 1 input channel, K output channels\nB1 = tf.Variable(tf.ones([K]) / 10)\nW2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.1))\nB2 = tf.Variable(tf.ones([L]) / 10)\nW3 = tf.Variable(tf.truncated_normal([4, 4, L, M], stddev=0.1))\nB3 = tf.Variable(tf.ones([M]) / 10)\nW4 = tf.Variable(tf.truncated_normal([7 * 7 * M, N], stddev=0.1))\nB4 = tf.Variable(tf.ones([N]) / 10)\nW5 = tf.Variable(tf.truncated_normal([N, 10], stddev=0.1))\nB5 = tf.Variable(tf.ones([10]) / 10)\n\n# build the model\nstride = 1 # output is 28x28\nY1 = tf.nn.relu(tf.nn.conv2d(X, W1, strides=[1, stride, stride, 1], padding='SAME') + B1)\nstride = 2 # output is 14x14\nY2 = tf.nn.relu(tf.nn.conv2d(Y1, W2, strides=[1, stride, stride, 1], padding='SAME') + B2)\nstride = 2 # output is 7x7\nY3 = tf.nn.relu(tf.nn.conv2d(Y2, W3, strides=[1, stride, stride, 1], padding='SAME') + B3)\n# reshape the output from the third convolution for the fully connected layer\nYY = tf.reshape(Y3, shape=[-1, 7 * 7 * M])\nY4 = tf.nn.relu(tf.matmul(YY, W4) + B4)\nYlogits = tf.matmul(Y4, W5) + B5\nY = tf.nn.softmax(Ylogits)\n\n# minimize the cross-entropy\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)\ncross_entropy = tf.reduce_mean(cross_entropy) * 100\ntrain_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)\n\n# accuracy\ncorrect_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# initialize the variables\ninit = tf.global_variables_initializer()\n# launch the graph\nsess = tf.Session()\nsess.run(init)\n\n# start training the model; here we loop the training 1001 times\nfor i in range(1001):\n batch_X, batch_Y = mnist.train.next_batch(100)\n\n # learning rate decay\n max_learning_rate = 0.003\n min_learning_rate = 0.0001\n decay_speed = 2000.0 # 0.003-0.0001-2000=>0.9826 done in 5000 iterations\n learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i / decay_speed)\n\n if i % 10 == 0:\n a, c = sess.run([accuracy, cross_entropy], feed_dict={X: batch_X, Y_: batch_Y})\n print(str(i) + \": accuracy:\" + str(a) + \" loss: \" + str(c) + \" (lr:\" + str(learning_rate) + \")\")\n\n if i % 50 == 0:\n a, c = sess.run([accuracy, cross_entropy], feed_dict={X: mnist.test.images, Y_: mnist.test.labels})\n print(str(i) + \": ********* epoch \" + str(i * 100 // mnist.train.images.shape[0] + 1) + \" ********* \", end='')\n print(\"test accuracy:\" + str(a) + \" test loss: \" + str(c))\n\n sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y, lr: 
learning_rate})\n","sub_path":"mnist_3.0_convolutional.py","file_name":"mnist_3.0_convolutional.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"499689444","text":"\"\"\"\nServer names and aliases that point to an ERDDAP instance\n\n\"\"\"\n\n\nserver_alias = {\n \"National Glider Data Assembly Center\": \"https://gliders.ioos.us/erddap\",\n \"NGDAC\": \"https://gliders.ioos.us/erddap\",\n \"IOOS\": \"https://gliders.ioos.us/erddap\",\n \"Ocean Observatories Initiative\": \"https://erddap.dataexplorer.oceanobservatories.org/erddap/index.html\",\n \"OOI\": \"https://erddap.dataexplorer.oceanobservatories.org/erddap/index.html\",\n \"Institut français de recherche pour l'exploitation de la mer\": \"https://www.ifremer.fr/erddap\",\n \"ifremer\": \"https://www.ifremer.fr/erddap\",\n \"ifremer.fr\": \"https://www.ifremer.fr/erddap\",\n}\n\nserver_vars = {\n \"https://gliders.ioos.us/erddap\": [\n \"pressure\",\n \"latitude\",\n \"longitude\",\n \"salinity\",\n \"temperature\",\n \"time\",\n ],\n \"http://www.ifremer.fr/erddap\": [\n \"time\",\n \"latitude\",\n \"longitude\",\n \"PSAL\",\n \"TEMP\",\n \"PRES\",\n ],\n \"https://erddap-uncabled.oceanobservatories.org/uncabled/erddap\": [\n \"latitude\",\n \"longitude\",\n \"ctdgv_m_glider_instrument_practical_salinity\",\n \"ctdgv_m_glider_instrument_sci_water_temp\",\n \"ctdgv_m_glider_instrument_sci_water_pressure_dbar\",\n \"time\",\n ],\n}\n\nserver_parameter_rename = {\n \"latitude (degrees_north)\": \"latitude\",\n \"longitude (degrees_east)\": \"longitude\",\n \"salinity (1)\": \"salinity\",\n \"psal (psu)\": \"salinity\",\n \"ctdgv_m_glider_instrument_practical_salinity (1)\": \"salinity\",\n \"temperature (celsius)\": \"temperature\",\n \"temp (degree_celsius)\": \"temperature\",\n \"ctdgv_m_glider_instrument_sci_water_temp (deg_c)\": \"temperature\",\n \"pres (decibar)\": \"pressure\",\n \"pressure (dbar)\": \"pressure\",\n \"ctdgv_m_glider_instrument_sci_water_pressure_dbar (dbar)\": \"pressure\",\n \"dataset_url\": \"dataset_url\",\n}\n\n\ndef server_select(server_string):\n \"\"\"\n Attempts to match the supplied string to a known ERDDAP server by address or alias\n \"\"\"\n if server_string in server_vars.keys():\n # If string matches exactly, return unchanged\n return server_string\n for server in server_vars.keys():\n # If string contains base ERDDAP address, return base ERDDAP address\n if server in server_string:\n return server\n for alias in server_alias:\n # If string matches one of the aliases, return the corresponding ERDDAP address\n if server_string.lower() == alias.lower():\n return server_alias[alias]\n # If the server is not recognised, print options of working servers and exit\n raise ValueError(\n \"Supplied server/alias not recognised. 
Please use one of the following supported servers:\\n\"\n f\"{str(server_vars.keys())[10:-1]}\"\n )\n","sub_path":"gliderpy/servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"505180905","text":"import pygame\r\nimport colorsys\r\nfrom pathlib import Path\r\nimport os\r\nimport math\r\n\r\nspeed = 5\r\nbulletspeed = 7\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((800,600))\r\n\r\n#image loader:\r\nimgdir = Path(__file__).parent\r\n\r\n#imgdir = Path(os.getcwd())\r\n#imgdir = os.path.join(imgdir,Path(\"SupraTonks\"))\r\nprint(imgdir)\r\ndef LoadImage(name):\r\n return pygame.image.load(os.path.join(imgdir,Path(name)))\r\n\r\ntonkimg = LoadImage('tonk.png')\r\nbulletimg = LoadImage('bullet.png')\r\n\r\n#color generator\r\ndef createcol(h):\r\n col = colorsys.hsv_to_rgb(h/360, 1.0, 1.0) \r\n return (col[0]*255,col[1]*255,col[2]*255) \r\n\r\n#healthbars\r\ndef drawhealthbar(x,y,w,h,value,maxval,fillbg):\r\n col = colorsys.hsv_to_rgb(((value/maxval)*128)/360, 1.0, 1.0) \r\n if(fillbg):\r\n pygame.draw.rect(screen, (180,180,180), (x-1,y-1,w+2,h+2), 0)\r\n pygame.draw.rect(screen, (col[0]*255,col[1]*255,col[2]*255), (x,y,int(w*(value/maxval)),h), 0)\r\n\r\n#collision\r\ndef intersect(colbox1,colbox2):\r\n return(colbox2[0] <= colbox1[0]+colbox1[2] and colbox2[1] <= colbox1[1]+colbox1[3]) and (colbox2[0]+colbox2[2] >= colbox1[0] and colbox2[1]+colbox2[3] >= colbox1[1])\r\n\r\n\r\nclass bullet:\r\n def __init__(self,x,y,dx,dy):\r\n self.x = x\r\n self.y = y\r\n self.dx = dx\r\n self.dy = dy\r\n self.bounds = (self.x,self.y,4,4) #bounds for checking collision\r\n \r\n def draw(self):\r\n screen.blit(bulletimg,(int(self.x),int(self.y)))\r\n\r\n def move(self):\r\n self.x += self.dx*tick\r\n self.y += self.dy*tick\r\n self.bounds = (self.x,self.y,4,4) #bounds update\r\n \r\nclass tonk:\r\n def __init__(self,x,y,col,nick):\r\n #coordinates\r\n self.x = x\r\n self.y = y\r\n \r\n self.bounds = (x,y,32,32) \r\n #velocity\r\n self.dx = 0\r\n self.dy = 0\r\n self.dir = 1\r\n \r\n #display \r\n self.col = col\r\n self.nick = nick\r\n\r\n #extras\r\n self.health = 20\r\n self.surf = pygame.Surface((32, 32))\r\n \r\n def draw(self):\r\n intx = int(self.x)\r\n inty = int(self.y)\r\n drawsurf = pygame.Surface((32, 32))\r\n \r\n alphacol = (2,2,2)\r\n drawsurf.fill(alphacol)\r\n \r\n #tank color\r\n pygame.draw.rect(self.surf, self.col, (0,6,32,20), 0)\r\n pygame.draw.rect(self.surf, self.col, (14,0,4,10), 0)\r\n #drawing tank sprite\r\n self.surf.blit(tonkimg,(0,0))\r\n #tank rotation\r\n drawsurf = pygame.transform.rotate(self.surf, -90*self.dir)\r\n #transparency\r\n drawsurf.set_colorkey(alphacol)\r\n #drawing tank\r\n screen.blit(drawsurf,(intx,inty))\r\n #healthbar\r\n drawhealthbar(intx-2,inty+36,36,8,self.health,20,True)\r\n #nick\r\n font = pygame.font.Font('freesansbold.ttf', 14) \r\n text = font.render(self.nick, True, (255,255,255))\r\n textRect = text.get_rect() \r\n textRect.center = (int(self.x+16), int(self.y-22+12))\r\n #drawing extras\r\n screen.blit(text, textRect)\r\n \r\n def shoot(self):\r\n bl = ''\r\n if(self.dir == 1):\r\n bl = bullet(self.x+32,self.y+14,bulletspeed,0)\r\n elif(self.dir == 3):\r\n bl = bullet(self.x-4,self.y+14,-bulletspeed,0) \r\n elif(self.dir == 2):\r\n bl = bullet(self.x+14,self.y+32,0,bulletspeed)\r\n elif(self.dir == 4):\r\n bl = bullet(self.x+14,self.y-4,0,-bulletspeed)\r\n bullets.append(bl)\r\n\r\n def move(self):\r\n #changing direction\r\n if(self.dx > 0):\r\n self.dir = 1\r\n elif(self.dx < 0):\r\n self.dir = 3\r\n elif(self.dy > 0):\r\n self.dir = 2\r\n elif(self.dy < 0):\r\n self.dir = 4\r\n #checking collision with other tanks\r\n nextbounds = 
(self.x+self.dx,self.y+self.dy,32,32)\r\n for k in tonks:\r\n if(tonks[k] != self):\r\n if(intersect(nextbounds, tonks[k].bounds)):\r\n return\r\n \r\n #movement\r\n self.x += self.dx*tick\r\n self.y += self.dy*tick\r\n\r\n #looparound\r\n if(self.x > 800):\r\n self.x -=832\r\n if(self.y > 600):\r\n self.y -=632\r\n if(self.x < -32):\r\n self.x +=832\r\n if(self.y < -32):\r\n self.y +=632\r\n \r\n #updating bounds\r\n self.bounds = (self.x,self.y,32,32)\r\n \r\n \r\n\r\n#initialising\r\nbullets = []\r\ntonks = {}\r\n\r\ntk = tonk(20,20,createcol(0),'Vany')\r\ntk2 = tonk(800-20-32,600-20-32,createcol(180),'Vany2')\r\ntonks['Vany'] = tk\r\ntonks['Vany2'] = tk2\r\n\r\n\r\nklok = pygame.time.Clock()\r\nFPS = 60\r\n\r\nplay = True\r\nwhile play:\r\n ms = klok.tick(FPS)\r\n tick = ms/20.0\r\n \r\n for event in pygame.event.get(): \r\n if event.type == pygame.QUIT: \r\n play = False \r\n if event.type == pygame.KEYUP:\r\n k = event.key\r\n #movement for 1st tank\r\n if k == pygame.K_RIGHT or k == pygame.K_LEFT:\r\n tk.dx = 0\r\n if k == pygame.K_DOWN or k == pygame.K_UP:\r\n tk.dy = 0\r\n #movement for 2nd tank\r\n if k == pygame.K_d or k == pygame.K_a:\r\n tk2.dx = 0\r\n if k == pygame.K_s or k == pygame.K_w:\r\n tk2.dy = 0\r\n \r\n if event.type == pygame.KEYDOWN:\r\n #movement for 1st tank\r\n if event.key == pygame.K_DOWN:\r\n tk.dx = 0\r\n tk.dy = speed\r\n if event.key == pygame.K_UP:\r\n tk.dx = 0\r\n tk.dy = -speed\r\n if event.key == pygame.K_LEFT:\r\n tk.dx = -speed\r\n tk.dy = 0\r\n if event.key == pygame.K_RIGHT:\r\n tk.dx = speed\r\n tk.dy = 0\r\n if event.key == pygame.K_SPACE:\r\n tk.shoot()\r\n #movement for 2nd tank\r\n if event.key == pygame.K_s:\r\n tk2.dx = 0\r\n tk2.dy = speed\r\n if event.key == pygame.K_w:\r\n tk2.dx = 0\r\n tk2.dy = -speed\r\n if event.key == pygame.K_a:\r\n tk2.dx = -speed\r\n tk2.dy = 0\r\n if event.key == pygame.K_d:\r\n tk2.dx = speed\r\n tk2.dy = 0\r\n if event.key == pygame.K_LSHIFT:\r\n tk2.shoot()\r\n\r\n #applying changes:\r\n tk.move()\r\n tk2.move()\r\n\r\n #redraw\r\n screen.fill((0,0,0))\r\n \r\n #draw bullets\r\n for b in bullets:\r\n b.move()\r\n b.draw()\r\n\r\n #draw tanks\r\n for k in tonks:\r\n #check bullet collision\r\n for b in bullets:\r\n #check if bullet outside the room\r\n if(b.x > 810 or b.x < -10 or b.y > 610 or b.y < 0):\r\n bullets.remove(b)\r\n elif(intersect(b.bounds,tonks[k].bounds)):\r\n tonks[k].health -= 1\r\n bullets.remove(b)\r\n tonks[k].draw()\r\n\r\n pygame.display.flip()","sub_path":"SupraTonks/SupraTonks.py","file_name":"SupraTonks.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85814664","text":"# EPA_NEI.py (flowsa)\n# !/usr/bin/env python3\n# coding=utf-8\n\"\"\"\nPulls EPA National Emissions Inventory (NEI) data for nonpoint sources\n\"\"\"\n\nimport io\nimport zipfile\nimport pandas as pd\nfrom flowsa.flowbyfunctions import assign_fips_location_system\nfrom flowsa.common import convert_fba_unit\n\n\ndef epa_nei_url_helper(**kwargs):\n \"\"\"\n This helper function uses the \"build_url\" input from flowbyactivity.py, which\n is a base url for data imports that requires parts of the url text string\n to be replaced with info specific to the data year.\n This function does not parse the data, only modifies the urls from which data is obtained.\n :param kwargs: potential arguments include:\n build_url: string, base url\n config: dictionary, items in FBA method yaml\n args: dictionary, arguments specified when running\n flowbyactivity.py ('year' and 'source')\n :return: list, urls to call, concat, parse, format into Flow-By-Activity format\n \"\"\"\n\n # load the arguments necessary for function\n build_url = 
kwargs['build_url']\n args = kwargs['args']\n\n urls = []\n url = build_url\n\n url = url.replace('__year__', args['year'])\n\n if args['year'] == '2017':\n url = url.replace('__version__', '2017v1/2017neiApr')\n elif args['year'] == '2014':\n url = url.replace('__version__', '2014v2/2014neiv2')\n elif args['year'] == '2011':\n url = url.replace('__version__', '2011v2/2011neiv2')\n elif args['year'] == '2008':\n url = url.replace('__version__', '2008neiv3')\n urls.append(url)\n return urls\n\n\ndef epa_nei_call(**kwargs):\n \"\"\"\n Convert response for calling url to pandas dataframe, begin parsing df into FBA format\n :param kwargs: potential arguments include:\n url: string, url\n response_load: df, response from url call\n args: dictionary, arguments specified when running\n flowbyactivity.py ('year' and 'source')\n :return: pandas dataframe of original source data\n \"\"\"\n # load arguments necessary for function\n response_load = kwargs['r']\n\n z = zipfile.ZipFile(io.BytesIO(response_load.content))\n # create a list of files contained in the zip archive\n znames = z.namelist()\n # retain only those files that are in .csv format\n znames = [s for s in znames if '.csv' in s]\n # initialize the dataframe\n df = pd.DataFrame()\n # for all of the .csv data files in the .zip archive,\n # read the .csv files into a dataframe\n # and concatenate with the master dataframe\n for i in range(len(znames)):\n df = pd.concat([df, pd.read_csv(z.open(znames[i]))])\n return df\n\n\ndef epa_nei_global_parse(**kwargs):\n \"\"\"\n Combine, parse, and format the provided dataframes\n :param kwargs: potential arguments include:\n dataframe_list: list of dataframes to concat and format\n args: dictionary, used to run flowbyactivity.py ('year' and 'source')\n :return: df, parsed and partially formatted to flowbyactivity specifications\n \"\"\"\n # load arguments necessary for function\n dataframe_list = kwargs['dataframe_list']\n args = kwargs['args']\n\n df = pd.concat(dataframe_list, sort=True)\n\n # rename columns to match flowbyactivity format\n if args['year'] == '2017':\n df = df.rename(columns={\"pollutant desc\": \"FlowName\",\n \"total emissions\": \"FlowAmount\",\n \"scc\": \"ActivityProducedBy\",\n \"fips code\": \"Location\",\n \"emissions uom\": \"Unit\",\n \"pollutant code\": \"Description\"})\n\n elif args['year'] == '2014':\n df = df.rename(columns={\"pollutant_desc\": \"FlowName\",\n \"total_emissions\": \"FlowAmount\",\n \"scc\": \"ActivityProducedBy\",\n \"state_and_county_fips_code\": \"Location\",\n \"uom\": \"Unit\",\n \"pollutant_cd\": \"Description\"})\n\n elif args['year'] == '2011' or args['year'] == '2008':\n df = df.rename(columns={\"description\": \"FlowName\",\n \"total_emissions\": \"FlowAmount\",\n \"scc\": \"ActivityProducedBy\",\n \"state_and_county_fips_code\": \"Location\",\n \"uom\": \"Unit\",\n \"pollutant_cd\": \"Description\"})\n\n # make sure FIPS are string and 5 digits\n df['Location'] = df['Location'].astype('str').apply('{:0>5}'.format)\n # remove records from certain FIPS\n excluded_fips = ['78', '85', '88']\n df = df[~df['Location'].str[0:2].isin(excluded_fips)]\n excluded_fips2 = ['777']\n df = df[~df['Location'].str[-3:].isin(excluded_fips2)]\n\n # drop all other columns\n df.drop(df.columns.difference(['FlowName',\n 'FlowAmount',\n 'ActivityProducedBy',\n 'Location',\n 'Unit',\n 'Description']), 1, inplace=True)\n\n # to align with other processed NEI data (Point from StEWI), units are\n # converted during FBA creation instead of maintained\n df = 
convert_fba_unit(df)\n\n # add hardcoded data\n df['FlowType'] = \"ELEMENTARY_FLOW\"\n df['Class'] = \"Chemicals\"\n df['SourceName'] = args['source']\n df['Compartment'] = \"air\"\n df['Year'] = args['year']\n df = assign_fips_location_system(df, args['year'])\n\n return df\n\n\ndef epa_nei_onroad_parse(**kwargs):\n \"\"\"\n Combine, parse, and format the provided dataframes\n :param kwargs: potential arguments include:\n dataframe_list: list of dataframes to concat and format\n args: dictionary, used to run flowbyactivity.py ('year' and 'source')\n :return: df, parsed and partially formatted to flowbyactivity specifications\n \"\"\"\n\n df = epa_nei_global_parse(**kwargs)\n\n # Add DQ scores\n df['DataReliability'] = 3\n df['DataCollection'] = 1\n\n return df\n\n\ndef epa_nei_nonroad_parse(**kwargs):\n \"\"\"\n Combine, parse, and format the provided dataframes\n :param kwargs: potential arguments include:\n dataframe_list: list of dataframes to concat and format\n args: dictionary, used to run flowbyactivity.py ('year' and 'source')\n :return: df, parsed and partially formatted to flowbyactivity specifications\n \"\"\"\n\n df = epa_nei_global_parse(**kwargs)\n\n # Add DQ scores\n df['DataReliability'] = 3\n df['DataCollection'] = 1\n\n return df\n\n\ndef epa_nei_nonpoint_parse(**kwargs):\n \"\"\"\n Combine, parse, and format the provided dataframes\n :param kwargs: potential arguments include:\n dataframe_list: list of dataframes to concat and format\n args: dictionary, used to run flowbyactivity.py ('year' and 'source')\n :return: df, parsed and partially formatted to flowbyactivity specifications\n \"\"\"\n\n df = epa_nei_global_parse(**kwargs)\n\n # Add DQ scores\n df['DataReliability'] = 3\n df['DataCollection'] = 5 # data collection scores are updated in fbs as\n # a function of facility coverage from point source data\n\n return df\n\n\ndef clean_NEI_fba(fba):\n \"\"\"\n Clean up the NEI FBA for use in FBS creation\n :param fba: df, FBA format\n :return: df, modified FBA\n \"\"\"\n fba = remove_duplicate_NEI_flows(fba)\n fba = drop_GHGs(fba)\n # Remove the portion of PM10 that is PM2.5 to eliminate double counting,\n # rename FlowName and Flowable, and update UUID\n fba = remove_flow_overlap(fba, 'PM10 Primary (Filt + Cond)', ['PM2.5 Primary (Filt + Cond)'])\n # # link to FEDEFL\n # import fedelemflowlist\n # mapping = fedelemflowlist.get_flowmapping('NEI')\n # PM_df = mapping[['TargetFlowName',\n # 'TargetFlowUUID']][mapping['SourceFlowName']=='PM10-PM2.5']\n # PM_list = PM_df.values.flatten().tolist()\n PM_list = ['Particulate matter, > 2.5μm and ≤ 10μm',\n 'a320e284-d276-3167-89b3-19d790081c08']\n fba.loc[(fba['FlowName'] == 'PM10 Primary (Filt + Cond)'),\n ['FlowName','Flowable','FlowUUID']] = ['PM10-PM2.5',\n PM_list[0], PM_list[1]]\n return fba\n\n\ndef clean_NEI_fba_no_pesticides(fba):\n \"\"\"\n Clean up the NEI FBA with no pesticides for use in FBS creation\n :param fba: df, FBA format\n :return: df, modified FBA\n \"\"\"\n fba = drop_pesticides(fba)\n fba = clean_NEI_fba(fba)\n return fba\n\n\ndef remove_duplicate_NEI_flows(df):\n \"\"\"\n These flows for PM will get mapped to the primary PM flowable in FEDEFL\n resulting in duplicate emissions\n :param df: df, FBA format\n :return: df, FBA format with duplicate flows dropped\n \"\"\"\n flowlist = [\n 'PM10-Primary from certain diesel engines',\n 'PM25-Primary from certain diesel engines',\n ]\n\n df = df.loc[~df['FlowName'].isin(flowlist)]\n return df\n\n\ndef drop_GHGs(df):\n \"\"\"\n GHGs are included in some NEI 
datasets. If these data are not compiled together\n with GHGRP, need to remove them as they will be tracked from a different source\n :param df: df, FBA format\n :return: df\n \"\"\"\n # Flow names reflect source data prior to FEDEFL mapping, using 'FlowName'\n # instead of 'Flowable'\n flowlist = [\n 'Carbon Dioxide',\n 'Methane',\n 'Nitrous Oxide',\n 'Sulfur Hexafluoride',\n ]\n\n df = df.loc[~df['FlowName'].isin(flowlist)]\n\n return df\n\n\ndef drop_pesticides(df):\n \"\"\"\n To avoid overlap with other datasets, emissions of pesticides from pesticide\n application are removed.\n :param df: df, FBA format\n :return: df\n \"\"\"\n # Flow names reflect source data prior to FEDEFL mapping, using 'FlowName'\n # instead of 'Flowable'\n flowlist = [\n '2,4-Dichlorophenoxy Acetic Acid',\n 'Captan',\n 'Carbaryl',\n 'Methyl Bromide',\n 'Methyl Iodide',\n 'Parathion',\n 'Trifluralin',\n ]\n\n activity_list = [\n '2461800001',\n '2461800002',\n '2461850000',\n ]\n\n df = df.loc[~(df['FlowName'].isin(flowlist) &\n df['ActivityProducedBy'].isin(activity_list))]\n\n return df\n\n\ndef remove_flow_overlap(df, aggregate_flow, contributing_flows):\n \"\"\"\n Quantity of contributing flows is subtracted from aggregate flow and the\n aggregate flow quantity is updated. Modeled after function of same name in\n stewicombo.overlaphandler.py\n :param df: df, FBA format\n :param aggregate_flow: str, flowname to modify\n :param contributing_flows: list, flownames contributing to aggregate flow\n :return: df, FBA format, modified flows\n \"\"\"\n match_conditions = ['ActivityProducedBy', 'Compartment', 'Location', 'Year']\n\n df_contributing_flows = df.loc[df['FlowName'].isin(contributing_flows)]\n df_contributing_flows = df_contributing_flows.groupby(match_conditions,\n as_index=False)['FlowAmount'].sum()\n\n df_contributing_flows['FlowName'] = aggregate_flow\n df_contributing_flows['ContributingAmount'] = df_contributing_flows['FlowAmount']\n df_contributing_flows.drop(columns=['FlowAmount'], inplace=True)\n # merge on the match columns plus FlowName; list.append returns None, so\n # the key list is built explicitly\n df = df.merge(df_contributing_flows, how='left', on=match_conditions + ['FlowName'])\n df[['ContributingAmount']] = df[['ContributingAmount']].fillna(value=0)\n df['FlowAmount'] = df['FlowAmount'] - df['ContributingAmount']\n df.drop(columns=['ContributingAmount'], inplace=True)\n\n # Make sure the aggregate flow is non-negative\n df.loc[((df.FlowName == aggregate_flow) & (df.FlowAmount <= 0)), \"FlowAmount\"] = 0\n return df\n","sub_path":"flowsa/data_source_scripts/EPA_NEI.py","file_name":"EPA_NEI.py","file_ext":"py","file_size_in_byte":11939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"536707882","text":"import pytest\n\nfrom sanic import Sanic\nfrom sanic.errorpages import exception_response\nfrom sanic.exceptions import NotFound\nfrom sanic.request import Request\nfrom sanic.response import HTTPResponse\n\n\n@pytest.fixture\ndef app():\n app = Sanic(\"error_page_testing\")\n\n @app.route(\"/error\", methods=[\"GET\", \"POST\"])\n def err(request):\n raise Exception(\"something went wrong\")\n\n return app\n\n\n@pytest.fixture\ndef fake_request(app):\n return Request(b\"/foobar\", {}, \"1.1\", \"GET\", None, app)\n\n\n@pytest.mark.parametrize(\n \"fallback,content_type, exception, status\",\n (\n (None, \"text/html; charset=utf-8\", Exception, 500),\n (\"html\", \"text/html; charset=utf-8\", Exception, 500),\n (\"auto\", \"text/html; charset=utf-8\", Exception, 500),\n (\"text\", \"text/plain; charset=utf-8\", Exception, 500),\n 
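# the NotFound rows below mirror these fallbacks and must map to 404\n 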
(\"json\", \"application/json\", Exception, 500),\n (None, \"text/html; charset=utf-8\", NotFound, 404),\n (\"html\", \"text/html; charset=utf-8\", NotFound, 404),\n (\"auto\", \"text/html; charset=utf-8\", NotFound, 404),\n (\"text\", \"text/plain; charset=utf-8\", NotFound, 404),\n (\"json\", \"application/json\", NotFound, 404),\n ),\n)\ndef test_should_return_html_valid_setting(\n fake_request, fallback, content_type, exception, status\n):\n if fallback:\n fake_request.app.config.FALLBACK_ERROR_FORMAT = fallback\n\n try:\n raise exception(\"bad stuff\")\n except Exception as e:\n response = exception_response(fake_request, e, True)\n\n assert isinstance(response, HTTPResponse)\n assert response.status == status\n assert response.content_type == content_type\n\n\ndef test_auto_fallback_with_data(app):\n app.config.FALLBACK_ERROR_FORMAT = \"auto\"\n\n _, response = app.test_client.get(\"/error\")\n assert response.status == 500\n assert response.content_type == \"text/html; charset=utf-8\"\n\n _, response = app.test_client.post(\"/error\", json={\"foo\": \"bar\"})\n assert response.status == 500\n assert response.content_type == \"application/json\"\n\n _, response = app.test_client.post(\"/error\", data={\"foo\": \"bar\"})\n assert response.status == 500\n assert response.content_type == \"text/html; charset=utf-8\"\n\n\ndef test_auto_fallback_with_content_type(app):\n app.config.FALLBACK_ERROR_FORMAT = \"auto\"\n\n _, response = app.test_client.get(\n \"/error\", headers={\"content-type\": \"application/json\"}\n )\n assert response.status == 500\n assert response.content_type == \"application/json\"\n\n _, response = app.test_client.get(\n \"/error\", headers={\"content-type\": \"text/plain\"}\n )\n assert response.status == 500\n assert response.content_type == \"text/plain; charset=utf-8\"\n","sub_path":"tests/test_errorpages.py","file_name":"test_errorpages.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"355663546","text":"import sys\n\nprint(\"\\nWitaj w programie Kalkulator 1.0\\n\")\n\nwhile True:\n print(\"Podaj działanie jakie chcesz wykonać:\")\n print(\"1 - Dodawanie\",\"\\n2 - Odejmowanie\",\"\\n3 - Mnożenie\",\"\\n4 - Dzielenie\", \"\\n0 - KONIEC\")\n operation = int(input())\n if operation==0:\n print(\"Dziękuję za skorzystanie z Kalkulatora. Zapraszam ponownie. Do zobaczenia!\")\n sys.exit(0)\n\n while operation!=1 and operation!=2 and operation!=3 and operation!=4:\n print(\"Dokonałeś błędnego wyboru. Spróbuj ponownie:\")\n print(\"Podaj działanie jakie chcesz wykonać:\")\n print(\"1 - Dodawanie\", \"\\n2 - Odejmowanie\", \"\\n3 - Mnożenie\", \"\\n4 - Dzielenie\", \"\\n0 - KONIEC\")\n operation = int(input(\"\"))\n\n a = int(input(\"Podaj pierwszą liczbę:\\n\"))\n b = int(input(\"Podaj drugą liczbę:\\n\"))\n\n #Guard against division by zero\n while operation==4 and b==0:\n if operation==4 and b==0:\n print(\"Nie można dzielić przez 0! 
Podaj ponownie liczby do wykonania Dzielenia:\")\n a = int(input(\"Podaj pierwszą liczbę:\\n\"))\n b = int(input(\"Podaj drugą liczbę:\\n\"))\n\n #Main calculator\n if operation==1:\n result_1 = a + b\n print(\"Wynik dodawania wynosi: \",result_1)\n elif operation==2:\n result_2 = a - b\n print(\"Wynik odejmowania wynosi: \",result_2)\n elif operation==3:\n result_3 = a * b\n print(\"Wynik mnożenie wynosi: \",result_3)\n elif operation==4:\n result_4 = a / b\n result_44 = result_4 % 1 # fractional part; 0 means the quotient is a whole number\n if result_44==0:\n print(\"Wynik dzielenie wynosi: \",int(result_4))\n else:\n print(\"Wynik dzielenie wynosi: \",float(result_4))\n\n","sub_path":"Python_Learn/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"240979259","text":"import pygame\r\nimport time\r\n#import class_Game\r\n\r\n\r\n\r\n################ IMAGES ################\r\n\r\n#Logo\r\nlogoimg = pygame.image.load('img/logo.png')\r\n\r\n#Background ship\r\nbackgroundshipimg = pygame.image.load('img/background_ship.png')\r\n\r\n#Tutorial\r\ntutimg = [pygame.image.load('img/tutorial_place-ships.png'), pygame.image.load('img/tutorial_turn.png'), pygame.image.load('img/tutorial_move.png'), pygame.image.load('img/tutorial_attack.png')]\r\n\r\n#place ships: 0. turn: 1, move: 2, attack: 3\r\n\r\n#Columns\r\ncolumn_normalimg = [pygame.image.load('grid/column.png') , pygame.image.load('grid/column_over.png')]\r\ncolumn_choosable = [pygame.image.load('grid/column_choosable.png') , pygame.image.load('grid/column_choosable_over.png')]\r\ncolumn_boat_red = pygame.image.load('grid/column_boat_red.png')\r\ncolumn_boat_choosable_red = pygame.image.load('grid/column_choosable_boat_red.png')\r\ncolumn_boat_green = pygame.image.load('grid/column_boat_green.png')\r\ncolumn_boat_choosable_green = pygame.image.load('grid/column_choosable_boat_green.png')\r\n\r\n#Boats\r\nboat_red_size_2 = [pygame.image.load('boats/s2-p2-v2.png') , pygame.image.load('boats/s2-p2-v2_over.png')]\r\nboat_green_size_2 = [pygame.image.load('boats/s2-p1-v1.png') , pygame.image.load('boats/s2-p1-v1_over.png')]\r\n\r\n#Buttons\r\nback_but = [pygame.image.load('but/back_button.png') , pygame.image.load('but/back_button_over.png')]\r\nstartturn_but = [pygame.image.load('but/startturn_button.png') , pygame.image.load('but/startturn_button_over.png')]\r\nmainimenu_but = [pygame.image.load('but/mainmenu_button.png') , pygame.image.load('but/mainmenu_button_over.png')]\r\nx_but = [pygame.image.load('but/X_button.png') , pygame.image.load('but/X_button_over.png')]\r\nrightarrow_but = [pygame.image.load('but/rightarrow_button.png') , pygame.image.load('but/rightarrow_button_over.png')]\r\nleftarrow_but = [pygame.image.load('but/leftarrow_button.png') , pygame.image.load('but/leftarrow_button_over.png')]\r\nuparrow_but = [pygame.image.load('but/uparrow_button.png') , pygame.image.load('but/uparrow_button_over.png')]\r\ndownarrow_but = [pygame.image.load('but/downarrow_button.png') , pygame.image.load('but/downarrow_button_over.png')]\r\nattack_but = [pygame.image.load('but/attack_button.png') , pygame.image.load('but/attack_button_over.png')]\r\nmove_but = [pygame.image.load('but/move_button.png') , pygame.image.load('but/move_button_over.png')]\r\ncancel_but = [pygame.image.load('but/cancel_button.png') , pygame.image.load('but/cancel_button_over.png')]\r\nendturn_but = [pygame.image.load('but/End_Turn.png')]\r\ndefensive_left = 
[pygame.image.load('but/Defensive_Left.png')]\r\ndefensive_right = [pygame.image.load('but/Defensive_Right.png')]\r\ndefensive_inactive = [pygame.image.load('but/Defensive_Inactive.png')]\r\ncontinue_to_menu = [pygame.image.load('but/continue_to_menu_button.png')]\r\ncontine = [pygame.image.load('but/continue_button.png')]\r\nhelp_button_small = [pygame.image.load('but/Help_ingame_button.png')]\r\n##########################################\r\n\r\nclass Visual:\r\n\r\n def __init__ (self, game, gameDisplay, clock, width, height):\r\n\r\n self.Game = game\r\n self.Display = gameDisplay\r\n self.Clock = clock\r\n self.Width = width\r\n self.Height = height\r\n self.Size = (width, height)\r\n self.font = pygame.font.SysFont('Calibri', 20)\r\n self.headfont = pygame.font.SysFont('Calibri', 50)\r\n self.loop = False\r\n\r\n self.move_boat = False\r\n\r\n self.PositionPicked = self.Game.EmptyPosition\r\n self.ActionPicked = \"none\"\r\n self.MovementPicked = \"none\"\r\n self.CoordinatesClicked = (-1,-1)\r\n\r\n #Colors\r\n self.darkblue = (15,15,23)\r\n self.white = (255, 255, 255)\r\n self.red = (255, 0, 0)\r\n self.green = (0, 255, 0)\r\n self.Help = False\r\n\r\n def show_logo (self):\r\n\r\n pos_x = 15\r\n pos_y = 15\r\n pos = (pos_x,pos_y)\r\n self.Display.blit(logoimg,pos)\r\n\r\n def show_backgroundship (self):\r\n\r\n pos_x = (self.Width*0.5) - 360\r\n pos_y = (self.Height*0.5) - 177\r\n pos = (pos_x,pos_y)\r\n self.Display.blit(backgroundshipimg,pos)\r\n\r\n def show_tut(self):\r\n\r\n\r\n pos_x = (self.Width*0.5) - 400\r\n pos_y = (self.Height*0.5) - 177\r\n pos = (pos_x,pos_y)\r\n # tutimg is a list of tutorial pages; blit a single Surface (the first\r\n # page is assumed here) rather than the list itself\r\n self.Display.blit(tutimg[0], pos)\r\n\r\n ##########################################\r\n\r\n ################ FUNCTIONS ################\r\n\r\n def display_refresh (self):\r\n\r\n pygame.display.flip()\r\n self.Clock.tick(15)\r\n\r\n def exit (self):\r\n\r\n pygame.quit()\r\n quit()\r\n#############################################################\r\n \r\n def helpopenbutton(self, x, y, width, height, event=None):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n self.Display.blit(help_button_small[0],(x , y))\r\n if (x + width) > mouse[0] > x and (y + height) > mouse[1] > y:\r\n if click[0] == 1 and event != None:\r\n self.showhelp(event)\r\n\r\n def ContinueToNextPlayerButton (self, button, x, y, width, height, event=None): \r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n\r\n self.Display.blit(button[0],(x,y))\r\n\r\n if (x + width) > mouse[0] > x and (y + height) > mouse[1] > y:\r\n if click[0] == 1 and event != None:\r\n self.ActionPicked = event \r\n\r\n def Movementbutton (self, mouse, click, button, x, y, width, height, event=None): \r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n self.Display.blit(button[0],(x,y))\r\n if (x + width) > mouse[0] > x and (y + height) > mouse[1] > y:\r\n if click[0] == 1 and event != None:\r\n self.MovementPicked = event \r\n\r\n def addRect(self, x, y, width, height, outline):\r\n self.rect = pygame.draw.rect(self.Display, self.white, (x, y, width, height), outline)\r\n\r\n def addText(self, text, x, y, width, height):\r\n self.Display.blit(self.font.render(text, True, self.white, (width, height)),(x+5,y+5))\r\n def addColorText(self, color, text, x, y, width, height):\r\n self.Display.blit(self.font.render(text, True, color, (width, height)),(x+5,y+5))\r\n def addHead(self, text, x, y, width, height):\r\n self.Display.blit(self.headfont.render(text, True, self.white, (width, 
height)),(x+5,y+5))\r\n\r\n def MessageBox(self, text, x, y, width, height):\r\n self.addRect(x, y, width, height, 1)\r\n self.addText(text, x, y, width, height)\r\n\r\n def Playerstats(self, x, y, width, height, line1, line2, line3, line4, line5, color):\r\n self.addRect(x, y, width, height, 2)\r\n self.addColorText(color, line1, x, y + 5, width, height)\r\n self.addText(line2, x, y + 35, width, height)\r\n self.addText(line3, x, y + 60, width, height)\r\n self.addText(line4, x, y + 85, width, height)\r\n self.addText(line5, x, y + 110, width, height)\r\n\r\n def Getstringfromboat(self,Boat):\r\n string = str(Boat.Name) + \" | Size: \" + str(Boat.Size) + \" | Health = \" + str(Boat.Health) + \"/\" + str(Boat.MaxHealth)\r\n return string\r\n\r\n def GetPlayerStatsString(self, Player):\r\n line1 = \" \"; line2 = \" \"; line3 = \" \"; line4 = \" \"; line5 = \" \"\r\n boats = Player.Boats\r\n line1 = str(Player.Name) + \"'s stats:\"\r\n if len(boats) > 0:\r\n localboat = boats[0]\r\n line2 = self.Getstringfromboat(boats[0])\r\n if len(Player.Boats) > 1:\r\n localboat = boats[1]\r\n line3 = self.Getstringfromboat(boats[1])\r\n if len(Player.Boats) > 2:\r\n localboat = boats[2]\r\n line4 = self.Getstringfromboat(boats[2])\r\n if len(Player.Boats) > 3:\r\n localboat = boats[3]\r\n line5 = self.Getstringfromboat(boats[3])\r\n return [line1, line2, line3, line4, line5]\r\n \r\n #all possiblegridcoordinates\r\n\r\n def NormalCoordininate (self, position, x, y):\r\n \r\n self.Display.blit(column_normalimg[0],(x,y))\r\n\r\n def ChoosableCoordinate (self, mouse, click, position, x, y, width, height, event_1=None, event_2=None):\r\n if (x + width) > mouse[0] > x and (y + height) > mouse[1] > y:\r\n \r\n self.Display.blit(column_choosable[1],(x,y))\r\n\r\n if click[0] == 1 and event_1 != None and event_2 != None:\r\n \r\n self.PositionPicked = position\r\n\r\n else:\r\n \r\n self.Display.blit(column_choosable[1],(x,y))\r\n\r\n def BoatCoordinate (self, position, x, y):\r\n\r\n if position.Boat.Player == self.Game.Player1:\r\n self.Display.blit(column_boat_red,(x,y))\r\n elif position.Boat.Player == self.Game.Player2:\r\n self.Display.blit(column_boat_green,(x,y)) \r\n\r\n def ChooseableboatCoordinate (self, mouse, click, position, x, y, width, height, event_1=None, event_2=None):\r\n\r\n if (x + width) > mouse[0] > x and (y + height) > mouse[1] > y:\r\n \r\n if position.Boat.Player == self.Game.Player1:\r\n self.Display.blit(column_boat_choosable_red,(x,y))\r\n elif position.Boat.Player == self.Game.Player2:\r\n self.Display.blit(column_boat_choosable_green,(x,y)) \r\n\r\n if click[0] == 1 and event_1 != None and event_2 != None:\r\n self.PositionPicked = position\r\n else:\r\n \r\n if position.Boat.Player == self.Game.Player1:\r\n self.Display.blit(column_boat_choosable_red,(x,y))\r\n elif position.Boat.Player == self.Game.Player2:\r\n self.Display.blit(column_boat_choosable_green,(x,y)) \r\n\r\n####\r\n\r\n def EndTurnButton (self, button, x, y, width, height, event=None):\r\n \r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n\r\n if (x + width) > mouse[0] > x and (y + height) > mouse[1] > y:\r\n\r\n if click[0] == 1 and event != None:\r\n self.PositionPicked = event\r\n#choose action\r\n\r\n def ChooseActionButton (self, button, x, y, width, height, event=None):\r\n\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n\r\n if (x + width) > mouse[0] > x and (y + height) > mouse[1] > y:\r\n\r\n if click[0] == 1 and event != None:\r\n self.ActionPicked 
= event\r\n\r\n#Draw the game\r\n\r\n    def grid_position_x(self, n):\r\n        # pixel x of grid column n: the grid starts 250 px left of center, 25 px per cell\r\n        return (self.Width * 0.5) - 250 + 25 * n\r\n\r\n    def grid_position_y(self, n):\r\n        # pixel y of grid row n: the grid starts 250 px above center, 25 px per cell\r\n        return (self.Height * 0.5) - 250 + 25 * n\r\n\r\n    def draw_grid(self, Chooseablecoordinates, Chooseableboats):\r\n        chooseableboatpositions = []\r\n\r\n        for localchoosableboat in Chooseableboats:\r\n            localboatcoordinates = localchoosableboat.GetLocalBoatsPositions(True, -1, -1, \"inactive\")\r\n            chooseableboatpositions = chooseableboatpositions + localboatcoordinates\r\n\r\n        AllBoatPositions = self.Game.GetAllBoatPositions([])\r\n\r\n        ### SHOW GRID ###\r\n\r\n        pos_y = (self.Height * 0.5) - 250\r\n\r\n        mouse = pygame.mouse.get_pos()\r\n        click = pygame.mouse.get_pressed()\r\n\r\n        # draw the board from the active player's side\r\n        if self.Game.Player_Playing == self.Game.Player2:\r\n            i = 0; j = 20; k = 1\r\n        elif self.Game.Player_Playing == self.Game.Player1:\r\n            i = 19; j = -1; k = -1\r\n\r\n        for grid_y in range(i, j, k):\r\n\r\n            pos_x = (self.Width * 0.5) - 250\r\n\r\n            for grid_x in range(0, 20):\r\n\r\n                LocalPositionClass = self.Game.GetPosition(grid_x, grid_y)\r\n\r\n                if LocalPositionClass.Boat == self.Game.EmptyBoat:\r\n                    if LocalPositionClass in Chooseablecoordinates:\r\n                        #choosable coordinate\r\n                        self.ChoosableCoordinate(mouse, click, LocalPositionClass, pos_x, pos_y, 25, 25, grid_x, grid_y)\r\n                    else:\r\n                        #normal coordinate\r\n                        self.NormalCoordininate(LocalPositionClass, pos_x, pos_y)\r\n\r\n                else:\r\n                    if LocalPositionClass in chooseableboatpositions:\r\n                        #choosable boat coordinate\r\n                        self.ChooseableboatCoordinate(mouse, click, LocalPositionClass, pos_x, pos_y, 25, 25, grid_x, grid_y)\r\n                    else:\r\n                        #plain boat coordinate\r\n                        self.BoatCoordinate(LocalPositionClass, pos_x, pos_y)\r\n\r\n                pos_x += 25\r\n\r\n            pos_y += 25\r\n\r\n    def draw_game(self, MessageBox1Tekst, MessageBox2Tekst, help):\r\n        self.show_backgroundship()\r\n        self.show_logo()\r\n\r\n        self.MessageBox(MessageBox1Tekst, ((self.Width * 0.5) - 350), self.Height * 0.5 - 325, 700, 25)\r\n\r\n        self.MessageBox(MessageBox2Tekst, ((self.Width * 0.5) - 250), self.Height * 0.5 - 275, 500, 25)\r\n\r\n        Player1Text = self.GetPlayerStatsString(self.Game.Player1)\r\n        color = self.red\r\n        self.Playerstats(10, 170, 350, 200, Player1Text[0], Player1Text[1], Player1Text[2], Player1Text[3], Player1Text[4], color)\r\n\r\n        Player2Text = self.GetPlayerStatsString(self.Game.Player2)\r\n        color = self.green\r\n        self.Playerstats(10, 400, 350, 200, Player2Text[0], Player2Text[1], Player2Text[2], Player2Text[3], Player2Text[4], color)\r\n\r\n        if help != -1:\r\n            self.helpopenbutton(self.Width * 0.5 + 282, self.Height * 0.1 + 13, 68, 68, help)\r\n\r\n#################### interactive functions ###############\r\n    def drawscreen(self):\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n                self.exit()\r\n\r\n        self.Display.fill(self.darkblue)\r\n        self.draw_game(\"\", \"\", -1)\r\n        self.draw_grid([], [])\r\n        self.display_refresh()\r\n\r\n    def selectcoordinate(self, Chooseablecoordinates, Chooseableboats, ShowEndTurnDisplay, MessageBox1Tekst, MessageBox2Tekst, help):\r\n        self.PositionPicked = self.Game.EmptyPosition\r\n        while self.PositionPicked == self.Game.EmptyPosition:\r\n            for event in pygame.event.get():\r\n                if event.type == pygame.QUIT:\r\n                    pygame.quit()\r\n                    
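# pygame.quit() shuts down all pygame modules; the call to self.exit() right below then stops the program, so a window-close event always ends this selection loop.\r\n                    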
self.exit()\r\n\r\n self.Display.fill(self.darkblue)\r\n \r\n self.draw_game(MessageBox1Tekst, MessageBox2Tekst, help)\r\n self.draw_grid(Chooseablecoordinates, Chooseableboats)\r\n\r\n x_pos = self.Width * 0.5\r\n y_pos = self.Height * 0.86\r\n x = x_pos - 134\r\n y = y_pos\r\n\r\n if ShowEndTurnDisplay:\r\n self.Display.blit(endturn_but[0],(x,y)) \r\n self.EndTurnButton(endturn_but, x, y, 268, 68, 'end turn')\r\n\r\n self.display_refresh()\r\n\r\n return self.PositionPicked\r\n \r\n def chooseaction(self, Boat, AbleToMove, AbleToAttackBoats, PositionsToAttack, MessageBox1Tekst, MessageBox2Tekst):\r\n self.ActionPicked = \"none\"\r\n self.Display.fill(self.darkblue)\r\n self.draw_game(MessageBox1Tekst,MessageBox2Tekst, 1)\r\n self.draw_grid(PositionsToAttack, [])\r\n if AbleToMove == True:\r\n x = self.Width * 0.5 - 134\r\n y = self.Height * 0.86\r\n self.Display.blit(move_but[0],(x,y))\r\n if AbleToAttackBoats == True:\r\n x = self.Width * 0.5 - 422\r\n y = self.Height * 0.86\r\n self.Display.blit(attack_but[0],(x,y))\r\n\r\n x = self.Width * 0.5 + 154\r\n y = self.Height * 0.86\r\n self.Display.blit(cancel_but[0],(x,y))\r\n\r\n self.display_refresh()\r\n\r\n while self.ActionPicked == \"none\":\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n pygame.quit()\r\n self.exit()\r\n x_pos = self.Width * 0.5 \r\n y_pos = self.Height * 0.86\r\n if AbleToMove == True:\r\n self.ChooseActionButton(move_but, x_pos- 134, y_pos, 268, 68, 'move')\r\n\r\n if AbleToAttackBoats == True:\r\n self.ChooseActionButton(attack_but, x_pos - 422, y_pos, 268, 68, 'attack')\r\n \r\n self.ChooseActionButton(cancel_but, x_pos + 154, y_pos, 268, 68, 'cancel')\r\n\r\n self.Clock.tick(15)\r\n self.helpopenbutton(self.Width * 0.5 + 282, self.Height * 0.1 + 13, 68, 68, 1)\r\n\r\n return self.ActionPicked\r\n\r\n####\r\n\r\n def ChooseActionPhase1(self, BoatsAbleForAction, BoatsAbleToMove, BoatsAbleToAttack, MessageBox1Tekst, MessageBox2Tekst): #AvaiblePlayCards_No): #returns boatclass for boataction, returns 'play cards' or 'end turn' \r\n #place ships: 0. 
turn: 1, move: 2, attack: 3\r\n PositionPicked = self.selectcoordinate([], BoatsAbleForAction, True, MessageBox1Tekst, MessageBox2Tekst, 1)\r\n if PositionPicked == \"end turn\":\r\n return \"end turn\"\r\n else:\r\n return PositionPicked.Boat\r\n\r\n def ChooseBoatActionPhase2(self, Boat, AbleToMove, AbleToAttackBoats, PositionsToAttack, MessageBox1Tekst, MessageBox2Tekst): #returns 'attack when pressed attack, returns 'move' when pressed move, returns 'cancle' when cancled\r\n Action = self.chooseaction(Boat, AbleToMove, AbleToAttackBoats, PositionsToAttack, MessageBox1Tekst, MessageBox2Tekst) #\"attack\", \"move\", \"cancle\"\r\n return Action\r\n \r\n def GetAttackActionPhase3(self, Boat, PositionsAbleToAttack, BoatsAbleToAttack, MessageBox1Tekst, MessageBox2Tekst): #returns [\"stance\", \"left\"/\"right\"/\"inactive\"] or [\"move\", \"left\"/\"right\",\"forward\",\"backward\"] or [\"stop\", \"stop] \r\n BoatPicked = self.selectcoordinate([], BoatsAbleToAttack, False, MessageBox1Tekst, MessageBox2Tekst, 3)\r\n boat = BoatPicked.Boat\r\n return boat\r\n\r\n def GetMovementActionPhase3(self, Boat, PossibleStanceActions, PossibleMovementActions, PositionsToAttack, MessageBox1Tekst, MessageBox2Tekst): #returns [\"stance\", \"left\"/\"right\"/\"inactive\"] or [\"move\", \"left\"/\"right\",\"forward\",\"backward\"] or [\"stop\", \"stop] \r\n self.MovementPicked = \"none\"\r\n\r\n while self.MovementPicked == \"none\":\r\n self.Help = False\r\n selectedboatpositions = Boat.GetLocalBoatsPositions(True, -1,-1,\"inactive\")\r\n\r\n MoveRight = False\r\n MoveLeft = False\r\n MoveForward = False\r\n MoveBackward = False\r\n for possiblemovement in PossibleMovementActions:\r\n if possiblemovement == \"left\":\r\n MoveLeft = True\r\n elif possiblemovement == \"right\":\r\n MoveRight = True\r\n elif possiblemovement == \"forward\":\r\n MoveForward = True\r\n elif possiblemovement == \"backward\":\r\n MoveBackward = True\r\n\r\n StanceLeft = False\r\n StanceRight = False\r\n StanceInactive = False\r\n for possiblestance in PossibleStanceActions:\r\n if possiblestance == \"left\":\r\n StanceLeft = True\r\n elif possiblestance == \"right\":\r\n StanceRight = True\r\n elif possiblestance == \"inactive\":\r\n StanceInactive = True\r\n \r\n self.Display.fill(self.darkblue)\r\n self.draw_game(MessageBox1Tekst, MessageBox2Tekst, 2)\r\n self.draw_grid(PositionsToAttack, [])\r\n \r\n while self.MovementPicked == \"none\" and self.Help == False:\r\n \r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n \r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n pygame.quit()\r\n self.exit()\r\n \r\n x_pos = self.Width * 0.8\r\n y_pos = self.Height * 0.75\r\n\r\n if MoveRight == True:\r\n self.Movementbutton(mouse, click, rightarrow_but, x_pos + 33, y_pos - 34, 68, 68, [\"move\", \"right\"])\r\n if MoveLeft == True:\r\n self.Movementbutton(mouse, click, leftarrow_but, x_pos - 101, y_pos - 34, 68, 68, [\"move\", \"left\"])\r\n if MoveForward == True:\r\n self.Movementbutton(mouse, click, uparrow_but, x_pos - 34, y_pos - 101, 68, 68, [\"move\", \"forward\"])\r\n if MoveBackward == True:\r\n self.Movementbutton(mouse, click, downarrow_but, x_pos - 34, y_pos + 33, 68, 68, [\"move\", \"backward\"])\r\n \r\n self.Movementbutton(mouse, click, x_but, x_pos - 34, y_pos - 34, 68, 68, [\"stop\", \"stop\"])\r\n\r\n self.Movementbutton(mouse, click, x_but, x_pos - 34, y_pos - 34, 68, 68, [\"stop\", \"stop\"])\r\n\r\n x_pos = self.Width * 0.5 \r\n y_pos = self.Height * 0.86\r\n\r\n if 
StanceLeft == True:\r\n self.Movementbutton(mouse, click, defensive_left, x_pos - 288, y_pos, 268, 68, [\"stance\", \"left\"])\r\n if StanceInactive == True:\r\n self.Movementbutton(mouse, click, defensive_inactive, x_pos - 144, y_pos, 268, 68, [\"stance\", \"inactive\"]) \r\n if StanceRight == True:\r\n self.Movementbutton(mouse, click, defensive_right, x_pos + 20, y_pos, 268, 68, [\"stance\", \"right\"]) \r\n\r\n self.helpopenbutton(self.Width * 0.5 + 282, self.Height * 0.1 + 13, 68, 68, 2)\r\n\r\n pygame.display.flip()\r\n self.Clock.tick(15)\r\n\r\n action = self.MovementPicked\r\n return action\r\n\r\n def DrawWinnerScreen(self):\r\n self.Display.fill(self.darkblue)\r\n self.show_backgroundship()\r\n self.show_logo()\r\n\r\n x_pos = self.Width * 0.5 \r\n y_pos = self.Height * 0.3\r\n self.addHead( \"Congratulations! \" + str(self.Game.Winner.Name) + \" won!\", x_pos - 300, y_pos - 25, 600, 50)\r\n\r\n self.display_refresh()\r\n time.sleep(1)\r\n self.ActionPicked = \"none\"\r\n while self.ActionPicked == \"none\":\r\n \r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n pygame.quit()\r\n self.exit()\r\n\r\n\r\n\r\n self.Display.fill(self.darkblue)\r\n self.show_backgroundship()\r\n self.show_logo()\r\n\r\n \r\n\r\n x_pos = self.Width * 0.5 \r\n y_pos = self.Height * 0.3\r\n\r\n self.addHead( \"Congratulations! \" + str(self.Game.Winner.Name) + \" won!\", x_pos - 300, y_pos - 25, 600, 50)\r\n\r\n x_pos = self.Width * 0.5 \r\n y_pos = self.Height * 0.7\r\n self.ContinueToNextPlayerButton (continue_to_menu, x_pos - 144, y_pos - 34, 268, 68, \"pressed\")\r\n \r\n\r\n self.display_refresh()\r\n\r\n def show_nextturn (self, player):\r\n \r\n \r\n self.Display.fill(self.darkblue)\r\n self.show_backgroundship()\r\n self.show_logo()\r\n\r\n x_pos = self.Width * 0.5 \r\n y_pos = self.Height * 0.3\r\n self.addHead(\"It is \" + str(player.Name) + \"'s turn!\", x_pos - 190, y_pos - 25, 380, 50)\r\n\r\n self.display_refresh()\r\n time.sleep(1)\r\n self.ActionPicked = \"none\"\r\n while self.ActionPicked == \"none\":\r\n \r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n pygame.quit()\r\n self.exit()\r\n\r\n\r\n\r\n self.Display.fill(self.darkblue)\r\n self.show_backgroundship()\r\n self.show_logo()\r\n\r\n \r\n\r\n x_pos = self.Width * 0.5 \r\n y_pos = self.Height * 0.3\r\n\r\n self.addHead(\"It is \" + str(player.Name) + \"'s turn!\", x_pos - 190, y_pos - 25, 380, 50)\r\n\r\n x_pos = self.Width * 0.5 \r\n y_pos = self.Height * 0.7\r\n self.ContinueToNextPlayerButton (contine, x_pos - 144, y_pos - 34, 268, 68, \"pressed\")\r\n \r\n\r\n self.display_refresh()\r\n self.drawscreen()\r\n time.sleep(0.5)\r\n\r\n def showhelp(self, helpscreen):\r\n self.Help = True\r\n self.ActionPicked = \"none\"\r\n while self.ActionPicked == \"none\":\r\n \r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n pygame.quit()\r\n self.exit()\r\n\r\n\r\n pos_x = (self.Width * 0.5) - 400\r\n pos_y = (self.Height * 0.5) - 300\r\n pos = (pos_x, pos_y)\r\n self.Display.blit(tutimg[helpscreen], pos)\r\n\r\n \r\n #draw the helpscreen\r\n x_pos = self.Width * 0.9\r\n y_pos = self.Height * 0.3\r\n self.ContinueToNextPlayerButton (x_but, x_pos - 34, y_pos - 34, 68, 68, \"pressed\")\r\n \r\n\r\n self.display_refresh()\r\n\r\n self.drawscreen()\r\n time.sleep(1)\r\n\r\n\r\n 
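# A minimal sketch (not in the original class): every button and grid-cell handler\r\n    # above repeats the same rectangle hit test,\r\n    # (x + width) > mouse[0] > x and (y + height) > mouse[1] > y.\r\n    # A hypothetical helper like this could centralize it:\r\n    def _mouse_over(self, mouse, x, y, width, height):\r\n        \"\"\"Return True if the mouse position lies inside the given rectangle.\"\"\"\r\n        return (x + width) > mouse[0] > x and (y + height) > mouse[1] > y\r\n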
####\r\n","sub_path":"class_Visual.py","file_name":"class_Visual.py","file_ext":"py","file_size_in_byte":25327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"79676289","text":"#!/usr/bin/env python3\n\"\"\"\n    Non-human primate anatomical segmentation pipeline based on ANTS\n\n    Adapted to Nipype from an original pipeline by Kepkee Loh.\n\n    Description\n    --------------\n    TODO :/\n\n    Arguments\n    -----------\n    -data:\n        Path to the BIDS directory that contains subjects' MRI data.\n\n    -out:\n        Nipype's processing directory.\n        It's where all the outputs will be saved.\n\n    -subjects:\n        IDs list of subjects to process.\n\n    -ses\n        session (leave blank if None)\n\n    -params\n        json parameter file; leave blank if None\n\n    Example\n    ---------\n    python segment_pnh.py -data [PATH_TO_BIDS] -out ../local_tests/ -subjects Elouk\n\n    Requirements\n    --------------\n    This workflow uses:\n        - ANTS\n        - AFNI\n        - FSL\n\"\"\"\n\n# Authors : David Meunier (david.meunier@univ-amu.fr)\n#           Bastien Cagna (bastien.cagna@univ-amu.fr)\n#           Kepkee Loh (kepkee.loh@univ-amu.fr)\n#           Julien Sein (julien.sein@univ-amu.fr)\n\nimport os\nimport os.path as op\n\nimport argparse\nimport json\nimport pprint\n\nimport nipype\n\nimport nipype.pipeline.engine as pe\nimport nipype.interfaces.utility as niu\n\nimport nipype.interfaces.fsl as fsl\nfsl.FSLCommand.set_default_output_type('NIFTI_GZ')\n\nfrom macapype.pipelines.full_pipelines import (\n    create_full_spm_subpipes,\n    create_full_ants_subpipes,\n    create_full_T1_ants_subpipes,\n    create_transfo_FLAIR_pipe,\n    create_transfo_MD_pipe)\n\nfrom macapype.utils.utils_bids import (create_datasource_indiv_params,\n                                       create_datasource,\n                                       create_datasink)\n\nfrom macapype.utils.utils_tests import load_test_data, format_template\n\nfrom macapype.utils.utils_nodes import node_output_exists\n\nfrom macapype.utils.misc import show_files, get_first_elem\n\n###############################################################################\n\ndef create_main_workflow(data_dir, process_dir, soft, species, subjects, sessions,\n                         acquisitions, reconstructions, params_file,\n                         indiv_params_file, mask_file, nprocs,\n                         wf_name=\"macapype\",\n                         deriv=False, pad=False):\n\n    # macapype_pipeline\n    \"\"\" Set up the segmentation pipeline based on ANTS\n\n    Arguments\n    ---------\n    data_dir: pathlike str\n        Path to the BIDS directory that contains anatomical images\n\n    process_dir: pathlike str\n        Path to the output directory (will be created if not already existing).\n        Previous outputs may be overwritten.\n\n    soft: str\n        Indicates which analysis should be launched; so far, only spm and ants\n        are accepted; can be extended\n\n    subjects: list of str (optional)\n        Subject's IDs to match to BIDS specification (sub-[SUB1], sub-[SUB2]...)\n\n    sessions: list of str (optional)\n        Session's IDs to match to BIDS specification (ses-[SES1], ses-[SES2]...)\n\n    acquisitions: list of str (optional)\n        Acquisition name to match to BIDS specification (acq-[ACQ1]...)\n\n    indiv_params_file: path to a JSON file\n        JSON file that specifies some parameters of the pipeline,\n        unique for the subjects/sessions.\n\n    params_file: path to a JSON file\n        JSON file that specifies some parameters of the pipeline.\n\n    Returns\n    -------\n    workflow: nipype.pipeline.engine.Workflow\n    \"\"\"\n\n    soft = soft.lower()\n\n    ssoft = soft.split(\"_\")\n\n    new_ssoft = ssoft.copy()\n\n    if 'test' in ssoft:\n        new_ssoft.remove('test')\n\n    if 'prep' in ssoft:\n        new_ssoft.remove('prep')\n\n    soft = 
\"_\".join(new_ssoft)\n\n # formating args\n data_dir = op.abspath(data_dir)\n\n\n try:\n os.makedirs(process_dir)\n except OSError:\n print(\"process_dir {} already exists\".format(process_dir))\n\n # species\n # params\n params = {}\n\n if params_file is None:\n\n if species is not None:\n\n species = species.lower()\n\n rep_species = {\"marmoset\":\"marmo\", \"chimpanzee\":\"chimp\"}\n\n if species in list(rep_species.keys()):\n species = rep_species[species]\n\n list_species = [\"macaque\", \"marmo\", \"baboon\", \"chimp\"]\n\n assert species in list_species, \\\n \"Error, species {} should in the following list {}\".format(\n species, list_species)\n\n package_directory = os.path.dirname(os.path.abspath(__file__))\n\n params_file = \"{}/params_segment_{}_{}.json\".format(\n package_directory, species, soft)\n\n else:\n print(\"Error, no -params or no -species was found (one or the \\\n other is mandatory)\")\n exit(-1)\n\n print(\"Params:\", params_file)\n\n assert os.path.exists(params_file), \"Error with file {}\".format(\n params_file)\n\n params = json.load(open(params_file))\n\n # indiv_params\n indiv_params = {}\n\n if indiv_params_file is None:\n\n print(\"No indiv params where found, modifing pipepline to default\")\n\n if \"short_preparation_pipe\" in params.keys():\n if \"crop_T1\" in params[\"short_preparation_pipe\"].keys():\n print(\"Deleting crop_T1\")\n del params[\"short_preparation_pipe\"][\"crop_T1\"]\n\n print(\"Adding automated bet_crop\")\n\n params[\"short_preparation_pipe\"][\"bet_crop\"] = {\"m\": True, \"aT2\": True, \"c\": 10, \"n\": 2}\n\n print(\"Using default bet_crop parameters: {}\".format(\n params[\"short_preparation_pipe\"][\"bet_crop\"]))\n\n print(\"New params after modification\")\n pprint.pprint(params)\n\n wf_name+=\"_bet_crop\"\n\n else:\n\n assert \"short_preparation_pipe\" in params.keys(),\\\n \"Error, short_preparation_pipe not found in params\"\n\n prep_pipe = \"short_preparation_pipe\"\n count_all_sessions=0\n count_T1_crops=0\n count_long_crops=0\n count_multi_long_crops=0\n\n print(\"Indiv Params:\", indiv_params_file)\n\n assert os.path.exists(indiv_params_file), \"Error with file {}\".format(\n indiv_params_file)\n\n indiv_params = json.load(open(indiv_params_file))\n\n wf_name+=\"_indiv_params\"\n\n pprint.pprint(indiv_params)\n\n if subjects is None or sessions is None:\n print(\"For whole BIDS dir, unable to assess if the indiv_params is correct\")\n print(\"Running with params as it is\")\n \n else:\n \n print(\"Will modify params if necessary, given specified subjects and sessions;\\n\")\n \n for sub in indiv_params.keys():\n\n if sub.split('-')[1] not in subjects:\n continue\n\n for ses in indiv_params[sub].keys():\n\n if ses.split('-')[1] not in sessions:\n continue\n\n count_all_sessions+=1\n\n print (indiv_params[sub][ses].keys())\n\n if \"crop_T1\" in indiv_params[sub][ses].keys():\n count_T1_crops+=1\n\n if \"crop_T2\" in indiv_params[sub][ses].keys() \\\n and 't1' not in ssoft:\n\n count_long_crops+=1\n\n if isinstance(\n indiv_params[sub][ses][\"crop_T1\"][\"args\"],\n list) and isinstance(\n indiv_params[sub][ses][\"crop_T2\"][\"args\"],\n list):\n\n count_multi_long_crops+=1\n\n print(\"count_all_sessions {}\".format(count_all_sessions))\n\n print(\"count_T1_crops {}\".format(count_T1_crops))\n print(\"count_long_crops {}\".format(count_long_crops))\n print(\"count_multi_long_crops {}\".format(count_multi_long_crops))\n\n if count_multi_long_crops==count_all_sessions:\n print(\"**** Found list of crops for T1 and T2 for 
all sub/ses \\\n in indiv -> long_multi_preparation_pipe\")\n\n wf_name+=\"_multi_crop_T1_T2\"\n\n prep_pipe = \"long_multi_preparation_pipe\"\n\n elif count_long_crops==count_all_sessions:\n\n print(\"**** Found crop for T1 and crop for T2 for all sub/ses \\\n in indiv -> long_single_preparation_pipe\")\n\n wf_name+=\"_crop_T1_T2\"\n\n prep_pipe = \"long_single_preparation_pipe\"\n\n elif count_T1_crops==count_all_sessions:\n\n print(\"**** Found crop for T1 for all sub/ses in indiv \\\n -> keeping short_preparation_pipe\")\n\n wf_name+=\"_crop_T1\"\n\n else:\n print(\"**** not all sub/ses have T1 and T2 crops \")\n print(\"Error\")\n exit(0)\n\n if prep_pipe != \"short_preparation_pipe\":\n\n params[prep_pipe]={\n \"prep_T1\": {\"crop_T1\": {\"args\": \"should be defined in indiv\"}},\n \"prep_T2\": {\"crop_T2\": {\"args\": \"should be defined in indiv\"}},\n \"align_T2_on_T1\": {\"dof\": 6, \"cost\": \"normmi\"}}\n\n if \"norm_intensity\" in params[\"short_preparation_pipe\"].keys():\n norm_intensity= params[\"short_preparation_pipe\"][\"norm_intensity\"]\n\n params[prep_pipe][\"prep_T1\"][\"norm_intensity\"]=norm_intensity\n params[prep_pipe][\"prep_T2\"][\"norm_intensity\"]=norm_intensity\n\n\n if \"denoise\" in params[\"short_preparation_pipe\"].keys():\n denoise= params[\"short_preparation_pipe\"][\"denoise\"]\n\n params[prep_pipe][\"prep_T1\"][\"denoise\"]=denoise\n params[prep_pipe][\"prep_T2\"][\"denoise\"]=denoise\n\n del params[\"short_preparation_pipe\"]\n\n\n # prep for testing only preparation part\n if \"prep\" in ssoft:\n print(\"Found prep in soft\")\n \n if \"brain_extraction_pipe\" in params.keys():\n del params[\"brain_extraction_pipe\"]\n print(\"Deleting brain_extraction_pipe\")\n \n \n if \"brain_segment_pipe\" in params.keys():\n del params[\"brain_segment_pipe\"]\n print(\"Deleting brain_segment_pipe\")\n \n pprint.pprint(params)\n \n # params_template\n assert (\"general\" in params.keys() and \\\n \"template_name\" in params[\"general\"].keys()), \\\n \"Error, the params.json should contains a general/template_name\"\n\n template_name = params[\"general\"][\"template_name\"]\n\n if \"general\" in params.keys() and \"my_path\" in params[\"general\"].keys():\n my_path = params[\"general\"][\"my_path\"]\n else:\n my_path = \"\"\n\n nmt_dir = load_test_data(template_name, path_to = my_path)\n params_template = format_template(nmt_dir, template_name)\n print (params_template)\n\n # soft\n wf_name += \"_{}\".format(soft)\n\n if mask_file is not None:\n wf_name += \"_mask\"\n\n assert \"spm\" in ssoft or \"spm12\" in ssoft or \"ants\" in ssoft, \\\n \"error with {}, should be among [spm12, spm, ants]\".format(ssoft)\n\n # main_workflow\n main_workflow = pe.Workflow(name= wf_name)\n\n main_workflow.base_dir = process_dir\n\n if \"spm\" in ssoft or \"spm12\" in ssoft:\n if 'native' in ssoft:\n space='native'\n else:\n space='template'\n\n segment_pnh_pipe = create_full_spm_subpipes(\n params_template=params_template, params=params, pad=pad,\n space=space)\n\n elif \"ants\" in ssoft:\n if \"template\" in ssoft:\n space=\"template\"\n else:\n space=\"native\"\n\n if \"t1\" in ssoft:\n segment_pnh_pipe = create_full_T1_ants_subpipes(\n params_template=params_template, params=params, space=space,\n pad=pad)\n else:\n segment_pnh_pipe = create_full_ants_subpipes(\n params_template=params_template, params=params,\n mask_file=mask_file, space=space, pad=pad)\n\n # list of all required outputs\n output_query = {}\n\n # T1 (mandatory, always added)\n output_query['T1'] = {\n 
\"datatype\": \"anat\", \"suffix\": \"T1w\",\n \"extension\": [\"nii\", \".nii.gz\"]\n }\n\n # T2 is optional, if \"_T1\" is added in the -soft arg\n if not 't1' in ssoft:\n output_query['T2'] = {\n \"datatype\": \"anat\", \"suffix\": \"T2w\",\n \"extension\": [\"nii\", \".nii.gz\"]}\n\n # FLAIR is optional, if \"_FLAIR\" is added in the -soft arg\n if 'flair' in ssoft:\n output_query['FLAIR'] = {\n \"datatype\": \"anat\", \"suffix\": \"FLAIR\",\n \"extension\": [\"nii\", \".nii.gz\"]}\n\n # MD and b0mean are optional, if \"_MD\" is added in the -soft arg\n if 'md' in ssoft:\n output_query['MD'] = {\n \"datatype\": \"dwi\", \"acquisition\": \"MD\", \"suffix\": \"dwi\",\n \"extension\": [\"nii\", \".nii.gz\"]}\n\n output_query['b0mean'] = {\n \"datatype\": \"dwi\", \"acquisition\": \"b0mean\", \"suffix\": \"dwi\",\n \"extension\": [\"nii\", \".nii.gz\"]}\n\n # indiv_params\n if indiv_params:\n datasource = create_datasource_indiv_params(\n output_query, data_dir, indiv_params, subjects, sessions,\n acquisitions, reconstructions)\n\n main_workflow.connect(datasource, \"indiv_params\",\n segment_pnh_pipe,'inputnode.indiv_params')\n else:\n datasource = create_datasource(\n output_query, data_dir, subjects, sessions, acquisitions,\n reconstructions)\n\n main_workflow.connect(datasource, 'T1',\n segment_pnh_pipe, 'inputnode.list_T1')\n\n if not \"t1\" in ssoft:\n main_workflow.connect(datasource, 'T2', \n segment_pnh_pipe, 'inputnode.list_T2')\n elif \"t1\" in ssoft and \"spm\" in ssoft:\n # cheating using T2 as T1\n main_workflow.connect(datasource, 'T1',\n segment_pnh_pipe, 'inputnode.list_T2')\n\n if \"flair\" in ssoft:\n\n transfo_FLAIR_pipe = create_transfo_FLAIR_pipe(params=params,\n params_template=params_template)\n\n if \"t1\" in ssoft:\n main_workflow.connect(segment_pnh_pipe, \"short_preparation_pipe.outputnode.preproc_T1\",\n transfo_FLAIR_pipe, 'inputnode.orig_T1')\n\n else:\n main_workflow.connect(segment_pnh_pipe, \"debias.t1_debiased_file\",\n transfo_FLAIR_pipe, 'inputnode.orig_T1')\n\n\n main_workflow.connect(segment_pnh_pipe, \"reg.transfo_file\",\n transfo_FLAIR_pipe, 'inputnode.lin_transfo_file')\n\n main_workflow.connect(datasource, ('FLAIR', get_first_elem),\n transfo_FLAIR_pipe, 'inputnode.FLAIR')\n\n if 'md' in ssoft:\n\n transfo_MD_pipe = create_transfo_MD_pipe(params=params,\n params_template=params_template)\n\n main_workflow.connect(segment_pnh_pipe,\n \"old_segment_pipe.outputnode.threshold_wm\",\n transfo_MD_pipe, 'inputnode.threshold_wm')\n\n main_workflow.connect(datasource, ('MD', get_first_elem),\n transfo_MD_pipe, 'inputnode.MD')\n\n main_workflow.connect(datasource, ('b0mean', get_first_elem),\n transfo_MD_pipe, 'inputnode.b0mean')\n\n main_workflow.connect(segment_pnh_pipe, \"debias.t1_debiased_file\",\n transfo_MD_pipe, 'inputnode.orig_T1')\n\n main_workflow.connect(segment_pnh_pipe, \"debias.t2_debiased_brain_file\",\n transfo_MD_pipe, 'inputnode.SS_T2')\n\n main_workflow.connect(segment_pnh_pipe, \"reg.transfo_file\",\n transfo_MD_pipe, 'inputnode.lin_transfo_file')\n\n main_workflow.connect(segment_pnh_pipe, \"reg.inv_transfo_file\",\n transfo_MD_pipe, 'inputnode.inv_lin_transfo_file')\n\n if deriv:\n\n datasink_name = os.path.join(\"derivatives\", \"macapype_\" + soft)\n\n if \"regex_subs\" in params.keys():\n params_regex_subs = params[\"regex_subs\"]\n else:\n params_regex_subs={}\n\n if \"subs\" in params.keys():\n params_subs = params[\"rsubs\"]\n else:\n params_subs={}\n\n datasink = create_datasink(iterables=datasource.iterables,\n 
name=datasink_name,\n params_subs=params_subs,\n params_regex_subs=params_regex_subs)\n\n datasink.inputs.base_directory = process_dir\n\n main_workflow.connect(\n segment_pnh_pipe, 'outputnode.brain_mask',\n datasink, '@brain_mask')\n\n main_workflow.connect(\n segment_pnh_pipe, 'outputnode.segmented_brain_mask',\n datasink, '@segmented_brain_mask')\n\n if 'flair' in ssoft :\n\n main_workflow.connect(\n transfo_FLAIR_pipe, 'outputnode.norm_FLAIR',\n datasink, '@norm_flair')\n\n main_workflow.write_graph(graph2use=\"colored\")\n main_workflow.config['execution'] = {'remove_unnecessary_outputs': 'false'}\n\n if nprocs is None:\n nprocs = 4\n\n if not \"test\" in ssoft:\n if \"seq\" in ssoft or nprocs==0:\n main_workflow.run()\n else:\n main_workflow.run(plugin='MultiProc',\n plugin_args={'n_procs' : nprocs})\ndef main():\n\n # Command line parser\n parser = argparse.ArgumentParser(\n description=\"PNH segmentation pipeline\")\n\n parser.add_argument(\"-data\", dest=\"data\", type=str, required=True,\n help=\"Directory containing MRI data (BIDS)\")\n parser.add_argument(\"-out\", dest=\"out\", type=str, #nargs='+',\n help=\"Output dir\", required=True)\n parser.add_argument(\"-soft\", dest=\"soft\", type=str,\n help=\"Sofware of analysis (SPM or ANTS are defined)\",\n required=True)\n parser.add_argument(\"-species\", dest=\"species\", type=str,\n help=\"Type of PNH to process\",\n required=False)\n parser.add_argument(\"-subjects\", \"-sub\", dest=\"sub\",\n type=str, nargs='+', help=\"Subjects\", required=False)\n parser.add_argument(\"-sessions\", \"-ses\", dest=\"ses\",\n type=str, nargs='+', help=\"Sessions\", required=False)\n parser.add_argument(\"-acquisitions\", \"-acq\", dest=\"acq\", type=str,\n nargs='+', default=None, help=\"Acquisitions\")\n parser.add_argument(\"-records\", \"-rec\", dest=\"rec\", type=str, nargs='+',\n default=None, help=\"Records\")\n parser.add_argument(\"-params\", dest=\"params_file\", type=str,\n help=\"Parameters json file\", required=False)\n parser.add_argument(\"-indiv_params\", \"-indiv\", dest=\"indiv_params_file\",\n type=str, help=\"Individual parameters json file\",\n required=False)\n parser.add_argument(\"-mask\", dest=\"mask_file\", type=str,\n help=\"precomputed mask file\", required=False)\n parser.add_argument(\"-nprocs\", dest=\"nprocs\", type=int,\n help=\"number of processes to allocate\", required=False)\n parser.add_argument(\"-deriv\", dest=\"deriv\", action='store_true',\n help=\"output derivatives in BIDS orig directory\",\n required=False)\n parser.add_argument(\"-pad\", dest=\"pad\", action='store_true',\n help=\"padding mask and seg_mask\",\n required=False)\n\n args = parser.parse_args()\n\n # main_workflow\n print(\"Initialising the pipeline...\")\n create_main_workflow(\n data_dir=args.data,\n soft=args.soft,\n process_dir=args.out,\n species=args.species,\n subjects=args.sub,\n sessions=args.ses,\n acquisitions=args.acq,\n reconstructions=args.rec,\n params_file=args.params_file,\n indiv_params_file=args.indiv_params_file,\n mask_file=args.mask_file,\n nprocs=args.nprocs,\n deriv=args.deriv,\n pad=args.pad)\n\nif __name__ == '__main__':\n main()\n","sub_path":"workflows/segment_pnh.py","file_name":"segment_pnh.py","file_ext":"py","file_size_in_byte":20458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"516098296","text":"import time\n\nfrom hamcrest import assert_that, close_to, equal_to, same_instance\nimport mock\nimport pytest\n\nimport keeptabs.scheduler\n\ndef 
test_job_interface():\n job = keeptabs.scheduler.JobInterface()\n methods = [method for method in dir(job) if not method.startswith('__')]\n assert_that(set(methods), equal_to(set(['get_id', 'run'])))\n with pytest.raises(NotImplementedError):\n job.get_id()\n with pytest.raises(NotImplementedError):\n job.run()\n\ndef test_callback_job():\n callback = mock.Mock(return_value='Hello!')\n callback.func_name = 'greeter_function'\n job = keeptabs.scheduler.FunctionJob(callback)\n assert_that(job.get_id(), equal_to('greeter_function'))\n assert_that(job.run(), equal_to('Hello!'))\n callback.assert_called_with()\n\ndef test_combined_job():\n first_callback = mock.Mock(return_value=True)\n first_callback.func_name = 'first_callback'\n second_callback = mock.Mock(return_value=False)\n second_callback.func_name = 'second_callback'\n third_callback = mock.Mock()\n third_callback.func_name = 'third_callback'\n first_job = keeptabs.scheduler.FunctionJob(first_callback)\n second_job = keeptabs.scheduler.FunctionJob(second_callback)\n third_job = keeptabs.scheduler.FunctionJob(third_callback)\n combined_job = keeptabs.scheduler.CombinedJob([first_job, second_job, third_job])\n assert_that(combined_job.get_id(), equal_to('first_callback:second_callback:third_callback'))\n combined_job.run()\n first_callback.assert_called_with()\n second_callback.assert_called_with()\n third_callback.assert_not_called()\n\ndef test_constant_template():\n stub = mock.Mock()\n tpl = keeptabs.scheduler.make_constant_template(stub)\n assert_that(tpl({\"path\": \"/some/path\"}), same_instance(stub))\n assert_that(tpl({\"path\": \"/some/other/path\"}), same_instance(stub))\n\n@mock.patch('keeptabs.scheduler.subprocess.Popen')\ndef test_runs_immediately(popen):\n scheduler = keeptabs.scheduler.Scheduler()\n scheduler.schedule(keeptabs.scheduler.CommandJob(\"my command\"))\n scheduler.run_next()\n popen.assert_called_with(\"my command\", shell=True)\n\n@mock.patch('keeptabs.scheduler.subprocess.Popen')\ndef test_runs_later(popen):\n scheduler = keeptabs.scheduler.Scheduler()\n scheduler.debounce(.1)\n scheduler.schedule(keeptabs.scheduler.CommandJob(\"my command\"))\n scheduler.run_next()\n popen.assert_not_called()\n time.sleep(.1)\n scheduler.run_next()\n popen.assert_called_with(\"my command\", shell=True)\n\n@mock.patch('keeptabs.scheduler.subprocess.Popen')\ndef test_defers(popen):\n scheduler = keeptabs.scheduler.Scheduler()\n scheduler.debounce(.1)\n scheduler.schedule(keeptabs.scheduler.CommandJob(\"my command\"))\n time.sleep(.1)\n scheduler.schedule(keeptabs.scheduler.CommandJob(\"my command\"))\n scheduler.run_next()\n popen.assert_not_called()\n time.sleep(.1)\n scheduler.run_next()\n popen.assert_called_with(\"my command\", shell=True)\n\ndef test_returns_interval():\n scheduler = keeptabs.scheduler.Scheduler()\n scheduler.debounce(5)\n scheduler.schedule(keeptabs.scheduler.CommandJob(\"my command\"))\n interval = scheduler.run_next()\n assert_that(interval, close_to(5, .1))\n\ndef test_empty_queue_waits():\n scheduler = keeptabs.scheduler.Scheduler()\n scheduler.debounce(1000)\n assert_that(scheduler.run_next(), equal_to(1000))\n\ndef test_command_execution_times_out_and_reports_failure():\n job = keeptabs.scheduler.CommandJob(\"sleep 10\", timeout_seconds=.2)\n before = time.time()\n successful = job.run()\n elapsed = time.time() - before\n assert_that(elapsed, close_to(.2, .05))\n assert_that(successful, 
equal_to(False))\n","sub_path":"keeptabs/scheduler_test.py","file_name":"scheduler_test.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"349793239","text":"# https://stackoverflow.com/questions/62436540/drag-and-drop-from-one-qlistview-to-another-qlistview\n\nimport sys\n\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem\nfrom PyQt5.QtWidgets import QListView, QApplication, QWidget, QHBoxLayout\n\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n draggable_list_view = QListView()\n draggable_list_view.setDragEnabled(True)\n model1 = QStandardItemModel()\n draggable_list_view.setModel(model1)\n for it in [\"yo\", \"yi\", \"ya\"]:\n item = QStandardItem(it)\n model1.appendRow(item)\n\n droppable_list_view = QListView()\n droppable_list_view.setAcceptDrops(True)\n droppable_list_view.setDropIndicatorShown(True)\n model2 = QStandardItemModel()\n droppable_list_view.setModel(model2)\n\n lay = QHBoxLayout(self)\n lay.addWidget(draggable_list_view)\n lay.addWidget(droppable_list_view)\n\n self.setGeometry(300, 300, 300, 150)\n\n\ndef main():\n\n app = QApplication(sys.argv)\n ex = Example()\n ex.show()\n app.exec_()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"200618-01-lw-drag.py","file_name":"200618-01-lw-drag.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"349728191","text":"\n\nfrom rest_framework.views import APIView\nfrom django.http import JsonResponse\nfrom hr.models import Notification\n\n\n\nclass CheckDataUpdates(APIView):\n\t'''\n\tCheck for new notifications. Front-end checks for those messages automatically. 
We use the Notification model.\n\n\tWe just notify the user that there are changes in the Catalog or HR structure of the system, so the front-end can refresh; we do not push the individual messages.\n\t'''\n\tpermission_classes = []\n\n\tdef get(self,request,format=None):\n\t\treply={'data':0}\n\t\tif Notification.objects.filter(is_counted=0).exclude(user_id=request.user.id).exists():\n\t\t\treply['data']=1\n\n\t\tNotification.objects.filter(is_counted=0).update(is_counted=1)\n\n\t\treturn JsonResponse(reply,status=200)","sub_path":"logisticsapp/logapp/messaging/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189303220","text":"import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nurl = 'https://dhlottery.co.kr/gameResult.do?method=byWin'\n\ndef is_lotto_number(n):\n    try:\n        n = int(n)\n    except (TypeError, ValueError):\n        raise Exception('Value Error')\n\n    is_integer = isinstance(n, int)\n    is_bigger_then_0 = n > 0\n    is_smaller_then_46 = n < 46\n    if not (is_integer and is_bigger_then_0 and is_smaller_then_46):\n        raise Exception('Number error')\n\ndef get_html_through_bs():\n    response = requests.get(url).text\n    bs = BeautifulSoup(response, 'html.parser')\n    return bs\n\ndef get_round():\n    bs = get_html_through_bs()\n    return int(bs.select_one('h4 strong').text[:-1])\n\ndef get_new_data():\n    bs = get_html_through_bs()\n    win_result = bs.select_one('div.win_result')\n    table_data = bs.select_one('table.tbl_data')\n\n    try:\n        num_data = win_result.select('.num')\n\n        # materialize the numbers in a list: a bare map() would be exhausted by the\n        # unpacking below and could not be iterated again for the validation loop\n        n = list(map(lambda x: x.text, num_data[0].select('span')))\n        num1, num2, num3, num4, num5, num6 = n\n        bonus = num_data[1].select_one('span').text\n    except Exception as exc:\n        print(exc)\n        return -1\n\n    try:\n        for num in n:\n            is_lotto_number(num)\n        is_lotto_number(bonus)\n    except Exception as exc:\n        print(exc)\n        return -2\n\n    round = int(win_result.select_one('h4 strong').text[:-1])\n\n    date_string = win_result.select_one('p.desc').text.strip('(').strip(')').strip('추첨').strip().split()\n    date_elements = map(lambda x:int(x[:-1]), date_string)\n    date = datetime.date(*date_elements)\n\n    first, second, third, fourth, fifth = map(lambda x: x.select('td') ,table_data.select('tbody tr'))\n\n    first_count = int(''.join(first[2].text.split(',')))\n    first_price = int(''.join(first[3].text.strip('원').split(',')))\n\n    second_count = int(''.join(second[2].text.split(',')))\n    second_price = int(''.join(second[3].text.strip('원').split(',')))\n\n    third_count = int(''.join(third[2].text.split(',')))\n    third_price = int(''.join(third[3].text.strip('원').split(',')))\n\n    fourth_count = int(''.join(fourth[2].text.split(',')))\n    fourth_price = int(''.join(fourth[3].text.strip('원').split(',')))\n\n    fifth_count = int(''.join(fifth[2].text.split(',')))\n    fifth_price = int(''.join(fifth[3].text.strip('원').split(',')))\n\n    result_data = {\n        'round': round,\n        'date': date,\n        'first_count': first_count,\n        'first_price': first_price,\n        'second_count': second_count,\n        'second_price': second_price,\n        'third_count': third_count,\n        'third_price': third_price,\n        'fourth_count': fourth_count,\n        'fourth_price': fourth_price,\n        'fifth_count': fifth_count,\n        'fifth_price': fifth_price,\n        'num1': num1,\n        'num2': num2,\n        'num3': num3,\n        'num4': num4,\n        'num5': num5,\n        'num6': num6,\n        'bonus': bonus\n    }\n\n    return 
result_data\n","sub_path":"router/util/scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"234560523","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 23 14:13:36 2017\n\n@author: lishiwang\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas import Series, DataFrame, Panel\nfrom datetime import datetime\nimport os\n\nfrom data import data\nfrom db_engine import db_engine\n\n# class that maintains the database\n\nclass database(object):\n    \"\"\" Class for handling the database, including fetching data from it and updating it.\n    \"\"\"\n    def __init__(self, *, start_date=None, end_date=pd.Timestamp(datetime.now().date().strftime('%Y-%m-%d')),\n                 market=\"83\"):\n        # the trading-day calendar\n        self.trading_days = pd.Series()\n        # data fetched from the database and reshaped into its final form\n        self.data = data()\n        # engine for the JYDB database\n        self.jydb_engine = None\n        # engine for the SmartQuant database, for frequently used market data\n        self.sq_engine = None\n        # data fetched from the SmartQuant database\n        self.sq_data = pd.DataFrame()\n        # engine for the GOGOAL database, for analyst forecast data\n        self.gg_engine = None\n        # start date, end date and market code of the data to fetch\n        self.start_date = start_date\n        self.end_date = end_date\n        self.market = market\n        # flags whether this fetch is an incremental update\n        self.is_update = False\n\n    # initialize the JYDB engine\n    def initialize_jydb(self):\n        self.jydb_engine = db_engine(server_type='mssql', driver='pymssql', username='lishi.wang', password='Zhengli1!',\n                                     server_ip='192.168.66.12', port='1433', db_name='JYDB', add_info='')\n\n    # initialize the SmartQuant engine\n    def initialize_sq(self):\n        self.sq_engine = db_engine(server_type='mssql', driver='pymssql', username='lishi.wang', password='Zhengli1!',\n                                   server_ip='192.168.66.12', port='1433', db_name='SmartQuant', add_info='')\n\n    # initialize the GOGOAL engine\n    def initialize_gg(self):\n        self.gg_engine = db_engine(server_type='mssql', driver='pymssql', username='lishi.wang', password='Zhengli1!',\n                                   server_ip='192.168.66.12', port='1433', db_name='GOGOAL', add_info='')\n\n    # fetch the trading-day calendar\n    def get_trading_days(self):\n        sql_query = \"select TradingDate as trading_days from QT_TradingDayNew where SecuMarket=\" +\\\n                    self.market + \" and IfTradingDay=1 \"\n        # if start/end dates are given, keep only the trading days between them\n        if isinstance(self.start_date, pd.Timestamp):\n            sql_query = sql_query + \"and TradingDate>=\" + \"'\" + str(self.start_date) + \"' \"\n        if isinstance(self.end_date, pd.Timestamp):\n            sql_query = sql_query + \"and TradingDate<=\" + \"'\" + str(self.end_date) + \"' \"\n        sql_query = sql_query + 'order by trading_days'\n\n        # fetch the data\n        trading_days = self.jydb_engine.get_original_data(sql_query)\n        self.trading_days = trading_days['trading_days']\n\n    # set the index and columns of the data: the index follows the trading-day calendar,\n    # the columns follow the stocks in ReturnDaily in SmartQuant\n    def get_labels(self):\n        sql_query = \"select distinct SecuCode from ReturnDaily where TradingDay <= '\" + \\\n                    str(self.trading_days.iloc[-1]) + \"' order by SecuCode\"\n        column_label = self.sq_engine.get_original_data(sql_query)\n        column_label = column_label.ix[:, 0]\n        index_label = self.trading_days\n\n        # every trading-day/stock panel in data, including the benchmark, follows these two labels\n        self.data.stock_price = pd.Panel(major_axis=index_label, minor_axis=column_label)\n        self.data.raw_data = pd.Panel(major_axis=index_label, minor_axis=column_label)\n        self.data.benchmark_price = pd.Panel(major_axis=index_label, minor_axis=column_label)\n        self.data.if_tradable = pd.Panel(major_axis=index_label, minor_axis=column_label)\n        self.data.const_data = pd.DataFrame(index=index_label)\n\n    # fetch the daily quote data (ClosePrice_adj and friends); everything is pulled from\n    # SmartQuant in one query so we do not have to hit it again later\n    
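# Illustration (values assumed, not real output): get_sq_data returns long-format rows like\n    #     TradingDay  SecuCode  ClosePrice ...\n    #     2017-01-03  000001    9.15\n    #     2017-01-03  000002    9.70\n    # which the helpers below reshape into one date-by-stock matrix per field via\n    # self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values=<field>).\n    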
def get_sq_data(self):\n sql_query = \"select TradingDay, SecuCode, OpenPrice, HighPrice, LowPrice, ClosePrice, PrevClosePrice, \"\\\n \"TurnoverVolume as Volume, TurnoverValue, TotalShares as Shares, \" \\\n \"NonRestrictedShares as FreeShares, MarketCap as MarketValue, \" \\\n \"FloatMarketCap as FreeMarketValue, IndustryNameNew as Industry, \"\\\n \"IfSuspended as is_suspended \"\\\n \"from ReturnDaily where \"\\\n \"IfTradingDay=1 and TradingDay>='\" + str(self.trading_days.iloc[0]) + \"' and TradingDay<='\" + \\\n str(self.trading_days.iloc[-1]) + \"' order by TradingDay, SecuCode\"\n self.sq_data = self.sq_engine.get_original_data(sql_query)\n\n # 提取sq_data里所需要的各种数据\n self.get_ochl()\n self.get_PrevClosePrice()\n self.get_Volume()\n self.get_value_and_vwap()\n self.get_total_and_free_mv()\n self.get_total_and_free_shares()\n self.get_Industry()\n self.get_is_suspended()\n\n # 取open,close, high, low的价格数据\n def get_ochl(self):\n ochl = ['OpenPrice', 'ClosePrice', 'HighPrice', 'LowPrice']\n for data_name in ochl:\n curr_data = self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values=data_name)\n self.data.stock_price[data_name] = curr_data\n\n # 取PrevClosePrice, 可用来算涨跌停价格, 也可用来算日收益率(后复权)\n def get_PrevClosePrice(self):\n PrevClosePrice = self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values='PrevClosePrice')\n self.data.stock_price['PrevClosePrice'] = PrevClosePrice\n\n # 取volumne\n def get_Volume(self):\n Volume = self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values='Volume')\n self.data.stock_price['Volume'] = Volume\n\n # 取turnover value以及vwap\n def get_value_and_vwap(self):\n TurnoverValue = self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values='TurnoverValue')\n self.data.stock_price['TurnoverValue'] = TurnoverValue\n vwap = self.data.stock_price['TurnoverValue'].div(self.data.stock_price['Volume'])\n self.data.stock_price['vwap'] = vwap\n\n # 取total shares和free shares\n def get_total_and_free_shares(self):\n Shares = self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values='Shares')\n self.data.stock_price['Shares'] = Shares\n FreeShares = self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values='FreeShares')\n self.data.stock_price['FreeShares'] = FreeShares\n\n # 取total mv和free mv\n def get_total_and_free_mv(self):\n MarketValue = self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values='MarketValue')\n self.data.stock_price['MarketValue'] = MarketValue\n FreeMarketValue = self.sq_data.pivot_table(index='TradingDay', columns='SecuCode', values='FreeMarketValue')\n self.data.stock_price['FreeMarketValue'] = FreeMarketValue\n\n # 取行业标签\n def get_Industry(self):\n Industry = self.sq_data.pivot(index='TradingDay', columns='SecuCode', values='Industry')\n self.data.raw_data['Industry'] = Industry\n\n # 取是否停牌\n def get_is_suspended(self):\n is_suspended = self.sq_data.pivot(index='TradingDay', columns='SecuCode', values='is_suspended')\n self.data.if_tradable['is_suspended'] = is_suspended\n\n # 从聚源数据库里取复权因子\n def get_AdjustFactor(self, *, first_date=pd.Timestamp('1900-01-01')):\n sql_query = \"select b.ExDiviDate, b.RatioAdjustingFactor, a.SecuCode from \" \\\n \"(select distinct InnerCode, SecuCode from SmartQuant.dbo.ReturnDaily) a \" \\\n \"left join (select * from JYDB.dbo.QT_AdjustingFactor where ExDiviDate >='\" + \\\n str(first_date) + \"' and ExDiviDate <='\" + str(self.trading_days.iloc[-1]) + \"') b \" \\\n \"on a.InnerCode=b.InnerCode \" \\\n \"order by 
SecuCode, ExDiviDate\"\n AdjustFactor = self.jydb_engine.get_original_data(sql_query)\n AdjustFactor = AdjustFactor.pivot_table(index='ExDiviDate', columns='SecuCode', values='RatioAdjustingFactor',\n aggfunc='first')\n\n # 要处理更新数据的时候可能出现的空数据的情况\n if AdjustFactor.empty:\n self.data.stock_price['AdjustFactor'] = np.nan\n # 如果是有数据, 但是是在更新数据, 就不在这里计算复权因子, 因为更新数据时,\n # 在不衔接新旧停牌数据的情况下计算的复权因子是不对的, 因此要在衔接后重新计算, 这里就不用再浪费时间计算了\n elif self.is_update:\n self.data.stock_price['AdjustFactor'] = AdjustFactor.fillna(method='ffill')\n self.data.stock_price['AdjustFactor'] = self.data.stock_price['AdjustFactor'].fillna(method='ffill')\n else:\n # 为了应对停牌期间复权因子发生变化的情况, 这里要取所有的停牌标记\n # 因为如果停牌标记只从取数据那天开始(2007-01-04), 则如果在此之前也一直在停牌,\n # 并且停牌期间复权因子已经发生变化的股票, 仍然会用已经变化的复权因子来进行计算(因为停牌标记只从07-01-04开始)\n # 尽管这些股票, 因为一来买不进去的缘故, 最终不会影响策略的回测结果, 但是它们错误的当天收益率,\n # 可能会影响一些其他指标的计算(例如, beta的计算)\n # 总之, 最后要实现的是把停牌期间因为复权因子变化带来的收益都挪到复牌第一天实现\n sql_query_sus = \"select TradingDay, SecuCode, IfSuspended as is_suspended from ReturnDaily where \" \\\n \"IfTradingDay=1 and TradingDay >= '\" + str(first_date) + \"' and TradingDay <= ' \" + \\\n str(self.trading_days.iloc[-1]) + \"' order by TradingDay, SecuCode\"\n suspended_mark = self.sq_engine.get_original_data(sql_query_sus)\n suspended_mark = suspended_mark.pivot_table(index='TradingDay', columns='SecuCode', values='is_suspended')\n # 首先将复权因子向前填充, 然后reindex到停牌数据上去\n AdjustFactor = AdjustFactor.fillna(method='ffill').reindex(index=suspended_mark.index,\n columns=suspended_mark.columns, method='ffill')\n # 对于停牌期间的股票, 要将它们的复权因子数据都改为nan\n AdjustFactor = AdjustFactor.where(np.logical_not(suspended_mark), np.nan)\n # 然后用停牌前最后一天的复权因子向前填充, 将停牌期间的复权因子都设置为停牌前最后一天的复权因子\n AdjustFactor = AdjustFactor.fillna(method='ffill')\n\n # 将数据直接储存进data.stock_price, 可以自动reindex, 最后再将nan填充成1\n self.data.stock_price['AdjustFactor'] = AdjustFactor.fillna(1)\n pass\n\n # 复权因子后, 计算调整后的价格\n def get_ochl_vwap_adj(self):\n ochl = ['OpenPrice', 'ClosePrice', 'HighPrice', 'LowPrice', 'vwap']\n for data_name in ochl:\n curr_data_adj = self.data.stock_price[data_name].mul(self.data.stock_price['AdjustFactor'])\n self.data.stock_price[data_name + '_adj'] = curr_data_adj\n pass\n\n\n # 取上市退市标记,即if_enlisted & if_delisted\n # 如果是第一次取数据(非更新数据)一些数据(包括财务数据)的起始日期并不是第一个交易日,\n # 即第一个交易日的数据在数据库里并不是标记为这个交易日的数据\n # 而���之前的数据,因此在非更新数据的情况下,起始日期选取为一个最小日期,以保证取到所有数据\n def get_list_status(self, *, first_date=pd.Timestamp('1900-01-01')):\n sql_query = \"select a.SecuCode, b.ChangeDate, b.ChangeType from \"\\\n \"(select distinct InnerCode, SecuCode from SmartQuant.dbo.ReturnDaily) a \" \\\n \"left join (select ChangeDate, ChangeType, InnerCode from LC_ListStatus where SecuMarket in \" \\\n \"(83,90) and ChangeDate>='\" + str(first_date) + \"' and ChangeDate<='\" + \\\n str(self.trading_days.iloc[-1]) + \"') b on a.InnerCode=b.InnerCode \"\\\n \" order by SecuCode, ChangeDate\"\n list_status = self.jydb_engine.get_original_data(sql_query)\n list_status = list_status.pivot_table(index='ChangeDate',columns='SecuCode',values='ChangeType',\n aggfunc='first')\n # 更新数据的时候可能出现更新时间段没有新数据的情况,要处理这种情况\n if list_status.empty:\n list_status = pd.DataFrame(np.nan, index=self.data.stock_price.major_axis,\n columns=self.data.stock_price.minor_axis)\n # 向前填充\n list_status = list_status.fillna(method='ffill')\n\n # 上市标记为1,找到那些为1的,然后将false全改为nan,再向前填充true,即可得到is_enlisted\n # 即一旦上市后,之后的is_enlisted都为true\n is_enlisted = list_status == 1\n is_enlisted = is_enlisted.replace(False, np.nan)\n is_enlisted = is_enlisted.fillna(method='ffill')\n # 
将时间索引和标准时间索引对齐,向前填充\n is_enlisted = is_enlisted.reindex(self.data.stock_price.major_axis, method='ffill')\n # 将股票索引对其,以保证fillna时可以填充所有的股票\n is_enlisted = is_enlisted.reindex(columns=self.data.stock_price.minor_axis)\n # 股票上市前会变成nan,它们未上市,因此将它们填成false\n # 更新的时候,那些一列全是nan的不能填,要等衔接旧数据时填\n if self.is_update:\n is_enlisted = is_enlisted.apply(lambda x:x if x.isnull().all() else x.fillna(0), axis=0)\n else:\n is_enlisted = is_enlisted.fillna(0).astype(np.int)\n\n # 退市标记为4, 找到那些为4的,然后将false改为nan,向前填充true,即可得到is_delisted\n # 即一旦退市之后,之后的is_delisted都为true\n # 退市准备期标记为6,都在4的前面,其他标记9,也在4的前面,而且两者数量很少,暂不考虑\n is_delisted = list_status == 4\n is_delisted = is_delisted.replace(False, np.nan)\n is_delisted = is_delisted.fillna(method='ffill')\n # 将时间索引和标准时间索引对齐,向前填充\n is_delisted = is_delisted.reindex(self.data.stock_price.major_axis, method='ffill')\n # 将股票索引对其,以保证fillna时可以填充所有的股票\n is_delisted = is_delisted.reindex(columns=self.data.stock_price.minor_axis)\n # 未退市过的股票,因为没有出现过4,会出现全是nan的情况,将它们填成false\n # 股票退市前会变成nan,它们未退市,依然填成false\n # 更新的时候,那些一列全是nan的不能填,要等衔接旧数据时填\n if self.is_update:\n is_delisted = is_delisted.apply(lambda x:x if x.isnull().all() else x.fillna(0), axis=0)\n else:\n is_delisted = is_delisted.fillna(0).astype(np.int)\n\n self.data.if_tradable['is_enlisted'] = is_enlisted\n self.data.if_tradable['is_delisted'] = is_delisted\n\n # 取总资产,总负债和所有者权益\n # 取合并报表,即if_merged = 1\n # 报表会进行调整因此每个时间点上可能会有多个不同时间段的报表,类似于前复权\n def get_asset_liability_equity(self, *, first_date=pd.Timestamp('1900-01-01')):\n sql_query = \"select b.InfoPublDate, b.EndDate, a.SecuCode, b.TotalAssets, b.TotalLiability, \"\\\n \"b.TotalEquity from (\"\\\n \"select distinct CompanyCode, SecuCode from SmartQuant.dbo.ReturnDaily) a \" \\\n \"left join (select InfoPublDate, EndDate, CompanyCode, TotalAssets, TotalLiability, \" \\\n \"TotalShareholderEquity as TotalEquity from LC_BalanceSheetAll where IfMerged=1 \"\\\n \"and InfoPublDate>='\" + str(first_date) + \"' and InfoPublDate<='\" + \\\n str(self.trading_days.iloc[-1]) + \"') b on a.CompanyCode=b.CompanyCode \"\\\n \" order by InfoPublDate, SecuCode, EndDate\"\n balance_sheet_data = self.jydb_engine.get_original_data(sql_query)\n\n # 对资产负债和所有者权益,只取每个时间点上最近的那一期报告,\n # 因为每个时间点上只会使���当前时间点的最新值,不是涉及变化率的计算\n recent_data = balance_sheet_data.groupby(['InfoPublDate', 'SecuCode'],as_index=False).nth(-1)\n\n # 更新数据的时候可能出现更新时间段没有新数据的情况,要处理这种情况\n TotalAssets = recent_data.pivot_table(index='InfoPublDate', columns='SecuCode', values='TotalAssets',\n aggfunc='first')\n if TotalAssets.empty:\n TotalAssets = pd.DataFrame(np.nan, index=self.data.stock_price.major_axis,\n columns=self.data.stock_price.minor_axis)\n else:\n TotalAssets = TotalAssets.fillna(method='ffill').reindex(self.data.stock_price.major_axis, method='ffill')\n TotalLiability = recent_data.pivot_table(index='InfoPublDate', columns='SecuCode', values='TotalLiability',\n aggfunc='first')\n if TotalLiability.empty:\n TotalLiability = pd.DataFrame(np.nan, index=self.data.stock_price.major_axis,\n columns=self.data.stock_price.minor_axis)\n else:\n TotalLiability = TotalLiability.fillna(method='ffill').reindex(self.data.stock_price.major_axis, method='ffill')\n TotalEquity = recent_data.pivot_table(index='InfoPublDate', columns='SecuCode', values='TotalEquity',\n aggfunc='first')\n if TotalEquity.empty:\n TotalEquity = pd.DataFrame(np.nan, index=self.data.stock_price.major_axis,\n columns=self.data.stock_price.minor_axis)\n else:\n TotalEquity = TotalEquity.fillna(method='ffill').reindex(self.data.stock_price.major_axis, 
method='ffill')\n\n self.data.raw_data['TotalAssets'] = TotalAssets\n self.data.raw_data['TotalLiability'] = TotalLiability\n self.data.raw_data['TotalEquity'] = TotalEquity\n\n # 计算pb\n def get_pb(self):\n pb = self.data.stock_price.ix['MarketValue']/self.data.raw_data.ix['TotalEquity']\n self.data.raw_data['PB'] = pb\n\n # 取一致预期净利润\n def get_ni_fy1_fy2(self):\n sql_query = \"select STOCK_CODE, CON_DATE, C4*10000 as NI, \"\\\n \"ROW_NUMBER() over (partition by stock_code, con_date order by rpt_date) as fy from \"\\\n \"CON_FORECAST_STK where C4_TYPE!=0 and con_date>='\" + str(self.trading_days.iloc[0]) + \\\n \"' and con_date<='\" + str(self.trading_days.iloc[-1]) + \\\n \"' order by stock_code, con_date, rpt_date\"\n forecast_ni = self.gg_engine.get_original_data(sql_query)\n grouped_data = forecast_ni.groupby(['CON_DATE', 'STOCK_CODE'], as_index=False)\n fy1_data = grouped_data.nth(0)\n fy2_data = grouped_data.nth(1)\n ni_fy1 = fy1_data.pivot_table(index='CON_DATE', columns='STOCK_CODE', values='NI')\n ni_fy2 = fy2_data.pivot_table(index='CON_DATE', columns='STOCK_CODE', values='NI')\n ni_fy1 = ni_fy1.fillna(method='ffill').reindex(self.data.stock_price.major_axis, method='ffill')\n ni_fy2 = ni_fy2.fillna(method='ffill').reindex(self.data.stock_price.major_axis, method='ffill')\n\n self.data.raw_data['NetIncome_fy1'] = ni_fy1\n self.data.raw_data['NetIncome_fy2'] = ni_fy2\n\n # 取一致预期eps\n def get_eps_fy1_fy2(self):\n sql_query = \"select STOCK_CODE, CON_DATE, C1 as EPS, \" \\\n \"ROW_NUMBER() over (partition by stock_code, con_date order by rpt_date) as fy from \" \\\n \"CON_FORECAST_STK where CON_TYPE!=0 and con_date>='\" + str(self.trading_days.iloc[0]) + \\\n \"' and con_date<='\" + str(self.trading_days.iloc[-1]) + \\\n \"' order by stock_code, con_date, rpt_date\"\n forecast_eps = self.gg_engine.get_original_data(sql_query)\n grouped_data = forecast_eps.groupby(['CON_DATE', 'STOCK_CODE'], as_index=False)\n fy1_data = grouped_data.nth(0)\n fy2_data = grouped_data.nth(1)\n eps_fy1 = fy1_data.pivot_table(index='CON_DATE', columns='STOCK_CODE', values='EPS')\n eps_fy2 = fy2_data.pivot_table(index='CON_DATE', columns='STOCK_CODE', values='EPS')\n eps_fy1 = eps_fy1.fillna(method='ffill').reindex(self.data.stock_price.major_axis, method='ffill')\n eps_fy2 = eps_fy2.fillna(method='ffill').reindex(self.data.stock_price.major_axis, method='ffill')\n\n self.data.raw_data['EPS_fy1'] = eps_fy1\n self.data.raw_data['EPS_fy2'] = eps_fy2\n\n # 取cash earnings ttm\n def get_cash_related_ttm(self):\n sql_query = \"set query_governor_cost_limit 0\"\\\n \"select b.DataDate, a.SecuCode, b.cash_earnings_ttm, b.cfo_ttm from \" \\\n \"(select distinct InnerCode, SecuCode from ReturnDaily) a left join \" \\\n \"(select DataDate, CashEquivalentIncrease as cash_earnings_ttm, InnerCode, \" \\\n \"NetOperateCashFlow as cfo_ttm from \" \\\n \"TTM_LC_CashFlowStatementAll where DataDate>='\" + str(self.trading_days.iloc[0]) + \\\n \"' and DataDate<='\" + str(self.trading_days.iloc[-1]) + \"') b \" \\\n \"on a.InnerCode=b.InnerCode order by DataDate, SecuCode\"\n ttm_data = self.sq_engine.get_original_data(sql_query)\n cash_earnings_ttm = ttm_data.pivot_table(index='DataDate', columns='SecuCode', values='cash_earnings_ttm')\n cash_earnings_ttm = cash_earnings_ttm.fillna(method='ffill').reindex(self.data.stock_price.major_axis,\n method='ffill')\n self.data.raw_data['CashEarnings_ttm'] = cash_earnings_ttm\n cfo_ttm = ttm_data.pivot_table(index='DataDate', columns='SecuCode', values='cfo_ttm')\n cfo_ttm = 
cfo_ttm.fillna(method='ffill').reindex(self.data.stock_price.major_axis, method='ffill')\n self.data.raw_data['CFO_ttm'] = cfo_ttm\n\n # 取net income ttm\n def get_ni_ttm(self):\n sql_query = \"set query_governor_cost_limit 0\"\\\n \"select b.DataDate, a.SecuCode, b.ni_ttm from \" \\\n \"(select distinct InnerCode, SecuCode from ReturnDaily) a left join \" \\\n \"(select DataDate, NetProfit as ni_ttm, InnerCode from TTM_LC_IncomeStatementAll \" \\\n \"where DataDate>='\" + str(self.trading_days.iloc[0]) + \"' and DataDate<='\" + \\\n str(self.trading_days.iloc[-1]) + \"') b on a.InnerCode=b.InnerCode \" \\\n \"order by DataDate, SecuCode\"\n ttm_data = self.sq_engine.get_original_data(sql_query)\n ni_ttm = ttm_data.pivot_table(index='DataDate', columns='SecuCode', values='ni_ttm')\n ni_ttm = ni_ttm.fillna(method='ffill').reindex(self.data.stock_price.major_axis,\n method='ffill')\n self.data.raw_data['NetIncome_ttm'] = ni_ttm\n\n # 计算pe ttm\n def get_pe_ttm(self):\n pe_ttm = self.data.stock_price.ix['MarketValue']/self.data.raw_data.ix['NetIncome_ttm']\n self.data.raw_data['PE_ttm'] = pe_ttm\n\n # 取ni ttm, revenue ttm, eps_ttm的两年增长率\n def get_ni_revenue_eps_growth(self):\n sql_query = \"set query_governor_cost_limit 0\"\\\n \"select b.DataDate, a.SecuCode, b.EndDate, b.ni_ttm, b.revenue_ttm, b.eps_ttm from \" \\\n \"(select distinct InnerCode, SecuCode from ReturnDaily) a \" \\\n \"left join (select DataDate, InnerCode, EndDate, NetProfit as ni_ttm, \" \\\n \"TotalOperatingRevenue as revenue_ttm, BasicEPS as eps_ttm from TTM_LC_IncomeStatementAll_8Q \" \\\n \"where DataDate>='\" + str(self.trading_days.iloc[0]) + \\\n \"' and DataDate<='\" + str(self.trading_days.iloc[-1]) + \"') b \" \\\n \"on a.InnerCode=b.InnerCode order by DataDate, SecuCode, EndDate\"\n ttm_data_8q = self.sq_engine.get_original_data(sql_query)\n # 两年增长率,直接用每个时间点上的当前quarter的ttm数据除以8q以前的ttm数据减一\n grouped_data = ttm_data_8q.groupby(['DataDate', 'SecuCode'])\n # 定义计算两年增长率的函数\n from strategy_data import strategy_data\n def calc_growth(s):\n # 数据的期数, 有些数据可能并没有8期\n no_of_data = s.shape[0]\n # 根据数据的期数计算annualized term\n ann_term = no_of_data/4\n growth = strategy_data.get_ni_growth(s, lag=no_of_data-1, annualize_term=ann_term)\n return growth.iloc[-1]\n growth_data = grouped_data['ni_ttm','revenue_ttm','eps_ttm'].apply(calc_growth)\n time_index = growth_data.index.get_level_values(0)\n stock_index = growth_data.index.get_level_values(1)\n\n ni_ttm_growth_8q = growth_data.pivot_table(index=time_index, columns=stock_index, values='ni_ttm')\n ni_ttm_growth_8q = ni_ttm_growth_8q.fillna(method='ffill').reindex(self.data.stock_price.major_axis,\n method='ffill').replace(np.inf, np.nan)\n revenue_ttm_growth_8q = growth_data.pivot_table(index=time_index, columns=stock_index, values='revenue_ttm')\n revenue_ttm_growth_8q = revenue_ttm_growth_8q.fillna(method='ffill').reindex(self.data.stock_price.major_axis,\n method='ffill').replace(np.inf, np.nan)\n eps_ttm_growth_8q = growth_data.pivot_table(index=time_index, columns=stock_index, values='eps_ttm')\n eps_ttm_growth_8q = eps_ttm_growth_8q.fillna(method='ffill').reindex(self.data.stock_price.major_axis,\n method='ffill').replace(np.inf, np.nan)\n self.data.raw_data['NetIncome_ttm_growth_8q'] = ni_ttm_growth_8q\n self.data.raw_data['Revenue_ttm_growth_8q'] = revenue_ttm_growth_8q\n\n self.data.raw_data['EPS_ttm_growth_8q'] = eps_ttm_growth_8q\n\n # 取指数行情数据\n def get_index_price(self):\n sql_query = \"select b.TradingDay, a.SecuCode, b.ClosePrice, b.OpenPrice from \"\\\n \"(select 
distinct InnerCode, SecuCode from SecuMain \"\\\n \"where SecuCode in ('000016','000300','000902','000905','000906','H00016','H00300',\" \\\n \"'H00905','H00906', '399005', '399006', '399333', '399606') and SecuCategory=4) a \"\\\n \"left join (select InnerCode, TradingDay, ClosePrice, OpenPrice from QT_IndexQuote \"\\\n \"where TradingDay>='\" + str(self.trading_days.iloc[0]) + \"' and TradingDay<='\" + \\\n str(self.trading_days.iloc[-1]) + \"') b \"\\\n \"on a.InnerCode=b.InnerCode order by TradingDay, SecuCode\"\n index_data = self.jydb_engine.get_original_data(sql_query)\n index_close_price = index_data.pivot_table(index='TradingDay', columns='SecuCode', values='ClosePrice')\n index_close_price = index_close_price.reindex(self.data.stock_price.major_axis)\n index_open_price = index_data.pivot_table(index='TradingDay', columns='SecuCode', values='OpenPrice')\n index_open_price = index_open_price.reindex(self.data.stock_price.major_axis)\n # Because index quotes are a special case, store each index quote in the first column of the corresponding item of benchmark_price\n index_name = {'000016': 'sz50', '000300': 'hs300', '000902': 'zzlt',\n '000905': 'zz500', '000906': 'zz800', '399005': 'zxb', '399006': 'cyb'}\n for key in index_name:\n self.data.benchmark_price.ix['ClosePrice_'+index_name[key], :, 0] = index_close_price[key].values\n self.data.benchmark_price.ix['OpenPrice_'+index_name[key], :, 0] = index_open_price[key].values\n # Total-return indices only have close prices, and there is no total-return version of the CSI Free Float (zzlt) index\n index_adj_name = {'H00016':'adj_sz50', 'H00300':'adj_hs300', 'H00905':'adj_zz500', 'H00906':'adj_zz800',\n '399333':'adj_zxb', '399606': 'adj_cyb'}\n for key in index_adj_name:\n self.data.benchmark_price.ix['ClosePrice_'+index_adj_name[key], :, 0] = index_close_price[key].values\n pass\n\n # Fetch index weight data\n def get_index_weight(self, *, first_date=pd.Timestamp('1900-01-01')):\n # Weights of the less important indices, such as the SME Board (zxb) and ChiNext (cyb) indices, are not in SmartQuant, so they still have to be fetched here\n # These minor indices are mainly used for research; in practice they cannot be shorted\n sql_query_minor = \"select b.EndDate, a.SecuCode as index_code, c.SecuCode as comp_code, \" \\\n \"b.Weight/100 as Weight from (select distinct InnerCode, SecuCode from SecuMain \"\\\n \"where SecuCode in ('399005', '399006') and SecuCategory=4) a \"\\\n \"left join (select EndDate, IndexCode, InnerCode, Weight from LC_IndexComponentsWeight \"\\\n \"where EndDate>='\" + str(first_date) + \"' and EndDate<='\" + \\\n str(self.trading_days.iloc[-1]) + \"') b \"\\\n \"on a.InnerCode=b.IndexCode \"\\\n \"left join (select distinct InnerCode, SecuCode from SecuMain) c \"\\\n \"on b.InnerCode=c.InnerCode \"\\\n \"order by EndDate, index_code, comp_code \"\n weight_data_minor = self.jydb_engine.get_original_data(sql_query_minor)\n\n # Fetch index weights from the table that splices JYDB data with GTA data (after 2015); daily index weights are available from 2015 onwards\n sql_query = \"select b.EndDate, a.SecuCode as index_code, c.SecuCode as comp_code, \" \\\n \"b.Weight/100 as Weight from (select distinct InnerCode, SecuCode from \" \\\n \"SecuMain where SecuCode in ('000016','000300','000902','000905','000906') \" \\\n \"and SecuCategory=4) a left join (select EndDate, IndexInnerCode, SecuInnerCode, \" \\\n \"Weight from SmartQuant.dbo.IndexComponentWeight where EndDate>='\" + \\\n str(first_date) + \"' and EndDate<='\" + str(self.trading_days.iloc[-1]) + \\\n \"') b on a.InnerCode=b.IndexInnerCode left join (select distinct InnerCode, \" \\\n \"SecuCode from SecuMain) c on b.SecuInnerCode=c.InnerCode \" \\\n \"order by EndDate, index_code, comp_code\"\n weight_data = self.jydb_engine.get_original_data(sql_query)\n # Splice the two datasets together for use\n weight_data = weight_data.append(weight_data_minor)\n index_weight = 
weight_data.pivot_table(index='EndDate', columns=['index_code', 'comp_code'],\n values='Weight', aggfunc='first')\n\n index_name = {'000016': 'sz50', '000300': 'hs300', '000902': 'zzlt',\n '000905': 'zz500', '000906': 'zz800', '399005': 'zxb', '399006': 'cyb'}\n # When updating, the update window may contain no new data at all; that case has to be handled\n if index_weight.empty:\n index_weight = pd.DataFrame(np.nan, index=self.data.stock_price.major_axis,columns=\n pd.MultiIndex.from_product([list(index_name.keys()), self.data.stock_price.minor_axis]))\n\n # Loop over the indices and store each one\n for i in index_weight.columns.get_level_values(0).drop_duplicates():\n # Here one row holds the data of all indices together, so it can happen that, say, hs300 has data in a period while zz500 has none at all\n # This never happens with the other data, which are stored separately: if a period has no data, the raw data simply lacks that period, so the indices have to be handled one by one\n curr_weight = index_weight.ix[:, i]\n # In the no-new-data update case mentioned above, we cannot dropna and then fill, because dropna would leave an empty dataframe\n if curr_weight.isnull().all().all():\n pass\n else:\n curr_weight = curr_weight.dropna(axis=0, how='all').reindex(curr_weight.index, method='ffill').\\\n reindex(self.data.stock_price.major_axis, method='ffill')\n self.data.benchmark_price['Weight_'+index_name[i]] = curr_weight\n # Fill the nans in the weight data with 0\n # When updating, leave all-nan rows alone and only fill nans with 0 in rows that do have data\n if self.is_update:\n self.data.benchmark_price['Weight_'+index_name[i]] = self.data.benchmark_price['Weight_'+index_name[i]].\\\n apply(lambda x:x if x.isnull().all() else x.fillna(0), axis=1)\n else:\n self.data.benchmark_price['Weight_'+index_name[i]] = \\\n self.data.benchmark_price['Weight_'+index_name[i]].fillna(0)\n pass\n\n # Fetch factor data from the current factor library\n def get_runner_value(self, runner_id):\n sql_query = \"select runnerdate as TradingDay, stockticker as SecuCode, value as factor_value \" \\\n \"from RunnerValue where runnerdate>='\" + str(self.trading_days.iloc[0]) + \"' and \" \\\n \"runnerdate<='\" + str(self.trading_days.iloc[-1]) + \"' and runnerid=\" + str(runner_id) + \" \" \\\n \"order by TradingDay, SecuCode\"\n runner_value_data = self.sq_engine.get_original_data(sql_query)\n runner_value = runner_value_data.pivot_table(index='TradingDay', columns='SecuCode', values='factor_value')\n # Fix the wrong dtype of TradingDay by converting it to datetime\n runner_value = runner_value.set_index(pd.to_datetime(runner_value.index))\n\n # Store the data\n self.data.stock_price['runner_value_'+str(runner_id)] = runner_value\n\n # Save the data files\n def save_data(self):\n data.write_data(self.data.stock_price)\n data.write_data(self.data.raw_data)\n data.write_data(self.data.benchmark_price)\n data.write_data(self.data.if_tradable)\n self.data.const_data.to_csv('const_data.csv', index_label='datetime', na_rep='NaN', encoding='GB18030')\n\n # Main function for fetching data\n # With update_time left at its default, data is fetched for the first time; to update, simply pass the first trading day of the update as update_time\n def get_data_from_db(self, *, update_time=pd.Timestamp('1900-01-01')):\n self.initialize_jydb()\n self.initialize_sq()\n self.initialize_gg()\n self.get_trading_days()\n self.get_labels()\n self.get_sq_data()\n self.get_AdjustFactor(first_date=update_time)\n self.get_ochl_vwap_adj()\n print('get sq data has been completed...\\n')\n self.get_list_status(first_date=update_time)\n print('get list status has been completed...\\n')\n self.get_asset_liability_equity(first_date=update_time)\n print('get balancesheet data has been completed...\\n')\n self.get_pb()\n print('get pb has been completed...\\n')\n self.get_ni_fy1_fy2()\n self.get_eps_fy1_fy2()\n print('get forecast data has been completed...\\n')\n self.get_cash_related_ttm()\n print('get cash related ttm has been completed...\\n')\n self.get_ni_ttm()\n print('get netincome ttm has been completed...\\n')\n self.get_pe_ttm()\n print('get pe_ttm 
has been completed...\\n')\n self.get_ni_revenue_eps_growth()\n print('get growth ttm has been completed...\\n')\n self.get_index_price()\n self.get_index_weight(first_date=update_time)\n print('get index data has been completed...\\n')\n\n # When updating, data must not be saved yet; only the non-update case may save here\n if not self.is_update:\n self.save_data()\n\n # Main function for updating data\n def update_data_from_db(self, *, end_date=None):\n # Update flag\n self.is_update = True\n # First read the ClosePrice_adj data and use it as the reference marker when updating\n data_mark = pd.read_csv('ClosePrice_adj.csv', parse_dates=True, index_col=0)\n # The first day of the update is the last day in the previous data marker,\n # because when the data was last updated, that day's data may not have been available yet;\n # so for consistency the first day of the update is always set to that day\n last_day = data_mark.iloc[-1].name\n self.start_date = last_day\n\n # An end date for the update can be set; by default the update runs through today\n if isinstance(end_date, pd.Timestamp):\n self.end_date = end_date\n\n # Update the data\n self.get_data_from_db(update_time=self.start_date)\n\n # Read the previous, old data\n stock_price_name_list = ['ClosePrice_adj', 'OpenPrice_adj', 'HighPrice_adj', 'LowPrice_adj',\n 'vwap', 'OpenPrice', 'ClosePrice', 'HighPrice', 'LowPrice',\n 'vwap_adj', 'PrevClosePrice', 'AdjustFactor', 'Volume', 'Shares',\n 'FreeShares', 'MarketValue', 'FreeMarketValue']\n raw_data_name_list = ['Industry', 'TotalAssets', 'TotalLiability', 'TotalEquity', 'PB', 'NetIncome_fy1',\n 'NetIncome_fy2', 'EPS_fy1', 'EPS_fy2', 'CashEarnings_ttm', 'CFO_ttm', 'NetIncome_ttm',\n 'PE_ttm', 'NetIncome_ttm_growth_8q', 'Revenue_ttm_growth_8q', 'EPS_ttm_growth_8q']\n if_tradable_name_list = ['is_suspended', 'is_enlisted', 'is_delisted']\n benchmark_index_name = ['sz50', 'hs300', 'zzlt', 'zz500', 'zz800', 'zxb', 'cyb']\n benchmark_data_type = ['ClosePrice', 'OpenPrice', 'Weight', 'ClosePrice_adj']\n benchmark_price_name_list = [a+'_'+b for a in benchmark_data_type for b in benchmark_index_name]\n # Note the CSI Free Float (zzlt) index has no closeprice adj, i.e. no total-return data\n benchmark_price_name_list.remove('ClosePrice_adj_zzlt')\n\n old_stock_price = data.read_data(stock_price_name_list, stock_price_name_list)\n old_raw_data = data.read_data(raw_data_name_list, raw_data_name_list)\n old_if_tradable = data.read_data(if_tradable_name_list, if_tradable_name_list)\n old_benchmark_price = data.read_data(benchmark_price_name_list, benchmark_price_name_list)\n old_const_data = data.read_data(['const_data'], ['const_data'])\n old_const_data = old_const_data.ix['const_data']\n\n # The stock universe of the new data may differ from the old data, so reindex the old data to the new stock index\n new_stock_index = self.data.stock_price.minor_axis\n old_stock_price = old_stock_price.reindex(minor_axis=new_stock_index)\n old_raw_data = old_raw_data.reindex(minor_axis=new_stock_index)\n old_if_tradable = old_if_tradable.reindex(minor_axis=new_stock_index)\n old_benchmark_price = old_benchmark_price.reindex(minor_axis=new_stock_index)\n\n # Splice the old and new data together\n new_stock_price = pd.concat([old_stock_price.drop(last_day, axis=1).sort_index(),\n self.data.stock_price.sort_index()], axis=1)\n new_raw_data = pd.concat([old_raw_data.drop(last_day, axis=1).sort_index(),\n self.data.raw_data.sort_index()], axis=1)\n new_if_tradable = pd.concat([old_if_tradable.drop(last_day, axis=1).sort_index(),\n self.data.if_tradable.sort_index()], axis=1)\n new_benchmark_price = pd.concat([old_benchmark_price.drop(last_day, axis=1).sort_index(),\n self.data.benchmark_price.sort_index()], axis=1)\n new_const_data = pd.concat([old_const_data.drop(last_day, axis=0).sort_index(axis=1),\n self.data.const_data.sort_index(axis=1)], axis=0)\n\n self.data.stock_price = new_stock_price\n self.data.raw_data = new_raw_data\n self.data.if_tradable = new_if_tradable\n self.data.benchmark_price = new_benchmark_price\n self.data.const_data = 
new_const_data\n\n # Fillna the data that needs it, and redo the computations that depend on the filled data\n # This mainly concerns the data that use the first_date argument, plus the data derived from them\n self.data.raw_data['TotalAssets'] = self.data.raw_data['TotalAssets'].fillna(method='ffill')\n self.data.raw_data['TotalLiability'] = self.data.raw_data['TotalLiability'].fillna(method='ffill')\n self.data.raw_data['TotalEquity'] = self.data.raw_data['TotalEquity'].fillna(method='ffill')\n self.get_pb()\n # Note these two items need another fillna after forward-filling from the old data, because stocks that are new in the update are also nan in the reindexed old data\n self.data.if_tradable['is_enlisted'] = self.data.if_tradable['is_enlisted'].\\\n fillna(method='ffill').fillna(0).astype(np.int)\n self.data.if_tradable['is_delisted'] = self.data.if_tradable['is_delisted'].\\\n fillna(method='ffill').fillna(0).astype(np.int)\n # The index weight data works the same way: fill na again after forward-filling from the old data, for the same reason as the listing/delisting data above\n for index_name in benchmark_index_name:\n self.data.benchmark_price['Weight_'+index_name] = self.data.benchmark_price['Weight_'+index_name].\\\n fillna(method='ffill').fillna(0.0)\n # When updating, the adjust factor (and, after it, the adjusted prices) must be recomputed here, after the suspension flags have been spliced\n # As when fetching all the adjust factor data directly, first set the adjust factor during suspensions to nan,\n # then forward-fill with the adjust factor of the last day before the suspension, so that changes during the suspension show up on the first day after resumption,\n # and finally fill the remaining nans with 1\n self.data.stock_price['AdjustFactor'] = self.data.stock_price['AdjustFactor'].where(\n np.logical_not(self.data.if_tradable['is_suspended']), np.nan).\\\n fillna(method='ffill').fillna(1)\n # Since the adjust factor was spliced and forward-filled, recompute the adjusted prices; otherwise the earlier adjusted prices would be nan\n self.get_ochl_vwap_adj()\n\n self.save_data()\n\n # Reset the flag\n self.is_update = False\n\nif __name__ == '__main__':\n import time\n start_time = time.time()\n db = database(start_date=pd.Timestamp('2007-01-01'), end_date=pd.Timestamp('2017-11-14'))\n # db.is_update=False\n # db.get_data_from_db()\n # db.update_data_from_db(end_date=pd.Timestamp('2017-11-14'))\n db.initialize_jydb()\n db.initialize_sq()\n db.initialize_gg()\n db.get_trading_days()\n db.get_labels()\n # db.get_AdjustFactor()\n # db.get_sq_data()\n # db.get_index_price()\n db.get_index_weight()\n data.write_data(db.data.benchmark_price)\n # for runner_id in [1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,24,27,30,31,32,35,36]:\n # db.get_runner_value(runner_id)\n # db.data.stock_price.to_hdf('runner_value', '123')\n # data.write_data(db.data.stock_price, file_name=['runner_value_5'])\n print(\"time: {0} seconds\\n\".format(time.time()-start_time))\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":44422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189469627","text":"def selection_sort(arr):\n #Move through the entire array\n for i in range(len(arr)): \n \n # Find the minimum element in remaining \n # unsorted array \n min_idx = i \n for j in range(i+1, len(arr)): \n if arr[min_idx] > arr[j]: \n min_idx = j \n \n # Swap the found minimum element with \n # the first element \n arr[i], arr[min_idx] = arr[min_idx], arr[i] \n\n return arr\n\n\n# TO-DO: implement the Bubble Sort function below\ndef bubble_sort(arr): \n n = len(arr) \n \n # Move through all array elements \n for i in range(n): \n swapped = False\n \n # Last i elements are already \n # in place \n for j in range(0, n-i-1): \n \n # traverse the array from 0 to \n # n-i-1. 
Swap if the element \n # found is greater than the \n # next element \n if arr[j] > arr[j+1] : \n arr[j], arr[j+1] = arr[j+1], arr[j] \n swapped = True\n \n # IF no two elements were swapped \n # by inner loop, then break \n if swapped == False: \n break\n \n return arr\n\n'''\nSTRETCH: implement the Counting Sort function below\n\nCounting sort is a sorting algorithm that works on a set of data where\nwe specifically know the maximum value that can exist in that set of\ndata. The idea behind this algorithm then is that we can create \"buckets\"\nfrom 0 up to the max value. This is most easily done by initializing an\narray of 0s whose length is the max value + 1 (why do we need this \"+ 1\"?).\n\nEach buckets[i] then is responsible for keeping track of how many times \nwe've seen `i` in the input set of data as we iterate through it.\nOnce we know exactly how many times each piece of data in the input set\nshowed up, we can construct a sorted set of the input data from the \nbuckets. \n\nWhat is the time and space complexity of the counting sort algorithm?\n'''\n# def counting_sort(arr, maximum=None):\n# # Your code here\n\n\n# return arr\n","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"406567306","text":"from collections import defaultdict\nd = defaultdict(list)\nl = list()\nn, m = map(int, input().split())\nfor i in range(1, n+1):\n d[input()].append(str(i))\n for i in range(1, m+1):\n element = input()\n if element in d:\n print(' '.join(d[element]))\n else:\n print(-1)\n","sub_path":"python/Collections/defaultdict.py","file_name":"defaultdict.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"33875038","text":"# File: CreditCard.py\r\n# Description:\r\n# Student Name: Gabrielle Ransom\r\n# Student UT EID: gmr729\r\n# Course Name: CS 303E\r\n# Unique Number: 51200\r\n# Date Created: 11/14/16\r\n# Date Last Modified: 11/14/16\r\n\r\ndef is_valid (cc_num):\r\n\tnum_list = []\r\n\tprint(\"\")\r\n\t\r\n\tif(len(cc_num) < 15 or len(cc_num) > 16):\r\n\t\tprint(\"Not a 15 or 16-digit number.\")\r\n\t\treturn \r\n\telse:\r\n\t\tfor digits in range(len(cc_num)):\r\n\t\t\tcc_num[digits] = int(cc_num[digits])\r\n\t\t\tnum_list.append(cc_num[digits])\r\n\r\n\t\tnum_list.reverse()\r\n\t\tfor i in range(1, len(num_list), 2):\r\n\t\t\tnum_list[i] *= 2\r\n\t\t\tif(num_list[i] > 9):\r\n\t\t\t\tnum_list[i] = 1 + num_list[i] - 10\r\n\r\n\t\tif(sum(num_list) % 10 == 0):\r\n\t\t\treturn True\r\n\r\n\r\ndef cc_type(cc_num):\r\n\tnums = \"\".join(str(num) for num in cc_num)\r\n\tfirstTwo = \"\".join(str(nums[0:2]))\r\n\tfirstThree = \"\".join(str(nums[0:3]))\r\n\tfirstFour = \"\".join(str(nums[0:4]))\r\n\r\n\tif(firstTwo == \"34\" or firstTwo == \"37\"):\r\n\t\treturn \"Valid American Express\"\r\n\telif(firstTwo == \"50\" or firstTwo == \"51\" or firstTwo == \"52\" or firstTwo == \"53\" or firstTwo == \"54\" or firstTwo == \"55\"):\r\n\t\treturn \"Valid MasterCard\"\r\n\telif(firstTwo == \"65\" or firstThree == \"644\" or firstFour == \"6011\"):\r\n\t\treturn \"Valid Discover\"\r\n\telif(nums[0:1] == \"4\"):\r\n\t\treturn \"Valid Visa\"\r\n\telse:\r\n\t\treturn \"Invalid\"\r\n\r\ndef main():\r\n\tprint()\r\n\tcreditCard = str(input(\"Enter a 15 or 16-digit number: \"))\r\n\tcreditCard = list(creditCard)\r\n\r\n\tif 
is_valid(creditCard):\r\n\t\tprint(cc_type(creditCard) + \" credit card number.\")\r\n\r\n\r\nmain()","sub_path":"CreditCard.py","file_name":"CreditCard.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"599954127","text":"# def mulmodulo(a, b, m):\n# res = 0\n# for i in range(b):\n# res = (res + a) % m\n# print(i, res)\n# return res\n\n\ndef expmodulo(b, e, m):\n if e == 0:\n return 1\n res = 1\n for i in range(e):\n res *= b\n res %= m\n return res\n\n\ndef main():\n l = list(input())\n l.reverse()\n sum = 0\n for i in range(len(l)):\n if l[i] == '1':\n sum += expmodulo(2, i, 3)\n if sum % 3 == 0:\n return(1)\n else:\n return(0)\n\n\nt = int(input())\nfor _ in range(t):\n print(main())\n","sub_path":"GFG/Is Binary Number Multiple of 3.py","file_name":"Is Binary Number Multiple of 3.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87295394","text":"import os\r\nimport pickle\r\nimport warnings\r\nimport numpy as np\r\nimport streamlit as st\r\nfrom datetime import date\r\nimport pandas_datareader as web\r\nimport matplotlib.pyplot as plt\r\nplt.style.use(\"fivethirtyeight\")\r\nwarnings.filterwarnings('ignore')\r\nfrom tensorflow.keras.models import load_model\r\n\r\n##### Header section #####\r\nst.markdown(\"
💸 Stock Price Forecasting 📈
\", unsafe_allow_html=True)\r\n\r\n\r\n##### Dropdown to choose ticker #####\r\nticker = st.selectbox(\"Choose ticker :\", ['AAPL [Apple]'\r\n #'FB [Facebook]',\r\n #'HDB [HDFC Bank Limited]',\r\n #'MSFT [Microsoft]',\r\n #'TSLA [Tesla, Inc.]'\r\n ])\r\n\r\n# Extract stock ticker\r\nstock_ticker = ticker.split(' ')[0]\r\nstock_comp = ticker.split(' ')[1]\r\nstock_comp = stock_comp[1:-1]\r\n\r\n\r\n##### Displaying Slider and Data table #####\r\n(start_date, end_date) = st.slider(\"Select date range :\", date(2012, 1, 1), date.today(), \r\n (date(2012, 1, 1), date.today()),\r\n format=\"DD/MM/YYYY\")\r\n\r\nplot_df = web.DataReader(stock_ticker, data_source='yahoo', start=start_date, end=end_date)\r\nst.write(plot_df)\r\n\r\n\r\n##### Displaying `Close` Price chart ##### \r\nfig = plt.figure(figsize=(12,5))\r\nplt.title(stock_comp + \" - Stock Price History\")\r\nplt.plot(plot_df['Close'])\r\nplt.xlabel(\"Years\")\r\nplt.ylabel(\"Close Price in USD($)\")\r\nst.pyplot(fig)\r\n\r\n# Insert empty line\r\nst.progress(100)\r\n\r\nnb_days = st.selectbox(\"Get Stock Price Forecast for :\", ['1 Day', '2 Days', '3 Days'])\r\n\r\n# define a placeholder\r\nph = st.empty()\r\n\r\n# Logger function for UI\r\ndef logger(ph, message):\r\n if message == \"\":\r\n ph.write(message)\r\n else:\r\n ph.write(\"[INFO] \" + message + \"...\")\r\n\r\n# Function to get forecast after button click\r\ndef getForecast(ticker, nb_days):\r\n \r\n # Get the stock price data and store in temp datagframe\r\n curr_date = str(date.today())\r\n df = web.DataReader(stock_ticker, data_source='yahoo', start='2010-01-01', end=curr_date)\r\n temp_df = df.iloc[len(df)-90 : len(df), :]\r\n \r\n \r\n # get 'Close' price and covert to numpy array\r\n user_data = temp_df['Close'].values\r\n \r\n \r\n # Load model and scaler\r\n model_path, scaler_path = \"\", \"\"\r\n cwd = os.getcwd()\r\n if ticker == 'AAPL':\r\n if nb_days == '1 Day':\r\n # NOTE : COMMENTED PATH WORK ON LOCAL SYSTEM AND UNCOMMENTED ONES WORK FOR WEB APP\r\n #model_path = cwd + '\\\\Apple\\\\AAPL_1_day_SPF_model'\r\n #scaler_path = cwd + '\\\\Apple\\\\AAPL_1_day_SPF_scaler.pkl'\r\n model_path = cwd + '/Apple/AAPL_1_day_SPF_model'\r\n scaler_path = cwd + '/Apple/AAPL_1_day_SPF_scaler.pkl'\r\n elif nb_days == '2 Days':\r\n #model_path = cwd + '\\\\Apple\\\\AAPL_2_days_SPF_model'\r\n #scaler_path = cwd + '\\\\Apple\\\\AAPL_2_days_SPF_scaler.pkl'\r\n model_path = cwd + '/Apple/AAPL_2_days_SPF_model'\r\n scaler_path = cwd + '/Apple/AAPL_2_days_SPF_scaler.pkl'\r\n else:\r\n #model_path = cwd + '\\\\Apple\\\\AAPL_3_days_SPF_model'\r\n #scaler_path = cwd + '\\\\Apple\\\\AAPL_3_days_SPF_scaler.pkl'\r\n model_path = cwd + '/Apple/AAPL_3_days_SPF_model'\r\n scaler_path = cwd + '/Apple/AAPL_3_days_SPF_scaler.pkl'\r\n logger(ph, \"Loading saved model\")\r\n model = load_model(model_path)\r\n logger(ph, \"Loading saved scaler\")\r\n f = open(scaler_path, 'rb')\r\n scaler = pickle.load(f)\r\n \r\n \r\n # Scale the data \r\n user_data = user_data.reshape(1, user_data.shape[0])\r\n user_data = scaler.transform(user_data)\r\n \r\n \r\n # Convert shape and form of data that is accepted by LSTM RNN\r\n user_data = np.reshape(user_data, (user_data.shape[0], user_data.shape[1], 1))\r\n \r\n \r\n # Predict next day stock price\r\n logger(ph, \"Predicting stock price(s)\")\r\n prediction = model.predict(user_data)\r\n print(prediction)\r\n return prediction\r\n\r\n##### Button to get desired forecast #####\r\nif st.button(\"Get Forecast\"):\r\n priceForecast = getForecast(stock_ticker, 
nb_days)\r\n logger(ph, \"\")\r\n if nb_days == '1 Day':\r\n priceForecastDay1 = round(priceForecast[0][0], 4)\r\n st.success(\"Next \" + nb_days + \" stock price forecast (in $) :\\n\\n \" + \r\n \"**Day 1** : \" + str(priceForecastDay1))\r\n elif nb_days == '2 Days':\r\n priceForecastDay1 = round(priceForecast[0][0], 4)\r\n priceForecastDay2 = round(priceForecast[0][1], 4)\r\n st.success(\"Next \" + nb_days + \" stock price forecast (in $) :\\n\\n\" + \r\n \"**Day 1** : \" + str(priceForecastDay1) + \"\\n\\n\" \r\n \"**Day 2** : \" + str(priceForecastDay2))\r\n else:\r\n priceForecastDay1 = round(priceForecast[0][0], 4)\r\n priceForecastDay2 = round(priceForecast[0][1], 4)\r\n priceForecastDay3 = round(priceForecast[0][2], 4)\r\n st.success(\"Next \" + nb_days + \" stock price forecast (in $) :\\n\\n\" + \r\n \"**Day 1** : \" + str(priceForecastDay1) + \"\\n\\n\" + \r\n \"**Day 2** : \" + str(priceForecastDay2) + \"\\n\\n\" + \r\n \"**Day 3** : \" + str(priceForecastDay3)) \r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601895464","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n\"\"\"\r\nCENTRO DE INVESTIGACION EN MATEMATICAS\r\nDOCTORADO EN CIENCIAS DE LA COMPUTACION\r\nFERNANDO CERVANTES SANCHEZ\r\n\r\nFILE NAME: segmentation.py\r\n\r\nPURPOSE: Segments the main coronary artery from the background, using thresholding methods.\r\n\r\nFILE REFERENCES:\r\nName I/O Description\r\nNone ---- ----------\r\n\r\nABNORMAL TERMINATION CONDITIONS, ERROR AND WARNING MESSAGES:\r\n\r\nDEVELOPMENT HISTORY:\r\nDate Author Change Id Release Description Of Change\r\n10/Jul/2017 Fernando C. 0 1.0 Creation\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom numba import jit\r\nimport cv2\r\nimport sys\r\n\r\n\r\n@jit(cache=True)\r\ndef showOrientations(original, img, resp_angs, img_seg, img_skl):\r\n my_height, my_width, = img_seg.shape\r\n resp_angs_in_seg = resp_angs * img_seg\r\n \r\n y_seg, x_seg = np.where(img_seg > 0)\r\n dir_x = np.zeros(len(x_seg))\r\n dir_y = np.zeros(len(y_seg))\r\n color_xy = np.zeros(len(x_seg))\r\n\r\n for i in range(len(y_seg)):\r\n y, x = y_seg[i], x_seg[i]\r\n dir_y[i] = 0.5 * np.sin(resp_angs[y, x] / 180. * np.pi + np.pi/2.)\r\n dir_x[i] = 0.5 * np.cos(resp_angs[y, x] / 180. * np.pi + np.pi/2.)\r\n \r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img, cmap = 'gray', interpolation='none', extent = [0, my_width, my_height, 0])\r\n plt.quiver(x_seg + 0.5, y_seg + 0.5, dir_x, dir_y, scale = 1.0, scale_units='xy', edgecolor = 'w', linewidth=1.)\r\n \r\n y_skl, x_skl = np.where(img_skl > 0)\r\n dir_x = np.zeros(len(x_skl))\r\n dir_y = np.zeros(len(y_skl))\r\n color_xy = np.zeros(len(x_skl))\r\n\r\n for i in range(len(y_skl)):\r\n y, x = y_skl[i], x_skl[i]\r\n dir_y[i] = 0.5 * np.sin(resp_angs[y, x] / 180. * np.pi + np.pi/2.)\r\n dir_x[i] = 0.5 * np.cos(resp_angs[y, x] / 180. 
* np.pi + np.pi/2.)\r\n \r\n plt.subplot(1, 2, 2)\r\n original_rgb = np.zeros([original.shape[0], original.shape[1], 3], dtype=np.uint8)\r\n original_rgb[:, :, 2] = original\r\n original_rgb[:, :, 1] = original\r\n original_rgb[:, :, 0] = img_seg * 255\r\n \r\n plt.imshow(original_rgb, interpolation='none', extent = [0, my_width, my_height, 0])\r\n plt.quiver(x_skl + 0.5, y_skl + 0.5, dir_x, dir_y, scale = 1.0, scale_units='xy', edgecolor = 'w', linewidth=1.)\r\n plt.quiver(x_skl + 0.5, y_skl + 0.5, -dir_x, -dir_y, scale = 1.0, scale_units='xy', edgecolor = 'w', linewidth=1.)\r\n plt.show()\r\n\r\n\r\n@jit(cache=True)\r\ndef thresh_OTSU(img):\r\n ret, img_seg = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\r\n return img_seg\r\n\r\n\r\n@jit(cache=True)\r\ndef length_filtering(img_seg, img, HOLE_SIZE = 49):\r\n my_height, my_width, = img.shape\r\n \r\n mask_hole = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.uint8)\r\n hole_response = cv2.filter2D(img_seg/255, -1, mask_hole)\r\n lf0_img_seg = img_seg.copy()\r\n lf0_img_seg[hole_response == 4] = 255\r\n \r\n lf1_img_seg = 255 - lf0_img_seg\r\n \r\n # Check for holes inside large regions\r\n num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(lf1_img_seg, 8, cv2.CV_32S)\r\n \r\n for i, npix in enumerate(stats[:]):\r\n if (npix[4] < HOLE_SIZE):\r\n lf1_img_seg[labels == i] = 0\r\n\r\n lf_img_seg = 255 - lf1_img_seg\r\n \r\n # Check the labels of the main artery\r\n num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(lf_img_seg, 8, cv2.CV_32S)\r\n \r\n y_max, x_max = np.where(img == np.max(img))\r\n y_max, x_max = y_max[0], x_max[0]\r\n \r\n lf_img_seg[ labels != labels[y_max, x_max] ] = 0\r\n\r\n return lf_img_seg\r\n\r\n\r\n@jit(cache=True)\r\ndef segmentate(img):\r\n otsu_resp = thresh_OTSU(img)\r\n seg_resp = np.uint8(length_filtering(otsu_resp, img))\r\n # seg_resp = otsu_resp\r\n return seg_resp\r\n\r\n\r\n@jit(cache=True)\r\ndef metrics(img_seg, img_gt, img, print_metrics = False):\r\n\r\n TP_resp = np.logical_and(img_seg, img_gt)\r\n TN_resp = np.logical_and(255 - img_seg, 255 - img_gt)\r\n FP_resp = np.logical_and(img_seg, 255 - img_gt)\r\n FN_resp = np.logical_and(255 - img_seg, img_gt)\r\n\r\n TP = np.sum(TP_resp/255.0)\r\n TN = np.sum(TN_resp/255.0)\r\n FP = np.sum(FP_resp/255.0)\r\n FN = np.sum(FN_resp/255.0)\r\n\r\n Acc = ((TP+TN)/(TP+FP+TN+FN))\r\n\r\n # Sensitivity or True positive rate\r\n Sensitivity = TP / (TP + FN)\r\n\r\n # Specificity or True negative rate\r\n Specificity = TN / (TN + FP)\r\n\r\n # Precision or Positive predictive value\r\n PPV = TP / (TP + FP)\r\n\r\n # Fall-out or False positive rate\r\n NPV = TN / (TN + FN)\r\n\r\n # Matthews correlation coefficient [-1, 1]\r\n MCC = (TP*TN - FP*FN) / np.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))\r\n\r\n # Jaccard similarity coefficient\r\n Jac = (TP /(FP+FN+TP))\r\n\r\n # Sorensen-Dice coefficient\r\n Dice= 2*TP / (FP+FN+(2*TP))\r\n\r\n #• Uniformity\r\n N_val = (np.max(img) - np.min(img))**2 / 2.0\r\n\r\n Uniformity = 1.0 - (np.var(img[img_seg > 127]) + np.var(img[img_seg <= 127])) / N_val\r\n\r\n if (print_metrics):\r\n print('Accuracy: {}'.format(Acc))\r\n print('Sensitivity: {}'.format(Sensitivity))\r\n print('Specificity: {}'.format(Specificity))\r\n print('Positive predictive value: {}'.format(PPV))\r\n print('Negative predictive value: {}'.format(NPV))\r\n print('Matthews correlation coefficient: {}'.format(MCC))\r\n print('Uniformity: {}'.format(Uniformity))\r\n print('Jaccard similarity coefficient: 
{}'.format(Jac))\r\n print('Sorensen-Dice coefficient: {}'.format(Dice))\r\n\r\n return Acc, Sensitivity, Specificity, PPV, NPV, MCC, Uniformity, Jac, Dice\r\n\r\n\r\nif (__name__ == \"__main__\"):\r\n print(\"\\tArtery segmentation\")\r\n # Read arguments:\r\n if (len(sys.argv) < 2):\r\n print(\"This program requires:\")\r\n print('1) Path to an image')\r\n print('2) Path to the ground-truth')\r\n sys.exit()\r\n\r\n img = cv2.imread(sys.argv[1])\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n img_seg = thresh_OTSU(img)\r\n\r\n img_seg = length_filtering(img_seg, img)\r\n\r\n output_path = 'otsu_{}'.format(sys.argv[1])\r\n cv2.imwrite(output_path, img_seg)\r\n\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img, 'gray')\r\n plt.subplot(1, 2, 2)\r\n plt.imshow(img_seg, 'gray')\r\n plt.show()\r\n\r\n if (len(sys.argv) > 2):\r\n img_gt = cv2.imread(sys.argv[2])\r\n img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY)\r\n\r\n metrics(img_seg, img_gt, img, True)\r\n","sub_path":"src/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"302648514","text":"##########################\n# SnippetManager model\n##########################\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nimport editarea\nfrom django.db.models import Q\n# Create your models here.\nclass CodeManager(models.Manager):\n\tdef getShortSnippets(self, user):\n\t\tsnippets = Code.objects.filter(\n\t\t\tQ(user=user),\n\t\t\tQ(delete_date=None)\n\t\t).order_by('-create_date')[:200]\n\n\t\treturn_snippet = []\n\t\tfor snippet in snippets:\n\t\t\tcode = snippet.code\n\t\t\ts_code = code[:500] + '...'\n\t\t\tdesc = snippet.description\n\t\t\ts_desc = desc[:100] + '...'\n\t\t\ttitle = snippet.title\n\t\t\ts_title = title[:25] + '...'\n\n\t\t\tnew_snippet = {\n\t\t\t\t'id': snippet.id,\n\t\t\t\t'code': s_code,\n\t\t\t\t'description': s_desc,\n\t\t\t\t'title': s_title,\n\t\t\t\t'category_name': snippet.category.name,\n\t\t\t\t'public': snippet.public,\n\t\t\t\t'improve': snippet.improvement\n\t\t\t}\n\t\t\treturn_snippet.append(new_snippet)\n\n\t\treturn return_snippet\n\n\tdef getShortSnippetsSorted(self, project, cat, project_id, cat_id, user, keyword):\n\t\tsnippets_list = {}\n\n\t\tif project_id == 0 and cat_id != 0:\n\t\t\tsnippets_list = Code.objects.filter(\n\t\t\t\tQ(user=user),\n\t\t\t\tQ(category=cat),\n\t\t\t\tQ(delete_date=None),\n\t\t\t\tQ(title__icontains=keyword) | Q(description__icontains=keyword)\n\t\t\t).order_by('-create_date')[:200]\n\n\t\tif cat_id == 0 and project_id != 0:\n\t\t\tsnippets_list = Code.objects.filter(\n\t\t\t\tQ(user=user),\n\t\t\t\tQ(project=project),\n\t\t\t\tQ(delete_date=None),\n\t\t\t\tQ(title__icontains=keyword) | Q(description__icontains=keyword)\n\t\t\t).order_by('-create_date')[:200]\n\n\t\tif project_id != 0 and cat_id != 0:\n\t\t\tsnippets_list = Code.objects.filter(\n\t\t\t\tQ(user=user),\n\t\t\t\tQ(category=cat),\n\t\t\t\tQ(project=project),\n\t\t\t\tQ(delete_date=None),\n\t\t\t\tQ(title__icontains=keyword) | Q(description__icontains=keyword)\n\t\t\t).order_by('-create_date')[:200]\n\n\t\tif project_id == 0 and cat_id == 0:\n\t\t\tsnippets_list = Code.objects.filter(\n\t\t\t\tQ(user=user),\n\t\t\t\tQ(delete_date=None),\n\t\t\t\tQ(title__icontains=keyword) | Q(description__icontains=keyword)\n\t\t\t).order_by('-create_date')[:200]\n\n\t\treturn_snippet = []\n\t\tfor snippet in snippets_list:\n\t\t\tcode = 
snippet.code\n\t\t\ts_snippet = code[:500] + '...'\n\t\t\tdesc = snippet.description\n\t\t\ts_desc = desc[:100] + '...'\n\t\t\ttitle = snippet.title\n\t\t\ts_title = title[:25] + '...'\n\n\t\t\tnew_snippet = {\n\t\t\t\t'id': snippet.id,\n\t\t\t\t'title': s_title,\n\t\t\t\t'description': s_desc,\n\t\t\t\t'category_name': snippet.category.name,\n\t\t\t\t'code': s_snippet,\n\t\t\t\t'public': snippet.public,\n\t\t\t\t'improve': snippet.improvement\n\t\t\t}\n\t\t\t\n\t\t\treturn_snippet.append(new_snippet)\n\n\t\treturn return_snippet\n\n\tdef getWorldShortSnippetsSorted(self, cat, cat_id, keyword):\n\t\tsnippets_list = {}\n\n\t\tif cat_id != 0:\n\t\t\tsnippets_list = Code.objects.filter(\n\t\t\t\tQ(category=cat),\n\t\t\t\tQ(title__icontains=keyword) | Q(description__icontains=keyword),\n\t\t\t\tQ(delete_date=None),\n\t\t\t\tQ(public=True)\n\t\t\t).order_by('-create_date')\n\t\telse:\n\t\t\tsnippets_list = Code.objects.filter(\n\t\t\t\tQ(title__icontains=keyword) | Q(description__icontains=keyword),\n\t\t\t\tQ(delete_date=None),\n\t\t\t\tQ(public=True)\n\t\t\t).order_by('-create_date')\n\n\t\treturn_snippet = []\n\t\tfor snippet in snippets_list:\n\t\t\tcode = snippet.code\n\t\t\ts_snippet = code[:500] + '...'\n\t\t\tdesc = snippet.description\n\t\t\ts_desc = desc[:100] + '...'\n\t\t\ttitle = snippet.title\n\t\t\ts_title = title[:25] + '...'\n\n\t\t\tnew_snippet = {\n\t\t\t\t'id': snippet.id,\n\t\t\t\t'title': s_title,\n\t\t\t\t'description': s_desc,\n\t\t\t\t'category_name': snippet.category.name,\n\t\t\t\t'code': s_snippet,\n\t\t\t\t'public': snippet.public,\n\t\t\t\t'improve': snippet.improvement\n\t\t\t}\n\t\t\t\n\t\t\treturn_snippet.append(new_snippet)\n\n\t\treturn return_snippet\n\nclass Category(models.Model):\n\tname = models.CharField(max_length=25, unique=True)\n\tcreate_date = models.DateTimeField()\n\tdelete_date = models.DateTimeField(null=True)\n\tuser = models.ForeignKey(User)\n\nclass Project(models.Model):\n\tname = models.CharField(max_length=50)\n\tuser = models.ForeignKey(User)\n\tdelete_date = models.DateTimeField(null=True)\n\nclass Code(models.Model):\n\ttitle = models.CharField(max_length=50)\n\tdescription = models.TextField(max_length=500)\n\tcode = editarea.EditAreaField()\n\tuser = models.ForeignKey(User)\n\tproject = models.ForeignKey(Project)\n\tcategory = models.ForeignKey(Category)\n\tpublic = models.BooleanField()\n\timprovement = models.BooleanField()\n\tcreate_date = models.DateTimeField()\n\tupdate_date = models.DateTimeField()\n\tdelete_date = models.DateTimeField(null=True)\n\tliked = models.PositiveIntegerField()\n\tobjects = CodeManager()\n\nclass Vote(models.Model):\n\tdate = models.DateTimeField()\n\tscore = models.PositiveSmallIntegerField()\n\tuser = models.ForeignKey(User)\n\tcode = models.ForeignKey(Code)\n\t\nclass Rating(models.Model):\n\tuser = models.ForeignKey(User)\n\tscore = models.PositiveIntegerField()\n\tupdate_date = models.DateTimeField()\n\nclass Points(models.Model):\n\tpoints = models.PositiveIntegerField()\n\tuser = models.ForeignKey(User)\n\nclass Viewlog(models.Model):\n\tview_date = models.DateTimeField()\n\tuser = models.ForeignKey(User)\n\tcode = models.ForeignKey(Code)\n\nclass DisqusComment(models.Model):\n\tuser = models.ForeignKey(User)\n\tdate = models.DateTimeField()\n\tdisqus_id = models.PositiveIntegerField()\n\tcomment = models.TextField()\n\tcode = models.ForeignKey(Code)\n\nclass SnippetImprovement(models.Model):\n\tuser = models.ForeignKey(User)\n\tcode = models.ForeignKey(Code)\n\tsubmit_date = 
models.DateTimeField()\n\tnew_code = editarea.EditAreaField()\n\tapprove_date = models.DateTimeField(null=True)\n\tdisapprove_date = models.DateTimeField(null=True)\n\nclass FacebookLogin(models.Model):\n\tfacebook_id = models.PositiveIntegerField()\n\tuser = models.ForeignKey(User)\n\nclass GoogleLogin(models.Model):\n\tgoogle_id = models.CharField(max_length=30)\n\tuser = models.ForeignKey(User)\n","sub_path":"snippetmanager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262686164","text":"#!/usr/bin/env python\nimport re\n\npattern = r\"[a-z]+\"\n\ns = \"\"\"This is 10 times\nthe 2 dogs belonging to Bob have barked\"\"\"\n\n\ns = (\"This is 10 times \"\n\"the 2 dogs belonging to Bob have barked\"\n)\n\nfound = re.findall(pattern, s)\nprint(found, '\\n')\n\nwombat = re.compile(pattern, re.I)\n\nfound = wombat.findall(s)\nprint(found)\n","sub_path":"re_compile_vs_string.py","file_name":"re_compile_vs_string.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"535994739","text":"import ROOT as r\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nf = r.TFile('data-out/full_scan_019.root')\nt = f.Get('t')\n\nN = t.GetEntries()\n\nlaser_phi = np.empty(N)\nlaser_rad = np.empty(N)\nb_dipole = np.empty(N)\nb_n_quad = np.empty(N)\n\nfor i in xrange(N):\n t.GetEntry(i)\n laser_rad[i] = t.r_2\n laser_phi[i] = t.phi_2\n b_dipole[i] = t.multipole[0]\n b_n_quad[i] = t.multipole[1]\n\n\nindices = np.where(laser_rad > 0.0)\n\nlaser_phi = laser_phi[indices]\nb_dipole = b_dipole[indices]\nb_n_quad = b_n_quad[indices]\n\ndb1 = (b_dipole[2:] - b_dipole[1:-1]) / (laser_phi[2:] - laser_phi[1:-1])\ndb2 = (b_dipole[1:-1] - b_dipole[:-2]) / (laser_phi[1:-1] - laser_phi[:-2])\n\ndiff = db2 - db1\n\nindices = np.where(diff != np.nan)\n\ndiff = diff[indices]\nlaser_phi = laser_phi[1:-1]\nlaser_phi = laser_phi[indices]\nb_dipole = b_dipole[1:-1]\nb_dipole = b_dipole[indices]\nb_n_quad = b_n_quad[1:-1]\nb_n_quad = b_n_quad[indices]\n\nplt.clf()\nplt.plot(laser_phi)\nplt.savefig('laser.png')\n\nplt.clf()\nplt.scatter(diff, laser_phi)\nplt.savefig('diff.png')\n\nplt.clf()\nplt.scatter(b_n_quad, laser_phi)\nplt.savefig('nquad.png')\n\n\nplt.clf()\nplt.scatter(diff, b_n_quad, alpha=0.2)\nplt.xlim([-15, 15])\nplt.ylim([-50, 20])\nplt.savefig('corr.png')\n","sub_path":"ring-geometry/src/compare_dipole_derviative_and_n_quad.py","file_name":"compare_dipole_derviative_and_n_quad.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"79312727","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 21 19:35:57 2021\n\n@author: colinko\n\"\"\"\n\n\"\"\"\nMining Tweets Through Tweepy\n\nIf we want to stream tweets through tweepy\n\"\"\"\nimport tweepy\nimport os\n\npath=''\n\n#Keys entries\naccess_token=''\naccess_token_secret=''\n\nconsumer_key=''\nconsumer_secret=''\n\n#Verifying Credentials & test authentication\n# Authenticate to Twitter\nauth = tweepy.OAuthHandler(consumer_key,consumer_secret)\nauth.set_access_token(access_token,access_token_secret)\napi = tweepy.API(auth)\n# test authentication\ntry:\n api.verify_credentials()\n print(\"Authentication OK\")\nexcept:\n print(\"Error during authentication\")\n\n#Mining Tweets (Streaming)\nclass 
listener(tweepy.StreamListener):\r\n def on_status(self, status):\r\n with open('data-streaming-tweets.txt', 'a',encoding=\"utf-8\") as f:\r\n if hasattr(status, 'retweeted_status'):\r\n f.write('retweeted'+\" : \"+\\\r\n status.user.screen_name + ' : ' + \\\r\n str(status.user.followers_count) + ' : ' + \\\r\n str(status.created_at) + ' : ' + \\\r\n status.text + '\\n') \r\n else:\r\n f.write('not_retweet'+\" : \"+\\\r\n status.user.screen_name + ' : ' + \\\r\n str(status.user.followers_count) + ' : ' + \\\r\n str(status.created_at) + ' : ' + \\\r\n status.text + '\\n')\r\n \r\n return True\r\n \r\n def on_error(self, status_code):\r\n print(status_code)\r\n return True\r\n \r\n def on_limit(self,status): #restart if disconnect\r\n import time # 'time' is used below but never imported at module level, so import it here\r\n print (\"Rate Limit Exceeded, Sleep for 2 Mins\")\r\n time.sleep(2 * 60)\r\n return True\r\n\r\nmystream = \\\r\n tweepy.Stream(\r\n auth=api.auth,\r\n listener=listener())\r\n \r\nmystream.filter(track=['XRP','Xrp','xrp','RIPPLE','Ripple','ripple'],is_async=True)\r\n\r\n#is_async=True #background streaming ,will not run in console\r\n \r\n#if we set is_async=False, control c to stop \r\n\r\n#disconnecting background stream (if is_async=True) \r\nmystream.disconnect()\r\n\r\n#No need to close the file after mining: the with-statement in on_status closes it automatically\r\n\r\n#Read txt file\r\nf = open(path+os.sep+'data-streaming-tweets.txt', \"r\",encoding=\"utf-8\")\r\nf.read()\r\nf.close()\r\n","sub_path":"streaming_tweets_through_tweepy_to_github (not necessary).py","file_name":"streaming_tweets_through_tweepy_to_github (not necessary).py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572820885","text":"from django.shortcuts import render_to_response\nfrom django.shortcuts import render\nfrom django.core.context_processors import csrf\nfrom django.template import RequestContext\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.db.models import Q\nfrom www.models import *\nfrom www.forms import *\nfrom www.tasks import *\nimport os, datetime, pdb\n\n# Create your views here.\ndef tools_index(request):\n return render_to_response('tools_index.html')\n\ndef index(request):\n if request.method == 'POST':\n form_snap = SnapUploadForm(request.POST, request.FILES)\n if form_snap.is_valid():\n newfile = SnapModel(snapname = request.FILES['snapname'],\\\n snapcompany = CompanyModel.objects.get(id = request.POST['snapcustomer']))\n newfile.save()\n output = snapHandler(newfile.id)\n if not output:\n return render_to_response('input_result.html', {'result': '1'})\n if output[1]:\n if output[1][1]:\n# server_check = ServerModel.objects.filter(serial = output[1][1], hostname = output[1][0])\n# if server_check:\n# server_check = ServerModel.objects.get(serial = output[1][1], hostname = output[1][0])\n# server_id = server_check.id\n# else:\n server = SnapModel.objects.get(id=newfile.id)\n server.hostname = output[1][0]\n server.serial = output[1][1]\n server.typemodel = output[1][2]\n server.oslevel = output[1][3]\n server.vioslevel = output[1][5]\n server.microcode = output[1][4]\n server.snapcompany = CompanyModel.objects.get(id = request.POST['snapcustomer'])\n server.save()\n# server_id = server.id\n snapupdate = SnapModel.objects.get(id=newfile.id)\n snapupdate.snapoutgeneral = output[1][6]\n snapupdate.snapoutent = output[2]\n snapupdate.snapoutvhost = output[3]\n snapupdate.snapoutfcs = output[4]\n snapupdate.snapouthdiskpath = output[5]\n snapupdate.snapoutfilesys = output[6]\n snapupdate.snapouttapes = output[7]\n snapupdate.snapouterrpt = output[8]\n# 
snapupdate.snapserver = ServerModel.objects.get(id = server_id)\n snapupdate.save()\n# zuncompress(record.snapname.name)\n# return render_to_response('input_result.html', {'result': 'Snap was successfully uploaded: ',\\\n# 'data_sample': snapupdate.snapdate,\\\n# 'unpack_result': output[0],\\\n# 'general_text': output[1][6],\\\n# 'ent_text': output[2],\\\n# 'vhost_text': output[3],\\\n# 'fcs_text': output[4],\\\n# 'hdisk_text': output[5],\\\n# 'filesystem_test': output[6]},)\n return HttpResponseRedirect('/snaper/diag_show/?servid=%s' % newfile.id)\n else:\n return render_to_response('input_result.html', {'result': '2'})\n else:\n form_snap = SnapUploadForm()\n return render(request, 'main.html', {'form': form_snap}, context_instance=RequestContext(request))\n\ndef customer(request, customer_name):\n return render_to_response('customer.html', {'custname':customer_name, 'path':request.path})\n\ndef display_meta(request):\n values = request.META.items()\n #values.sort()\n html = []\n for k, v in values:\n html.append('<tr><td>%s</td><td>%s</td></tr>' % (k, v))\n return HttpResponse('<table>%s</table>
' % '\\n'.join(html))\n\ndef search(request):\n return render_to_response('search_form.html')\n\ndef search_req(request):\n if 'q' in request.GET and request.GET['q']:\n q = request.GET['q']\n customers = CompanyModel.objects.filter(name__icontains=q)\n servers = SnapModel.objects.filter(Q(hostname__icontains=q) | Q(serial__icontains=q)).order_by('serial','hostname', '-snapdate')\n contact = ContactModel.objects.filter(contact_name__icontains=q)\n return render_to_response('search_results.html',\n {'customers': customers,\\\n 'contacts': contact,\\\n 'servers' : servers,\\\n 'query': q})\n else:\n return render(request, 'search_results.html')\n\ndef diag_show(request):\n if 'servid' in request.GET and request.GET['servid']:\n serverid = request.GET['servid']\n diag_info = SnapModel.objects.filter(id=serverid)\n return render_to_response('diag_show.html', {'diag_info': diag_info})\n\ndef input_company(request):\n if request.method == 'POST':\n form = CompanyForm(request.POST)\n if form.is_valid():\n if request.POST['cname'] == '':\n return render_to_response('input_result.html', {'result':'3'})\n else:\n Customer_record = CompanyModel.objects.create(name=request.POST['cname'],\n address=request.POST['caddress'],\n city=request.POST['ccity'],\n website=request.POST['cwebsite'])\n message = \"Data was saved sucsessfully.\"\n return render_to_response('input_result.html', {'result': '4'})\n else:\n form = CompanyForm()\n# return render_to_response('input_data.html', {'form': form}, context_instance=RequestContext(request))\n return render(request, 'input_data.html', {'form': form}, context_instance=RequestContext(request))\n\ndef input_customer(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n if request.POST['contname'] == '':\n return render_to_response('input_result.html', {'result':'5'})\n else:\n Contact_record = ContactModel.objects.create(contact_name=request.POST['contname'],\n company=CompanyModel.objects.get(id = request.POST['contcompany']),\n phone=request.POST['contphone'],\n email=request.POST['contemail'])\n message = \"Contact was saved sucsessfully.\"\n return render_to_response('input_result.html', {'result': '4'})\n else:\n form = ContactForm()\n return render(request, 'input_cdata.html', {'form': form}, context_instance=RequestContext(request))\n\ndef help(request):\n return render_to_response('help.html')\n\n","sub_path":"www/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418271883","text":"#!/usr/bin/env python2.6 -ttt\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n\ntest_realdata.py\n\n@author: (c) 2011 Dr. 
Wolfram Schroers\n\n@summary: Plot and histogram the relative returns of real-world data.\n\n\"\"\"\n\n\n# ============================================================================\n# Preamble\n# ============================================================================\n\nfrom __future__ import print_function, division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nfrom Datasource import *\n\n\n# ============================================================================\n# Executable program section\n# ============================================================================\n\n# Step 1: Read in the real data and get the relative returns.\n\nfh = open('realdata.dat', 'r')\ncontent = fh.readlines()\nfh.close()\nadj_close = np.array([float(x) for x in content])\ndata = TimeSeries(adj_close).relative_returns()\n\n\n# Step 2: Plot the data as a time series.\n\nplt.plot(np.arange(len(data)), data)\nraw_input('Press return ...')\n\n\n# Step 3: Histogram the data.\n\nfig = plt.figure()\nhistgraph = fig.add_subplot(111)\nn, bins, patches = histgraph.hist(data, 50, normed=True)\nprint('Data in bins: ', n)\n\n\n# Step 4: For comparison, add a Gaussian curve.\n\nxvals = np.arange(-0.20, 0.15, 0.001)\nhistgraph.plot(xvals,\n np.vectorize(lambda x:29.*math.exp(-(x/0.02)**2))(xvals),\n '--',\n linewidth=2)\n#fig.savefig('test_realdata.png')\n#fig.show()\nplt.show()\nraw_input('Press return ...')\n\n\n# Step 4: Logarithmic plot of part of the tail.\n\nfig = plt.figure()\nzoomed = fig.add_subplot(111)\nzoomed.semilogy(bins[:28], n[:28],\n marker='+',\n markersize=10)\n\nxvals = np.arange(-0.10, -0.01, 0.001)\nzoomed.semilogy(xvals,\n np.vectorize(lambda x:29.*math.exp(-(x/0.02)**2))(xvals),\n '--',\n linewidth=2)\nfig.savefig('test_realdata-zoom.png')\n#fig.show()\nplt.show()\nraw_input('Press return ...')\n\n\n# ============================================================================\n# End of test_realdata.py\n# ============================================================================\n\n","sub_path":"BastelPython/src/Pycon2011/2011/test_realdata.py","file_name":"test_realdata.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638886294","text":"#! 
python\nimport sys\nimport math\nimport euler\nimport itertools\n\ndef main():\n digits = '0123456789'\n primes = [2, 3, 5, 7, 11, 13, 17]\n total = 0\n for p in itertools.permutations(digits):\n s = ''.join(p)\n is_div = True\n for i in range(1, len(s) - 2):\n n = int(s[i:i + 3])\n if n % primes[i - 1] != 0:\n is_div = False\n break\n if is_div:\n total += int(s)\n print(total)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"p43.py","file_name":"p43.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"511105548","text":"import pygame as pg\n\n\nclass Packet:\n def __init__(self, source, dest, vector, time):\n self.packet = dict()\n self.packet[\"source\"] = source\n self.packet[\"dest\"] = dest\n self.packet[\"vector\"] = vector\n self.packet[\"query\"] = False\n\n self.source = source\n\n self.start_time = time\n\n self.time = source.links[dest]\n\n self.speed = [(dest.x - source.x) / self.time, (dest.y - source.y) / self.time]\n\n def draw(self, screen, time):\n pg.draw.circle(\n screen,\n self.source.color,\n (\n self.source.x + self.speed[0] * (time - self.start_time),\n self.source.y + self.speed[1] * (time - self.start_time),\n ),\n 20,\n 20,\n )\n pg.draw.circle(\n screen,\n (255, 255, 255),\n (\n self.source.x + self.speed[0] * (time - self.start_time),\n self.source.y + self.speed[1] * (time - self.start_time),\n ),\n 20,\n 5,\n )\n","sub_path":"DUAL/Basic/Packet.py","file_name":"Packet.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"458967138","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser(description='Quickly find if variants overlap regions of interest')\nparser.add_argument('GFF_file', help='A file in GFF3 format with exons and genes')\nparser.add_argument('chromosome', help='Variant chromosome')\nparser.add_argument('position', help='Variant position')\nargs = parser.parse_args()\n\nchroms = {'1':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, '11':11, '12':12, '13':13, '14':14, '15':15, '16':16, '17':17, '18':18, '19':19, '20':20, '21':21, '22':22, 'X':23, 'Y':24, 'MT':25, 'chr1':1, 'chr2':2, 'chr3':3, 'chr4':4, 'chr5':5, 'chr6':6, 'chr7':7, 'chr8':8, 'chr9':9, 'chr10':10, 'chr11':11, 'chr12':12, 'chr13':13, 'chr14':14, 'chr15':15, 'chr16':16, 'chr17':17, 'chr18':18, 'chr19':19, 'chr20':20, 'chr21':21, 'chr22':22, 'chrX':23, 'chrY':24, 'chrM':25}\n\nwith open(args.GFF_file, 'r') as f:\n for line in f:\n if line[0] == '#':\n continue\n line = line.rstrip().split()\n if not line[0] in chroms:\n sys.exit('Chromosome not found')\n if chroms[line[0]] == chroms[args.chromosome]:\n if int(args.position) < int(line[3]):\n sys.exit(0)\n else:\n if int(args.position) < int(line[4]):\n print('\\t'.join(line))\n else:\n pass\n else:\n continue\n","sub_path":"scripts/Don_Scripts/Untracked/find_gene.py","file_name":"find_gene.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"72207774","text":"#!/usr/bin/python\n\"\"\"\nDescription: Inserts an item into a MariaDB database\n\nRequires: mysql-connector\n\n- transport into mariadb:\n username: root\n password: passwd\n host: localhost\n db_name: test\n sql: INSERT INTO table (field1, field2) VALUES (?)\n values: [ value1, value2 ]\n\"\"\"\nfrom mdatapipe.core.plugin 
import PipelinePlugin\nimport sys\nimport mysql.connector\n\n\nclass Plugin(PipelinePlugin):\n\n def on_start(self):\n user = self.config.get(\"user\", \"root\")\n password = self.config.get(\"password\", None)\n host = self.config.get(\"host\", \"localhost\")\n database = self.config.get('database', \"test\")\n\n self._conn = mysql.connector.connect(\n user=user,\n password=password,\n host=host,\n database=database,\n )\n\n def on_input(self, item): # NOQA: C901\n sql = self.config['sql']\n command = sql.upper().strip().split()[0]\n values = self.config.get(\"values\", [])\n if not isinstance(values, list): # Convert single values to a list\n values = [values]\n # print(\"SQL:\", self.config['sql'], file=sys.stderr)\n # for v in values:\n # print(\"VAL:\", type(v), v, file=sys.stderr)\n cursor = self._conn.cursor(dictionary=True)\n try:\n cursor.execute(sql, values)\n except (mysql.connector.OperationalError, mysql.connector.ProgrammingError):\n print(\"SQL:\", self.config['sql'], file=sys.stderr)\n print(\"VALUES:\", values, file=sys.stderr)\n raise\n except (mysql.connector.IntegrityError):\n if not self.config.get(\"ignore_integrity_errors\", False):\n raise\n if command == \"SELECT\":\n result_set = cursor.fetchall()\n for result in result_set:\n self.put(result)\n # If delete was sucessfull, just pass the record\n if command in ['INSERT', 'DELETE', \"UPDATE\"]:\n if cursor.rowcount == 1:\n self.put(item)\n elif command != \"SELECT\":\n self.put(item)\n self._conn.commit()\n cursor.close()\n","sub_path":"mdatapipe-old/mdatapipe/plugins/transform/item/mariadb.py","file_name":"mariadb.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"528254081","text":"from pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession, Row\nimport csv\n\n# Jamel Peralta Coss\n# 802-13-5870\n# JamelProject BD2\n# Part 1: Hashtag Trending vs Count\n\ndef getSparkSessionInstance(sparkConf):\n if ('sparkSessionInstance' not in globals()):\n globals()['sparkSessionInstance'] = SparkSession.builder.config(conf=sparkConf) \\\n .enableHiveSupport().getOrCreate()\n return globals()['sparkSessionInstance']\n\ndef output():\n spark = getSparkSessionInstance(sc.getConf())\n df = spark.sql(\"use default\")\n\n # Query\n df = spark.sql(\"select hashtag, sum(total) as suma from hashtag_table where timestamp between cast('2018-12-06 20:00:00' as timestamp)- INTERVAL 1 HOUR and cast('2018-12-06 21:00:00' as timestamp) group by hashtag order by suma desc limit 10\")\n df.show()\n\n # Write query into a csv in the hdfs\n df.repartition(1).write.csv(\"/data/jameloutput.csv\")\n\nif __name__ == \"__main__\":\n sc = SparkContext(appName=\"Save CSV\")\n output()\n","sub_path":"sourcecode/genhashtag.py","file_name":"genhashtag.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572711551","text":"import os\n\nfrom flask import jsonify\n\nfrom psycopg2 import OperationalError\nfrom psycopg2.pool import SimpleConnectionPool\n\nPROJECT_ID = os.getenv('POSTGRES_USER', 'panoptes-survey')\nBUCKET_NAME = os.getenv('BUCKET_NAME', 'panoptes-survey')\n\nCONNECTION_NAME = os.getenv(\n 'INSTANCE_CONNECTION_NAME',\n 'panoptes-survey:us-central1:panoptes-meta'\n)\nDB_USER = os.getenv('POSTGRES_USER', 'panoptes')\nDB_PASSWORD = os.getenv('POSTGRES_PASSWORD', None)\nDB_NAME = os.getenv('POSTGRES_DATABASE', 'metadata')\n\npg_config = 
{\n 'user': DB_USER,\n 'password': DB_PASSWORD,\n 'dbname': DB_NAME\n}\n\n# Connection pools reuse connections between invocations,\n# and handle dropped or expired connections automatically.\npg_pool = None\n\n\n# Entry point\ndef update_state(request):\n \"\"\"Updates the sequence or image state.\n\n Args:\n request (flask.Request): HTTP request object.\n Returns:\n The response text or any set of values that can be turned into a\n Response object using\n `make_response`.\n \"\"\"\n request_json = request.get_json()\n\n sequence_id = request_json.get('sequence_id')\n image_id = request_json.get('image_id')\n state = request_json.get('state')\n\n if sequence_id is None and image_id is None:\n return jsonify(success=False, msg='Need either a sequence_id or an image_id')\n\n if state is None or state == '':\n return jsonify(success=False, msg='State required')\n\n table = 'sequences'\n field = sequence_id\n if sequence_id is None:\n table = 'images'\n field = image_id\n\n field = field.replace('/', '_')\n\n try:\n update_state_call(table, field, state)\n except Exception as e:\n return jsonify(success=False, msg=f'Failed to update state: {e!r}')\n\n return jsonify(success=True, msg=f'Updated {field} to {state}')\n\n\ndef update_state_call(table, field, state):\n \"\"\"Updates the state column of the row matching the given id.\n\n Args:\n table (str): Table in which to update.\n\n Returns:\n bool: True on success, False on failure.\n \"\"\"\n global pg_pool\n\n # Initialize the pool lazily, in case SQL access isn't needed for this\n # GCF instance. Doing so minimizes the number of active SQL connections,\n # which helps keep your GCF instances under SQL connection limits.\n if not pg_pool:\n try:\n __connect(f'/cloudsql/{CONNECTION_NAME}')\n except OperationalError as e:\n print(e)\n # If production settings fail, use local development ones\n __connect('localhost')\n\n conn = pg_pool.getconn()\n conn.set_isolation_level(0)\n with conn.cursor() as cursor:\n update_sql = f\"UPDATE {table} SET state=%s WHERE id=%s\"\n\n try:\n cursor.execute(update_sql, [state, field])\n cursor.connection.commit()\n print(f'{field} set to state {state}')\n except Exception:\n try:\n print(f'Updating of state ({field}={state}) failed, rolling back and trying again')\n cursor.connection.rollback()\n cursor.execute(update_sql, [state, field])\n cursor.connection.commit()\n print(f'{field} set to state {state}')\n except Exception as e:\n print(f\"Error in insert (error): {e!r}\")\n print(f\"Error in insert (sql): {update_sql}\")\n return False\n finally:\n cursor.close()\n pg_pool.putconn(conn)\n\n return True\n\n\ndef __connect(host):\n \"\"\"\n Helper function to connect to Postgres\n \"\"\"\n global pg_pool\n pg_config['host'] = host\n pg_pool = SimpleConnectionPool(1, 1, **pg_config)\n","sub_path":"cf-update-state/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591562253","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport unitypack\nfrom PIL import ImageOps\n\n\nSUPPORTED_FORMATS = (\n\t\"AudioClip\",\n\t\"Shader\",\n\t\"TextAsset\",\n\t\"Texture2D\",\n)\n\n\ndef get_output_path(filename):\n\tbasedir = \"out\"\n\tpath = os.path.join(basedir, filename)\n\tdirs = os.path.dirname(path)\n\tif not os.path.exists(dirs):\n\t\tos.makedirs(dirs)\n\treturn path\n\n\ndef write_to_file(filename, contents, mode=\"w\"):\n\tpath = get_output_path(filename)\n\twith open(path, mode) as 
f:\n\t\twritten = f.write(contents)\n\n\tprint(\"Written %i bytes to %r\" % (written, path))\n\n\ndef handle_asset(asset):\n\tprint(asset)\n\tfor id, obj in asset.objects.items():\n\t\tif obj.type not in SUPPORTED_FORMATS:\n\t\t\tprint(\"Skipping %r\" % (obj))\n\t\t\tcontinue\n\n\t\td = obj.read()\n\n\t\tif obj.type == \"AudioClip\":\n\t\t\twrite_to_file(d.name + \".fsb\", d.data, mode=\"wb\")\n\n\t\telif obj.type == \"Shader\":\n\t\t\twrite_to_file(d.name + \".cg\", d.script)\n\n\t\telif obj.type == \"TextAsset\":\n\t\t\twrite_to_file(d.name + \".txt\", d.script)\n\n\t\telif obj.type == \"Texture2D\":\n\t\t\tprint(\"Decoding %r\" % (d))\n\t\t\timg = ImageOps.flip(d.image)\n\t\t\tpath = get_output_path(d.name + \".png\")\n\t\t\timg.save(path)\n\n\ndef main():\n\tfiles = sys.argv[1:]\n\tfor file in files:\n\t\tif file.endswith(\".assets\"):\n\t\t\twith open(file, \"rb\") as f:\n\t\t\t\tasset = unitypack.Asset.from_file(f)\n\t\t\thandle_asset(asset)\n\t\t\tcontinue\n\n\t\twith open(file, \"rb\") as f:\n\t\t\tbundle = unitypack.load(f)\n\n\t\tfor asset in bundle.assets:\n\t\t\thandle_asset(asset)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"370914756","text":"import sys\nimport json\nfrom PyQt5.QtWidgets import (\n QApplication, QMainWindow, QWidget, QFormLayout,\n QHBoxLayout, QLineEdit, QTextEdit, QComboBox, QPushButton,\n QCheckBox, QFileDialog, QAction, QLabel\n)\n\n\nstatic_nest = {\n \"text\": \"A New Story\",\n \"main\": False,\n \"main_img\": \"\",\n \"actor\": \"\",\n \"img\": \"\",\n \"choice_text\": \"\",\n\n \"path\": []\n}\n\n\nclass newTC(QMainWindow):\n def __init__(self, nest):\n super().__init__()\n self.widget = QWidget()\n self.nest = nest\n self.cur_nest = self.nest\n self.cur_path_index = 0\n self.back = []\n\n self.init_ui()\n\n @staticmethod\n def new_line_edit(text=\"\", min_w=100, min_h=10):\n a_line = QLineEdit(text)\n a_line.setMinimumWidth(min_w)\n a_line.setMinimumHeight(min_h)\n return a_line\n\n @staticmethod\n def new_text_edit(text=\"\", min_w=100, min_h=100):\n t_line = QTextEdit(text)\n t_line.setMinimumWidth(min_w)\n t_line.setMinimumHeight(min_h)\n return t_line\n\n @staticmethod\n def new_combo_box(min_w=100, min_h=10):\n cbox = QComboBox()\n cbox.setMinimumWidth(min_w)\n cbox.setMinimumHeight(min_h)\n return cbox\n\n @staticmethod\n def new_button(text=\"\", min_w=20, min_h=5):\n button = QPushButton(text)\n button.setMinimumWidth(min_w)\n button.setMinimumHeight(min_h)\n return button\n\n def json_out(self, file_name):\n f = open(file_name, 'r')\n x = json.load(f)\n f.close()\n self.nest = x\n self.cur_nest = self.nest\n self.cur_path_index = 0\n\n self.choice_reset()\n self.check_box_reset()\n self.actor_reset()\n self.img_dir_reset()\n self.main_img_dir_reset()\n\n @staticmethod\n def json_in(file_name, nest):\n f = open(file_name, 'w')\n json.dump(nest, f)\n f.close()\n\n def init_ui(self):\n f_layout = QFormLayout()\n\n # init menubar\n bar = self.menuBar()\n file_menu = bar.addMenu(\"File\")\n file_menu.addAction(\"New\")\n\n save_act = QAction(\"Save\", self)\n save_act.setShortcut(\"Ctrl+S\")\n file_menu.addAction(save_act)\n\n open_act = QAction(\"Open\", self)\n open_act.setShortcut(\"Ctrl+O\")\n file_menu.addAction(open_act)\n\n # init buttons combobox and lines\n choice_line_label = QLabel('Inside Choice:')\n choice_label = QLabel('A choice to next 
path:')\n self.choice = self.new_combo_box(200, 20)\n self.text_line = self.new_text_edit()\n self.text_line.setPlaceholderText(\n \"This is the text that will \"\n \"appear when the player has \"\n \"chosen this path.\"\n )\n self.choice_line = self.new_line_edit(\n self.cur_nest['choice_text'], 150, 30\n )\n self.actor_line = self.new_line_edit()\n self.actor_line.setPlaceholderText('Character Name')\n self.main_img_dir_line = self.new_line_edit()\n self.main_img_dir_line.setPlaceholderText(\n 'image dir for main character'\n )\n self.img_dir_line = self.new_line_edit()\n self.img_dir_line.setPlaceholderText(\n 'image dir for a character'\n )\n self.main_chk_box = QCheckBox('Is Main?')\n\n self.browse_img_but = self.new_button('Browse')\n self.browse_main_img_but = self.new_button('Browse')\n self.change_choice_but = self.new_button('Apply Change Choice Text')\n self.next_but = self.new_button('Next Path')\n self.del_but = self.new_button('Delete Path')\n self.to_start_but = self.new_button('To Start')\n self.add_path_but = self.new_button('Add new Choice')\n back_but = self.new_button('Back')\n\n self.choice.setEditable(True)\n self.choice_line.setEnabled(False)\n\n # set formlayout\n f_layout.addRow(choice_line_label, self.choice_line)\n\n f_layout.addRow(self.actor_line)\n hbox = QHBoxLayout()\n hbox.addWidget(self.main_chk_box)\n hbox.addWidget(self.browse_main_img_but)\n hbox.addWidget(self.main_img_dir_line)\n hbox.addWidget(self.browse_img_but)\n hbox.addWidget(self.img_dir_line)\n f_layout.addRow(hbox)\n\n f_layout.addRow(choice_label, self.choice)\n f_layout.addRow(self.change_choice_but)\n\n hbox = QHBoxLayout()\n hbox.addWidget(self.next_but)\n hbox.addWidget(back_but)\n hbox.addWidget(self.del_but)\n hbox.addWidget(self.to_start_but)\n hbox.addWidget(self.add_path_but)\n f_layout.addRow(hbox)\n f_layout.addRow(self.text_line)\n\n # connect functions\n # self.text_line.textChanged.connect(self.change_choice_text)\n self.choice.currentIndexChanged.connect(self.change_choice)\n self.change_choice_but.clicked.connect(self.change_choice_text)\n\n self.del_but.clicked.connect(self.del_path)\n self.next_but.clicked.connect(self.next_path)\n self.to_start_but.clicked.connect(self.to_start)\n self.add_path_but.clicked.connect(self.add_path)\n self.browse_img_but.clicked.connect(self.get_img)\n self.browse_main_img_but.clicked.connect(self.get_main_img)\n back_but.clicked.connect(self.back_path)\n\n save_act.triggered.connect(self.write_present)\n open_act.triggered.connect(self.load_present)\n\n # run functions once\n self.check_box_reset()\n self.actor_reset()\n self.choice_reset()\n self.main_img_dir_reset()\n self.img_dir_reset()\n\n self.widget.setLayout(f_layout)\n self.setCentralWidget(self.widget)\n self.resize(600, 800)\n self.setWindowTitle('stregum')\n self.show()\n\n def check_box_reset(self):\n if self.cur_nest['path']:\n self.main_chk_box.setEnabled(True)\n self.choice.setEnabled(True)\n self.del_but.setEnabled(True)\n self.next_but.setEnabled(True)\n self.to_start_but.setEnabled(True)\n self.browse_img_but.setEnabled(True)\n self.browse_main_img_but.setEnabled(True)\n self.change_choice_but.setEnabled(True)\n index = self.choice.currentIndex()\n if 'main' in self.cur_nest['path'][index]:\n self.main_chk_box.setChecked(\n bool(self.cur_nest['path'][index]['main'])\n )\n else:\n self.main_chk_box.setChecked(False)\n else:\n self.main_chk_box.setEnabled(False)\n self.choice.setEnabled(False)\n self.del_but.setEnabled(False)\n self.next_but.setEnabled(False)\n 
self.to_start_but.setEnabled(False)\n self.browse_img_but.setEnabled(False)\n self.browse_main_img_but.setEnabled(False)\n self.change_choice_but.setEnabled(False)\n\n def actor_reset(self):\n if self.cur_nest['path']:\n index = self.choice.currentIndex()\n if 'actor' in self.cur_nest['path'][index]:\n self.actor_line.setText(\n self.cur_nest['path'][index]['actor']\n )\n else:\n self.actor_line.setText(\n 'Actor'\n )\n\n def main_img_dir_reset(self):\n if self.cur_nest['path']:\n self.main_img_dir_line.setEnabled(True)\n index = self.choice.currentIndex()\n if 'main_img' in self.cur_nest['path'][index]:\n self.main_img_dir_line.setText(\n self.cur_nest['path'][index]['main_img']\n )\n else:\n self.main_img_dir_line.setText(\n 'img'\n )\n else:\n self.main_img_dir_line.setEnabled(False)\n\n def img_dir_reset(self):\n if self.cur_nest['path']:\n self.img_dir_line.setEnabled(True)\n index = self.choice.currentIndex()\n if 'img' in self.cur_nest['path'][index]:\n self.img_dir_line.setText(\n self.cur_nest['path'][index]['img']\n )\n else:\n self.img_dir_line.setText(\n 'img'\n )\n else:\n self.img_dir_line.setEnabled(False)\n\n def choice_reset(self):\n self.choice.clear()\n for i in self.cur_nest['path']:\n self.choice.addItem(i['choice_text'])\n\n def change_choice(self):\n sender = self.sender()\n self.text_line.setPlaceholderText(\n \"This is the text that will \"\n \"appear when the player has \"\n \"chosen this path({})\".format(self.choice.currentText())\n )\n index = sender.currentIndex()\n self.cur_path_index = index\n if self.cur_nest['path']:\n self.text_line.setText(self.cur_nest['path'][index]['text'])\n else:\n self.text_line.clear()\n self.check_box_reset()\n self.actor_reset()\n self.img_dir_reset()\n self.main_img_dir_reset()\n\n def change_choice_text(self):\n self.text_line.setPlaceholderText(\n \"This is the text that will \"\n \"appear when the player has \"\n \"chosen this path({})\".format(self.choice.currentText())\n )\n index = self.choice.currentIndex()\n self.choice.setItemText(\n index, self.choice.currentText()\n )\n if self.cur_nest['path']:\n self.cur_nest['path'][index]['main'] = \\\n self.main_chk_box.isChecked()\n self.cur_nest['path'][index]['main_img'] = \\\n self.main_img_dir_line.text()\n self.cur_nest['path'][index]['img'] = \\\n self.img_dir_line.text()\n self.cur_nest['path'][index]['actor'] = \\\n self.actor_line.text()\n self.cur_nest['path'][index]['choice_text'] = \\\n self.choice.currentText()\n self.cur_nest['path'][index]['text'] = \\\n self.text_line.toPlainText()\n else:\n print('Add path first!')\n\n def next_path(self):\n if self.cur_nest['path']:\n self.back.append(self.cur_path_index)\n self.cur_nest = self.cur_nest['path'][self.cur_path_index]\n print(self.cur_nest['choice_text'])\n self.choice_reset()\n self.check_box_reset()\n self.actor_reset()\n self.img_dir_reset()\n self.main_img_dir_reset()\n self.choice_line.setText(self.cur_nest['choice_text'])\n\n def to_start(self):\n self.cur_nest = self.nest\n # self.choice_line.clear()\n self.check_box_reset()\n self.actor_reset()\n self.img_dir_reset()\n self.main_img_dir_reset()\n self.choice_reset()\n\n def add_path(self):\n new_nest = {\n \"text\": \"\",\n \"main\": False,\n \"main_img\": \"\",\n \"actor\": \"\",\n \"img\": \"\",\n \"choice_text\": \"\",\n\n \"path\": []\n }\n self.cur_nest['path'].append(new_nest)\n self.choice_reset()\n self.img_dir_reset()\n self.main_img_dir_reset()\n\n def del_path(self):\n if self.cur_nest['path']:\n index = self.choice.currentIndex()\n 
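# remove the selected choice from the current nest, then rebuild the combo box\n 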
self.cur_nest['path'].pop(index)\n self.choice_reset()\n\n def back_path(self):\n if self.back:\n self.cur_nest = self.nest\n for n in self.back:\n self.cur_nest = self.cur_nest['path'][n]\n self.check_box_reset()\n self.actor_reset()\n self.img_dir_reset()\n self.main_img_dir_reset()\n self.choice_reset()\n self.back.pop()\n\n def get_img(self):\n dir_path = QFileDialog.getOpenFileName(\n self, \"Choose Image File\", \"D:\\\\\"\n )\n if dir_path[0]:\n self.img_dir_line.setText(dir_path[0])\n print(dir_path)\n\n def get_main_img(self):\n dir_path = QFileDialog.getOpenFileName(\n self, \"Choose Image File\", \"D:\\\\\"\n )\n if dir_path[0]:\n self.main_img_dir_line.setText(dir_path[0])\n print(dir_path)\n\n def write_present(self):\n dir_path = QFileDialog.getSaveFileName(\n self, \"Save JSON File\", \"D:\\\\\", \"Json files (*.json)\"\n )\n if dir_path[0]:\n self.json_in(dir_path[0], self.nest)\n print(dir_path)\n\n def load_present(self):\n dir_path = QFileDialog.getOpenFileName(\n self, \"Open JSON File\", \"D:\\\\\", \"Json files (*.json)\"\n )\n if dir_path[0]:\n self.json_out(dir_path[0])\n print(dir_path)\n\n\ndef main():\n app = QApplication(sys.argv)\n tc = newTC(static_nest)\n print(tc.isActiveWindow())\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Message Simulator v0.1/build_present.py","file_name":"build_present.py","file_ext":"py","file_size_in_byte":12937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521316524","text":"\"\"\"\nGeneric bodies classes\nThese classes will be used for more advanced classes:\n - new and old YAMS body classes for Sympy\n - YAMS body for numerical yams\n\"\"\"\nfrom welib.yams.utils import translateInertiaMatrixToCOG, translateInertiaMatrixFromCOG\nfrom welib.yams.utils import rigidBodyMassMatrix \nfrom welib.yams.utils import R_x, R_y, R_z\nfrom welib.yams.flexibility import GMBeam, GKBeam, GKBeamStiffnening, GeneralizedMCK_PolyBeam\n# from welib.yams.utils import skew\n\n\n__all__ = ['Body','InertialBody','RigidBody','FlexibleBody']\n\n# --- For harmony with sympy\nimport numpy as np\nfrom numpy import eye, cross, cos ,sin\ndef Matrix(m):\n return np.asarray(m)\ndef zeros(m,n):\n return np.zeros((m,n))\n\n# --------------------------------------------------------------------------------}\n# --- Generic Body \n# --------------------------------------------------------------------------------{\nclass Body(object):\n \"\"\"\n Base class for rigid bodies and flexible bodies\n \"\"\"\n def __init__(self, name='', r_O=[0,0,0], R_b2g=np.eye(3)):\n self.name = name\n self._r_O = np.asarray(r_O).ravel()\n self._R_b2g = np.asarray(R_b2g)\n\n self._mass=None\n self.MM = None # To be defined by children\n\n def __repr__(self):\n s='<{} object>:\n'.format(type(self).__name__)\n return s\n\n @property\n def Mass(self):\n raise Exception('`Mass` is an old interface, use `mass` instead')\n\n @property\n def mass(self):\n return self._mass\n\n @property \n def pos_global(self):\n \"\"\" Position of origin in global coordinates \"\"\"\n return self._r_O\n\n @pos_global.setter\n def pos_global(self, r_O):\n self._r_O = np.asarray(r_O).ravel()\n \n @property\n def R_b2g(self):\n \"\"\" Transformation matrix from body to global \"\"\"\n return self._R_b2g\n\n @R_b2g.setter\n def R_b2g(self, R_b2g):\n self._R_b2g = R_b2g\n\n @property\n def R_g2b(self):\n \"\"\" Transformation matrix from global to body \"\"\"\n return self._R_b2g.transpose() \n\n def 
pos_local(self, x_gl):\n \"\"\" return position vector from origin of body, in body coordinates, of a point in global \"\"\"\n return self.R_g2b.dot(x_gl - self._r_O)\n\n\n# --------------------------------------------------------------------------------}\n# --- Ground Body \n# --------------------------------------------------------------------------------{\nclass InertialBody(Body):\n def __init__(self):\n Body.__init__(self, name='Grd')\n\n# --------------------------------------------------------------------------------}\n# --- Rigid Body \n# --------------------------------------------------------------------------------{\nclass RigidBody(Body):\n def __init__(self, name, mass, J, s_OG, r_O=[0,0,0], R_b2g=np.eye(3), s_OP=None):\n \"\"\"\n Creates a rigid body \n\n INPUTS:\n - name: name of body (string)\n - mass: body mass (float)\n - J: inertia tensor (array-like) in body frame, defined either at:\n - center of mass G, located at s_OG from the body origin\n - OR point P, located at s_OP from the body origin\n J may be defined as:\n - a 3x3 matrix\n - a 3-vector (Jxx, Jyy, Jzz) representing the diagonal values\n - a 6-vector (Jxx, Jyy, Jzz, Jxy, Jyz, Jzx) representing the diagonal and off-diagonal values\n - s_OG: vector from body origin to body COG in body frame \n\n - s_OP: vector from body origin to point where inertia is defined,\n in body frame\n (only if inertia is not defined at COG).\n - r_O: vector from global origin to body origin, in global coordinates\n - R_b2g : transformation matrix from body to global coordinates\n\n \"\"\"\n Body.__init__(self, name, r_O=r_O, R_b2g=R_b2g)\n self._mass = mass\n self._s_OG = np.asarray(s_OG).ravel()\n\n # Ensuring a 3x3 inertia matrix\n J = np.asarray(J)\n Jflat=J.ravel()\n if len(Jflat)==3:\n J = np.diag(Jflat)\n elif len(Jflat)==6:\n J = np.diag(Jflat[:3])\n J[0,1]=J[1,0]=Jflat[3]\n J[1,2]=J[2,1]=Jflat[4]\n J[0,2]=J[2,0]=Jflat[5]\n \n # inertia at COG\n if s_OP is not None:\n s_PG= self._s_OG - s_OP\n self._J_G = translateInertiaMatrixToCOG(J, mass, s_PG)\n else:\n self._J_G = J\n\n def shiftOrigin(self, s_OOnew):\n \"\"\" change body origin\n s_OOnew: vector from old origin to new origin\n \"\"\"\n s_OnewG = -np.asarray(s_OOnew) + self._s_OG\n self._s_OG = s_OnewG\n\n # --------------------------------------------------------------------------------\n # --- Inertia\n # --------------------------------------------------------------------------------\n @property \n def masscenter(self):\n \"\"\" Position of mass center in body frame\"\"\"\n return self._s_OG\n\n @property\n def masscenter_pos_global(self):\n \"\"\" return masscenter position from inertial frame \"\"\"\n return self._r_O + self.R_b2g.dot(self._s_OG)\n\n @property \n def inertia(self):\n return self.inertia_at([0,0,0])\n\n @property \n def masscenter_inertia(self):\n \"\"\" Returns inertia matrix at COG in body frame\"\"\"\n return self._J_G\n\n def inertia_at(self, s_OP, R_f2g=None):\n \"\"\" returns body inertia at a given point, and given frame (default body frame)\n INPUTS:\n - s_OP: point coordinates from body origin in body coordinates\n - R_f2g: transformation matrix from a given frame when inertia is wanted to global\n \"\"\"\n # \n s_GP = np.asarray(s_OP) - self._s_OG\n J = translateInertiaMatrixFromCOG(self._J_G, self.mass, s_GP)\n if R_f2g is not None:\n R_b2f = np.dot(R_f2g.T, self.R_b2g)\n J = R_b2f.dot(J).dot(R_b2f.T)\n return J\n\n @property\n def mass_matrix(self):\n \"\"\" Body mass matrix at origin\"\"\"\n return 
rigidBodyMassMatrix(self.mass, self.inertia, self._s_OG) # TODO change interface\n\n def mass_matrix_at(self, s_OP):\n \"\"\" Body mass matrix at a given point\"\"\"\n J = self.inertia_at(s_OP)\n s_PG = -np.asarray(s_OP)+ self._s_OG\n return rigidBodyMassMatrix(self.mass, J, s_PG) # TODO change interface\n\n def __repr__(self):\n s='<{} {} object>:\\n'.format(type(self).__name__, self.name)\n s+=' * pos_global: {} (origin)\\n'.format(np.around(self.pos_global,6))\n s+=' * masscenter: {} (body frame)\\n'.format(np.around(self.masscenter,6))\n s+=' * masscenter_pos_global: {} \\n'.format(np.around(self.masscenter_pos_global,6))\n s+=' - mass: {}\\n'.format(self.mass)\n s+=' * R_b2g: \\n {}\\n'.format(self.R_b2g)\n s+=' * masscenter_inertia: \\n{}\\n'.format(np.around(self.masscenter_inertia,6))\n s+=' * inertia: (at origin)\\n{}\\n'.format(np.around(self.inertia,6))\n s+='Useful getters: inertia_at, mass_matrix\\n'\n return s\n\n def combine(self, other, name=None, R_b2g=np.eye(3), r_O=None):\n \"\"\" Combine two rigid bodies and form a new rigid body\n \n \"\"\"\n M = self.mass + other.mass\n x_G = (self.mass * self.masscenter_pos_global + other.mass * other.masscenter_pos_global)/M\n\n if name is None:\n name=self.name + other.name\n\n # Inertias in new body frame and at new COG\n s_O1_G = self.pos_local(x_G)\n s_O2_G = other.pos_local(x_G)\n J1 = self.inertia_at(s_O1_G, R_b2g)\n J2 = other.inertia_at(s_O2_G, R_b2g)\n #print('s_O1_G ',s_O1_G)\n #print('s_O2_G ',s_O2_G)\n #print('J1\\n ',J1)\n #print('J2\\n ',J2)\n #print('J12\\n ',J1+J2)\n\n if r_O is None:\n # Putting origin of new body at COG of common body\n r_O = x_G\n s_OG = [0,0,0]\n else:\n s_OG = (R_b2g.T).dot(x_G-r_O)\n return RigidBody(name, M, J1+J2, s_OG, r_O=r_O, R_b2g=R_b2g)\n\n\n# --------------------------------------------------------------------------------}\n# --- Flexible Body \n# --------------------------------------------------------------------------------{\nclass FlexibleBody(Body):\n def __init__(self, name, \n r_O=[0,0,0], R_b2g=np.eye(3) # Position and orientation in global\n ):\n \"\"\"\n Creates a Flexible body \n \"\"\"\n Body.__init__(self, name, r_O=r_O, R_b2g=R_b2g)\n\n# --------------------------------------------------------------------------------}\n# --- Beam Body \n# --------------------------------------------------------------------------------{\nclass BeamBody(FlexibleBody):\n def __init__(self, name, s_span, s_P0, m, EI, PhiU, PhiV, PhiK, jxxG=None, s_G0=None, \n s_min=None, s_max=None,\n r_O=[0,0,0], R_b2g=np.eye(3), # Position and orientation in global\n damp_zeta=None, RayleighCoeff=None, DampMat=None,\n bAxialCorr=False, bOrth=False, Mtop=0, Omega=0, bStiffening=True, gravity=None, main_axis='z', massExpected=None):\n \"\"\"\n Creates a Flexible Beam body \n Points P0 - Undeformed mean line of the body\n \"\"\"\n FlexibleBody.__init__(self, name, r_O=r_O, R_b2g=R_b2g)\n self.main_axis = main_axis\n self.s_span = s_span\n if s_min is None:\n self.s_min = np.min(s_span)\n else:\n self.s_min = s_min\n if s_max is None:\n self.s_max = np.max(s_span)\n else:\n self.s_max = s_max\n self.m = m\n self.s_G0 = s_G0\n self.PhiU = PhiU\n self.PhiV = PhiV\n self.PhiK = PhiK\n self.jxxG = jxxG\n self.s_P0 = s_P0\n self.EI = EI\n if jxxG is None:\n self.jxxG = 0*m\n if self.s_G0 is None:\n self.s_G0=self.s_P0\n \n self.s_G = self.s_G0\n self.bAxialCorr = bAxialCorr\n self.bOrth = bOrth\n self.bStiffening= bStiffening\n self.Mtop = Mtop\n self.Omega = Omega # rad/s\n self.gravity = gravity\n\n self.damp_zeta 
= damp_zeta\n self.RayleighCoeff = RayleighCoeff\n self.DampMat = DampMat\n\n if massExpected is not None:\n self.computeMassMatrix()\n Mass = self.MM[0,0]\n factor = Mass/massExpected\n if np.abs(factor-1)>1e-5:\n print('>>>BeamBody: Scaling mass distribution with factor {:.4f} in order to get a desired mass of {}'.format(factor,massExpected))\n self.m /= factor\n\n self.computeMassMatrix()\n self.computeStiffnessMatrix()\n self.computeDampingMatrix(damp_zeta)\n\n # TODO\n #self.V0 = np.zeros((3,self.nSpan))\n #self.K0 = np.zeros((3,self.nSpan))\n #self.rho_G0_inS = np.zeros((3,self.nSpan)) # location of COG in each cross section\n #[o.PhiV,o.PhiK] = fBeamSlopeCurvature(o.s_span,o.PhiU,o.PhiV,o.PhiK,1e-2);\n #[o.V0,o.K0] = fBeamSlopeCurvature(o.s_span,o.s_P0,o.V0,o.K0,1e-2) ;\n\n def toRigidBody(self):\n \"\"\" Create a rigid body from a flexible body \"\"\"\n return RigidBody(self.name+'_rigid', self.mass, self.masscenter_inertia, self.masscenter, r_O=self.pos_global, R_b2g=self.R_b2g)\n\n def computeStiffnessMatrix(B, Mtop=None, Omega=None):\n B.KK0 = GKBeam(B.s_span, B.EI, B.PhiK, bOrth=B.bOrth)\n\n if Mtop is not None:\n B.Mtop=Mtop\n if Omega is not None:\n B.Omega=Omega\n\n if B.bStiffening:\n B.KKg_self = GKBeamStiffnening(B.s_span, B.PhiV, B.gravity, B.m, B.Mtop, B.Omega, main_axis=B.main_axis, bSelfWeight=True, bMtop=False, bRot=False)\n B.KKg_Mtop = GKBeamStiffnening(B.s_span, B.PhiV, B.gravity, B.m, B.Mtop, B.Omega, main_axis=B.main_axis, bSelfWeight=False, bMtop=True , bRot=False)\n B.KKg_rot = GKBeamStiffnening(B.s_span, B.PhiV, B.gravity, B.m, B.Mtop, B.Omega, main_axis=B.main_axis, bSelfWeight=False, bMtop=False, bRot=True)\n B.KKg = B.KKg_self+B.KKg_Mtop+B.KKg_rot\n else:\n B.KKg = B.KK0*0\n B.KKg_self = B.KK0*0\n B.KKg_Mtop = B.KK0*0\n B.KKg_rot = B.KK0*0\n\n B.KK=B.KK0+B.KKg\n if np.any(np.isnan(B.KK)):\n #print('>>> WARNING, some stiffness matrix values are nan, replacing with 0')\n B.KK[np.isnan(B.KK)]=0\n\n def computeDampingMatrix(self, damp_zeta=None):\n self.DD = np.zeros((6+self.nf,6+self.nf))\n if damp_zeta is None:\n return\n for j,zeta in enumerate(damp_zeta):\n gm = self.MM[6+j,6+j]\n gk = self.KK[6+j,6+j]\n om = np.sqrt(gk/gm)\n xi = zeta*2*np.pi\n c = xi * gm * om / np.pi\n self.DD[6+j,6+j] = c\n\n if np.any(np.isnan(self.DD)):\n #print('>>> WARNING, some damping matrix values are nan, replacing with 0')\n self.DD[np.isnan(self.DD)]=0\n\n\n @property \n def start_pos(self):\n \"\"\" start of body wrt origin \"\"\"\n return self.s_P0[:,0]\n @property \n def end_pos(self):\n \"\"\" end of body wrt origin \"\"\"\n return self.s_P0[:,-1]\n # --------------------------------------------------------------------------------}\n # --- Inertia \n # --------------------------------------------------------------------------------{\n @property \n def mass(self):\n \"\"\" Body mass\"\"\"\n return self.MM[0,0]\n\n @property \n def masscenter(self):\n \"\"\" Position of mass center in body frame\"\"\"\n if self.mass>0:\n return np.trapz(self.m*self.s_G0,self.s_span)/self.mass\n else:\n return np.array([0,0,0])\n\n @property\n def masscenter_pos_global(self):\n \"\"\" return masscenter position from inertial frame \"\"\"\n return self._r_O + self.R_b2g.dot(self.masscenter)\n\n @property \n def inertia(self):\n \"\"\" Returns inertia matrix at Origin in body frame\"\"\"\n return self.MM[3:6,3:6]\n\n @property \n def masscenter_inertia(self):\n \"\"\" Returns inertia matrix at COG in body frame. 
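It is obtained by shifting the rigid-body inertia block MM[3:6,3:6] from the origin to the mass center with the parallel-axis theorem.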
\n NOTE: this is approximate for flexible bodies\n \"\"\"\n return translateInertiaMatrixToCOG(self.inertia, self.mass, self.masscenter)\n\n def inertia_at(self, s_OP, R_f2g=None):\n \"\"\" returns body inertia at a given point, and given frame (default body frame)\n NOTE: this is approximate for flexible bodies\n INPUTS:\n - s_OP: point coordinates from body origin in body coordinates\n - R_f2g: transformation matrix from a given frame when inertia is wanted to global\n \"\"\"\n # \n s_GP = np.asarray(s_OP) - self.masscenter\n J = translateInertiaMatrixFromCOG(self.masscenter_inertia, self.mass, s_GP)\n if R_f2g is not None:\n R_b2f = np.dot(R_f2g.T, self.R_b2g)\n J = R_b2f.dot(J).dot(R_b2f.T)\n return J\n\n @property\n def mass_matrix(self):\n \"\"\" Body mass matrix at origin\"\"\"\n return self.MM\n\n def mass_matrix_at(self, s_OP):\n \"\"\" Body mass matrix at a given point\"\"\"\n J = self.inertia_at(s_OP)\n s_PG = -np.asarray(s_OP)+ self.masscenter\n return rigidBodyMassMatrix(self.mass, J, s_PG) # TODO change interface\n\n def computeMassMatrix(B):\n B.MM, B.Gr, B.Ge, B.Oe, B.Oe6 = GMBeam(B.s_G, B.s_span, B.m, B.PhiU, jxxG=B.jxxG, bUseIW=True, main_axis=B.main_axis, bAxialCorr=B.bAxialCorr, bOrth=B.bOrth, rot_terms=True)\n if np.any(np.isnan(B.MM)):\n #print('>>> WARNING, some mass matrix values are nan, replacing with 0')\n B.MM[np.isnan(B.MM)]=0\n\n @property\n def length(B):\n return B.s_max-B.s_min\n\n @property\n def nSpan(B):\n return len(B.s_span)\n\n @property\n def nf(B):\n return len(B.PhiU)\n\n @property\n def Bhat_x_bc(self,iNode=-1):\n Bhat_x_bc = Matrix(np.zeros((3,self.nf)))\n for j in np.arange(self.nf):\n Bhat_x_bc[:,j]=self.PhiU[j][:,iNode] # along x\n return Bhat_x_bc\n\n @property\n def Bhat_t_bc(self,iNode=-1):\n \"\"\" unit \"alpha\" couplings \"\"\"\n Bhat_t_bc = Matrix(np.zeros((3,self.nf)))\n for j in np.arange(self.nf):\n if self.main_axis=='x':\n Bhat_t_bc[0,j]=0 # torsion\n Bhat_t_bc[1,j]=-self.PhiV[j][2,iNode]\n Bhat_t_bc[2,j]= self.PhiV[j][1,iNode]\n elif self.main_axis=='z':\n Bhat_t_bc[0,j]=-self.PhiV[j][1,iNode]\n Bhat_t_bc[1,j]= self.PhiV[j][0,iNode]\n Bhat_t_bc[2,j]= 0 # torsion\n return Bhat_t_bc\n\n def __repr__(self):\n s='<{} {} object>:\n'.format(type(self).__name__, self.name)\n s+=' * pos_global: {} (origin)\n'.format(np.around(self.pos_global,6))\n s+=' * masscenter: {} (body frame)\n'.format(np.around(self.masscenter,6))\n s+=' * masscenter_pos_global: {} \n'.format(np.around(self.masscenter_pos_global,6))\n s+=' - mass: {}\n'.format(self.mass)\n s+=' * length: {}\n'.format(self.length)\n s+=' * R_b2g: \n {}\n'.format(self.R_b2g)\n s+=' * masscenter_inertia: \n{}\n'.format(np.around(self.masscenter_inertia,6))\n s+=' * inertia: (at origin)\n{}\n'.format(np.around(self.inertia,6))\n s+=' - Properties: s_span, m, EI, Mtop, PhiU, PhiV, PhiK\n'\n s+=' MM, KK, KK0, KKg, KKg_Mtop, KKg_self\n'\n s+='Useful getters: inertia_at, mass_matrix_at, toRigidBody \n'\n return s\n\n# --------------------------------------------------------------------------------}\n# --- FAST Beam body \n# --------------------------------------------------------------------------------{\nclass FASTBeamBody(BeamBody):\n def __init__(self, ED, inp, Mtop=0, shapes=None, main_axis='z', nSpan=None, bAxialCorr=False, bStiffening=True, jxxG=None, Omega=0,\n spanFrom0=False,\n massExpected=None,\n algo=''):\n \"\"\" \n INPUTS:\n ED: ElastoDyn inputs as read from weio\n inp: blade or tower file, as read by weio\n Mtop: top mass if any\n nSpan: number of spanwise station 
used (interpolated from input)\n Use -1 or None to use number of stations from input file\n \"\"\"\n damp_zeta = None\n RayleighCoeff = None\n DampMat = None\n # --- Reading properties, coefficients\n exp = np.arange(2,7)\n if 'BldProp' in inp.keys():\n # --- Blade\n name = 'bld'\n shapeBase = ['BldFl1','BldFl2','BldEdg']\n if shapes is None:\n shapes=[0,1,2]\n coeff = np.zeros((len(exp), len(shapes)))\n for iishape, ishape in enumerate(shapes):\n base=shapeBase[ishape]\n coeff[0, iishape] = inp[base+'Sh(2)']\n coeff[1, iishape] = inp[base+'Sh(3)']\n coeff[2, iishape] = inp[base+'Sh(4)']\n coeff[3, iishape] = inp[base+'Sh(5)']\n coeff[4, iishape] = inp[base+'Sh(6)']\n damp_zeta = np.array([ inp['BldFlDmp(1)'], inp['BldFlDmp(2)'], inp['BldEdDmp(1)']])/100\n damp_zeta=damp_zeta[shapes]\n mass_fact = inp['AdjBlMs'] # Factor to adjust blade mass density (-)\n prop = inp['BldProp'] \n s_bar, m, EIFlp, EIEdg =prop[:,0], prop[:,3], prop[:,4], prop[:,5]\n\n # TODO we need two or three options with better naming\n if spanFrom0:\n s_span=s_bar*(ED['TipRad']-ED['HubRad']) + ED['HubRad'] # NOTE: span starting at HubRad\n if np.abs(s_span[0])<1e-6:\n pass \n else:\n # We add two positions with zero before\n s_span = np.concatenate(([0,s_span[0]*0.99],s_span))\n m = np.concatenate(([0,0],m))\n EIFlp = np.concatenate(([0,0],EIFlp))\n EIEdg = np.concatenate(([0,0],EIEdg))\n #s_span=s_bar*ED['TipRad'] # NOTE: this is a wrong scaling\n else:\n s_span=s_bar*(ED['TipRad']-ED['HubRad']) + ED['HubRad'] # NOTE: span starting at HubRad\n r_O = [0,0,0] # NOTE: blade defined wrt point R for now\n #print(s_span)\n\n\n\n psi_B= 0\n if main_axis=='x':\n R_SB = R_z(0*np.pi + psi_B)\n elif main_axis=='z':\n R_SB = R_x(0*np.pi + psi_B)\n R_SB = np.dot(R_SB, R_y(ED['PreCone(1)']*np.pi/180)) # Blade 2 shaft\n R_b2g= R_SB\n\n elif 'TowProp' in inp.keys():\n # --- Tower\n name = 'twr'\n shapeBase = ['TwFAM1','TwFAM2','TwSSM1','TwSSM2']\n if shapes is None:\n shapes=[0,1,2,3]\n coeff = np.zeros((len(exp), len(shapes)))\n for iishape, ishape in enumerate(shapes):\n base=shapeBase[ishape]\n coeff[0, iishape] = inp[base+'Sh(2)']\n coeff[1, iishape] = inp[base+'Sh(3)']\n coeff[2, iishape] = inp[base+'Sh(4)']\n coeff[3, iishape] = inp[base+'Sh(5)']\n coeff[4, iishape] = inp[base+'Sh(6)']\n damp_zeta = np.array([inp['TwrFADmp(1)'], inp['TwrFADmp(2)'], inp['TwrSSDmp(1)'], inp['TwrSSDmp(2)']])/100 # structural damping ratio \n damp_zeta=damp_zeta[shapes]\n\n mass_fact = inp['AdjTwMa'] # Factor to adjust tower mass density (-)\n prop = inp['TowProp']\n span_max = ED['TowerHt']-ED['TowerBsHt']\n s_bar, m, EIFlp, EIEdg = prop[:,0], prop[:,1], prop[:,2], prop[:,3]\n r_O = [0,0,ED['TowerBsHt']]\n R_b2g=np.eye(3)\n s_span=s_bar*span_max\n\n elif 'SttcSolve' in inp.keys():\n import welib.FEM.fem_beam as femb\n # --- Tower\n name = 'fnd'\n # --- Option 1 - Read data from SubDyn\n # Read SubDyn file\n # Convert to \"welib.fem.Graph\" class to easily handle the model (overkill for a monopile)\n graph = inp.toGraph()\n graph.divideElements(inp['NDiv'])\n graph.sortNodesBy('z')\n df = graph.nodalDataFrame()\n x = df['z'].values # NOTE: FEM uses \"x\" as main axis\n if np.any(df['y']!=0): \n raise NotImplementedError('FASTBeamBody for substructure only support monopile, structure not fully vertical in file: {}'.format(inp.filename))\n if np.any(df['x']!=0): \n raise NotImplementedError('FASTBeamBody for substructure only support monopile, structure not fully vertical in file: {}'.format(inp.filename))\n D = df['D'].values # Diameter [m]\n t = 
df['t'].values # thickness [m]\n E = df['E'].values # Young modules [N/m^2]\n G = df['G'].values # Shear modules [N/m^2]\n rho = df['rho'].values # material density [kg/m^3]\n # NOTE: interpolate to nSpan to get uniform spacing\n nSpan = len(D)\n xOld = x\n x = np.linspace(np.min(x),np.max(x), nSpan)\n D = np.interp(x, xOld, D)\n t = np.interp(x, xOld, t)\n E = np.interp(x, xOld, E)\n G = np.interp(x, xOld, G)\n rho = np.interp(x, xOld, rho)\n\n # Derive section properties for a hollow cylinder based on diameter and thickness\n A = np.pi*( (D/2)**2 - (D/2-t)**2) # Area for annulus [m^2] \n I = np.pi/64*(D**4-(D-2*t)**4) # Second moment of area for annulus (m^4)\n Kt = I # Torsion constant, same as I for annulus [m^4]\n Ip = 2*I # Polar second moment of area [m^4]\n L = np.max(x)-np.min(x) # Monopile length\n m=rho*A\n\n # --- Compute FEM model and mode shapes\n FEM=femb.cbeam(x,m=m,EIx=E*Ip,EIy=E*I,EIz=E*I,EA=E*A,A=A,E=E,G=G,Kt=Kt,\n element='frame3d', BC='clamped-free', M_tip=None)\n\n # --- Perform Craig-Bampton reduction, fixing the top node of the beam\n Q_G,_Q_CB, df_G, df_CB, Modes_G, Modes_CB, CB = femb.CB_topNode(FEM, nCB=0, element='frame3d', main_axis='x') # TODO main_axis\n # TODO TODO finda way to use these matrices instead of the ones computed with flexibility\n #print('CB MM\\n',CB['MM'])\n #print('CB KK\\n',CB['KK'])\n if main_axis=='x':\n raise NotImplementedError('')\n else:\n pass\n # we need to swap the CB modes\n nShapes=len(shapes)\n PhiU = np.zeros((nShapes,3,nSpan)) # Shape\n PhiV = np.zeros((nShapes,3,nSpan)) # Shape\n PhiK = np.zeros((nShapes,3,nSpan)) # Shape\n dx=np.unique(np.around(np.diff(x),4))\n if len(dx)>1:\n print(x)\n print(dx)\n raise NotImplementedError()\n from welib.mesh.gradient import gradient_regular\n for iShape, idShape in enumerate(shapes):\n if idShape==0:\n # shape 0 \"ux\" (uz in FEM)\n PhiU[iShape][0,:] = df_G['G3_uz'].values\n PhiV[iShape][0,:] =-df_G['G3_ty'].values\n PhiK[iShape][0,:] = gradient_regular(PhiV[iShape][0,:],dx=dx[0],order=4)\n elif idShape==1:\n # shape 1, \"uy\"\n PhiU[iShape][1,:] = df_G['G2_uy'].values\n PhiV[iShape][1,:] = df_G['G2_tz'].values\n PhiK[iShape][1,:] = gradient_regular(PhiV[iShape][1,:],dx=dx[0],order=4)\n elif idShape==4:\n # shape 4, \"vy\" (vz in FEM)\n PhiU[iShape][0,:] = df_G['G6_uy'].values\n PhiV[iShape][0,:] = df_G['G6_tz'].values\n PhiK[iShape][0,:] = gradient_regular(PhiV[iShape][0,:],dx=dx[0],order=4)\n else:\n raise NotImplementedError()\n \n\n\n p=dict()\n p['s_span']=x-np.min(x)\n p['s_P0']=np.zeros((3,nSpan))\n if main_axis=='z':\n p['s_P0'][2,:]=x-np.min(x)\n r_O = (df['x'].values[0], df['y'].values[0], df['z'].values[0])\n R_b2g = np.eye(3)\n print('r_O',r_O)\n p['m']=m\n p['EI']=np.zeros((3,nSpan))\n if main_axis=='z':\n p['EI'][0,:]=E*I\n p['EI'][1,:]=E*I\n p['jxxG']=rho*Ip # TODO verify\n p['s_min']=p['s_span'][0]\n p['s_max']=p['s_span'][-1]\n p['PhiU']=PhiU\n p['PhiV']=PhiV\n p['PhiK']=PhiK\n if inp['GuyanDampMod']==1:\n # Rayleigh Damping\n RayleighCoeff=inp['RayleighDamp']\n #if RayleighCoeff[0]==0:\n # damp_zeta=omega*RayleighCoeff[1]/2. 
\n elif inp['GuyanDampMod']==2:\n # Full matrix\n DampMat = inp['GuyanDampMatrix']\n DampMat=DampMat[np.ix_(shapes,shapes)]\n else:\n print(inp.keys())\n raise Exception('Body type not supported, key `BldProp`, `TowProp`, or `SttcSolve` not found in file')\n\n gravity=ED['Gravity']\n\n if name in ['twr','bld']:\n m *= mass_fact\n p = GeneralizedMCK_PolyBeam(s_span, m, EIFlp, EIEdg, coeff, exp, damp_zeta, jxxG=jxxG, \n gravity=gravity, Mtop=Mtop, Omega=Omega, nSpan=nSpan, bAxialCorr=bAxialCorr, bStiffening=bStiffening, main_axis=main_axis, shapes=shapes, algo=algo)\n elif name in ['fnd']:\n pass\n\n else:\n raise NotImplementedError()\n\n # TODO TODO sort out span for Blades and HubRad \n\n BeamBody.__init__(self, name, p['s_span'], p['s_P0'], p['m'], p['EI'], p['PhiU'], p['PhiV'], p['PhiK'], jxxG=p['jxxG'], \n s_min=p['s_min'], s_max=p['s_max'],\n r_O = r_O, R_b2g=R_b2g, # NOTE: this is lost in YAMS\n damp_zeta=damp_zeta, RayleighCoeff=RayleighCoeff, DampMat=DampMat,\n bAxialCorr=bAxialCorr, bOrth=name=='bld', gravity=gravity, Mtop=Mtop, Omega=Omega, bStiffening=bStiffening, main_axis=main_axis,\n massExpected=massExpected\n )\n","sub_path":"welib/yams/bodies.py","file_name":"bodies.py","file_ext":"py","file_size_in_byte":28726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"51286350","text":"# Randomly draw a coupon\r\nimport random\r\na=random.randint(0,2)\r\nzheshu=[\"Mechrevo 10% off\",\"Weilong latiao 80% off\",\"Lao Gan Ma 90% off\"]\r\na=zheshu[a]\r\nb=a\r\nprint(\"Congratulations, you drew the coupon:\",a)\r\n\r\n# 1. Set up the products\r\nshop = [\r\n [\"Mechrevo\",9000], # shop[chose][1]\r\n [\"Think pad\",4500],\r\n [\"Mac book pro\",12000],\r\n [\"Laundry detergent\",20],\r\n [\"Watermelon\",2],\r\n [\"Lao Gan Ma\",15],\r\n [\"Weilong latiao\",3.5]\r\n]\r\n\r\n\r\n# 2. Prepare enough money\r\nmoney = input(\"Enter your starting balance: \")\r\nmoney = int(money) # \"5\" --> 5\r\n# 3. Prepare an empty shopping cart\r\nmycart = []\r\ndanjia = 0\r\ndanjia = float(danjia)\r\n\r\n# 4. Start shopping:\r\nwhile True: # infinite loop\r\n # Display the products\r\n for key ,value in enumerate(shop):\r\n print(key,value)\r\n\r\n # Read input\r\n chose = input(\"Enter the number of the product you want: \")\r\n if chose.isdigit():# \"5\" --> 5\r\n chose = int(chose)\r\n # Does the product exist?\r\n if chose >= len(shop): # len()\r\n print(\"Sorry, that product does not exist!\")\r\n else:\r\n # Is the balance enough?\r\n if b==\"Mechrevo 10% off\":\r\n\r\n if chose == 0:\r\n if money < 8100:\r\n print(\"Not enough money!\")\r\n else:\r\n mycart.append(shop[chose])\r\n money = money-8100\r\n\r\n else:\r\n mycart.append(shop[chose])\r\n money = money - shop[chose][1]\r\n\r\n elif b==\"Weilong latiao 80% off\":\r\n\r\n if chose == 6:\r\n if money < 0.7:\r\n print(\"Not enough money!\")\r\n else:\r\n mycart.append(shop[chose])\r\n money = money-0.7\r\n\r\n else:\r\n mycart.append(shop[chose])\r\n money = money - shop[chose][1]\r\n\r\n\r\n elif b==\"Lao Gan Ma 90% off\":\r\n\r\n if chose == 5:\r\n if money < 1.5:\r\n print(\"Not enough money!\")\r\n else:\r\n mycart.append(shop[chose])\r\n money = money - 1.5\r\n\r\n else:\r\n mycart.append(shop[chose])\r\n money = money - shop[chose][1]\r\n\r\n print(\"Added to your cart! Remaining balance:\", money, \"¥\")\r\n\r\n elif chose == 'q' or chose == 'Q':\r\n print(\"See you next time!\")\r\n break\r\n else:\r\n print(\"Sorry, invalid input, please try again!\")\r\n\r\n# Print the receipt\r\nprint(\"Here is your receipt:\")\r\nprint(\"--------------山海城----------------------\")\r\nfor key ,value in enumerate(mycart):\r\n print(key,value)\r\n\r\nprint(\"Your remaining balance:\",money,\"¥\")\r\nprint(\"-----------See you next time---------------------\")","sub_path":"购物.py","file_name":"购物.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"648454561","text":"import numpy as np\n
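\n# A quick worked example of the BMR formula implemented in Cal() below, with hypothetical\n# numbers (not from this project): a 30-year-old male, 180 cm, 80 kg, activity level\n# 'Active' (PA = 1.55) and fitness goal 'Keep Fit':\n#   BMR      = 10*80 + 6.25*180 - 5*30 + 5 = 1780 kcal\n#   calories = 1780 * 1.55 = 2759 kcal/day ('Keep Fit' applies no further adjustment)\n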
#how to calculate macros per day: https://www.healthline.com/nutrition/how-to-count-macros#step-by-step\ndef Cal(height, weight, age, gender, fitness_goal, activity_level):\n # age = raw_input(\"Please insert your age( 3 < y <100 ):\");\n # weight = raw_input(\"Please insert your weight(kg):\");\n # height = raw_input(\"Please insert your height(cm):\"); height should be given in [cm]\n # gender = raw_input(\"Please insert your gender(male/female):\");\n # activity_level = raw_input(\"Please insert your activity level(Sedentary/LowActive/Active/High_active):\");\n Calories = 0\n\n if activity_level == 'Sedentary':\n PA = 1.2\n elif activity_level == 'LowActive':\n PA = 1.375\n elif activity_level == 'Active':\n PA = 1.55\n elif activity_level == 'High_active':\n PA = 1.725\n else:\n print ('unresolved activity level')\n PA = 1.2 # fall back to sedentary so PA is always defined\n\n if gender == 'male':\n if (age >= 20) and (age <= 60):\n Calories = PA * (10 * float(weight) + 6.25 * float(height) - 5 * float(age) + 5)\n else:\n print ('unresolved age value')\n elif gender == 'female':\n if (age >= 20) and (age <= 60):\n Calories = PA * (10 * float(weight) + 6.25 * float(height) - 5 * float(age) - 161)\n else:\n print ('unresolved age value')\n\n #weight factor for different fitness goals\n if fitness_goal == 'Lose Weight':\n Calories = Calories * 0.8\n elif fitness_goal == 'Keep Fit':\n Calories = Calories\n elif fitness_goal == 'Build Muscle':\n Calories = Calories * 1.2\n else:\n print('unresolved fitness_goal')\n\n return Calories\n\n\n #vitamins :https://health.gov/dietaryguidelines/2015-scientific-report/15-appendix-E3/e3-1-a4.asp\n #calories : Carb: 4 calories/g Protein: 4 calories/g Fat: 9 calories/g Alcohol 7 calories/g\n #g_of_carb = g_of_protein = g_of_fat = 0\ndef nutrition(height, weight, age, gender, fitness_goal, activity_level):\n real_age = age\n Calories = Cal(height, weight, age, gender, fitness_goal, activity_level)\n Calories = Calories / 3 # divide into three meals\n if fitness_goal == 'Lose Weight':\n # 45% protein 40% carb 15% fat\n prot_cal = Calories * 0.45\n cab_cal = Calories * 0.4 \n fat_cal = Calories * 0.15\n\n\n elif fitness_goal == 'Keep Fit':\n prot_cal = Calories * 0.4\n cab_cal = Calories * 0.4 \n fat_cal = Calories * 0.2\n elif fitness_goal == 'Build Muscle':\n prot_cal = Calories * 0.4\n cab_cal = Calories * 0.5\n fat_cal = Calories * 0.2\n\n prot_weight = prot_cal / 4\n cab_weight = cab_cal / 4\n fat_weight = fat_cal / 9 * 2.5\n Fiber_weight = prot_weight * 0.3\n\n Macronutrients = {'Protein,g':str(prot_weight), 'Protein,kcal': str(prot_cal), 'Carbonhydrate,g': str(cab_weight),\n 'Carbonhydrate,kcal': str(cab_cal), 'DietaryFiber,g': str(Fiber_weight), 'AddedSugars,kcal': '<10%',\n 'TotalFat,kcal': str(fat_weight), 'SaturatedFat,kcal': '<10%', 'LinoleicAcid,g': '7',\n 'LinolenicAcid,g': '0.7'}\n\n if gender == 'male':\n if real_age > 3 and real_age <= 8:\n Minerals = {'Calcium,mg': '700', 'Iron,mg': '7', 'Magnesium,mg': '80', 'Phosphorus,mg': '460',\n 'Potassium,mg': '3000', 'Sodium,mg': '1500', 'Zinc,mg': '3', 'Copper,mcg': '340',\n 'Manganese,mg': '1.2', 'Selenium,mcg': '20'}\n Vitamins = {'Vitamin A, mg RAE': '300', 'Vitamin E,mg AT': '6', 'Vitamin D, IU': '600', 'Vitamin C, mg': '15',\n 'Thiamin, mg': '0.5', 'Riboflavin, mg': '0.5', 'Niacin, mg': '6', 'Vitamin B6, mg': '0.5',\n 'Vitamin B12, mcg': '0.9', 'Choline, mg': '200', 'Vitamin K, mcg': '30', 'Folate, mcg DFE': '150'}\n elif real_age > 8 and real_age <= 13:\n Minerals = {'Calcium,mg': '1300', 'Iron,mg': '8', 'Magnesium,mg': '240', 
'Phosphorus,mg': '1250',\n 'Potassium,mg': '4500', 'Sodium,mg': '2200', 'Zinc,mg': '8', 'Copper,mcg': '700', 'Manganese,mg': '1.9',\n 'Selenium,mcg': '40'}\n Vitamins = {'Vitamin A, mg RAE': '600', 'Vitamin E,mg AT': '11', 'Vitamin D, IU': '600', 'Vitamin C, mg': '45',\n 'Thiamin, mg': '0.9', 'Riboflavin, mg': '0.9', 'Niacin, mg': '12', 'Vitamin B6, mg': '1',\n 'Vitamin B12, mcg': '1.8', 'Choline, mg': '250', 'Vitamin K, mcg': '55', 'Folate, mcg DFE': '200'}\n elif real_age > 13 and real_age <= 18:\n Minerals = {'Calcium,mg': '1300', 'Iron,mg': '11', 'Magnesium,mg': '410', 'Phosphorus,mg': '1250',\n 'Potassium,mg': '4700', 'Sodium,mg': '2300', 'Zinc,mg': '11', 'Copper,mcg': '890', 'Manganese,mg': '2.2',\n 'Selenium,mcg': '55'}\n Vitamins = {'Vitamin A, mg RAE': '900', 'Vitamin E,mg AT': '15', 'Vitamin D, IU': '600', 'Vitamin C, mg': '75',\n 'Thiamin, mg': '1.2', 'Riboflavin, mg': '1.3', 'Niacin, mg': '16', 'Vitamin B6, mg': '1.3',\n 'Vitamin B12, mcg': '2.4', 'Choline, mg': '550', 'Vitamin K, mcg': '75', 'Folate, mcg DFE': '400'}\n elif real_age > 18 and real_age <= 30:\n Minerals = {'Calcium,mg': '1000', 'Iron,mg': '8', 'Magnesium,mg': '400', 'Phosphorus,mg': '700', 'Potassium,mg': '4700',\n 'Sodium,mg': '2300', 'Zinc,mg': '11', 'Copper,mcg': '900', 'Manganese,mg': '2.3', 'Selenium,mcg': '55'}\n Vitamins = {'Vitamin A, mg RAE': '900', 'Vitamin E,mg AT': '15', 'Vitamin D, IU': '600', 'Vitamin C, mg': '90',\n 'Thiamin, mg': '1.2', 'Riboflavin, mg': '1.3', 'Niacin, mg': '16', 'Vitamin B6, mg': '1.3',\n 'Vitamin B12, mcg': '2.4', 'Choline, mg': '550', 'Vitamin K, mcg': '120', 'Folate, mcg DFE': '400'}\n elif real_age > 30 and real_age <= 50:\n Minerals = {'Calcium,mg': '1000', 'Iron,mg': '8', 'Magnesium,mg': '420', 'Phosphorus,mg': '700', 'Potassium,mg': '4700',\n 'Sodium,mg': '2300', 'Zinc,mg': '11', 'Copper,mcg': '900', 'Manganese,mg': '2.3', 'Selenium,mcg': '55'}\n Vitamins = {'Vitamin A, mg RAE': '900', 'Vitamin E,mg AT': '15', 'Vitamin D, IU': '600', 'Vitamin C, mg': '90',\n 'Thiamin, mg': '1.2', 'Riboflavin, mg': '1.3', 'Niacin, mg': '16', 'Vitamin B6, mg': '1.3',\n 'Vitamin B12, mcg': '2.4', 'Choline, mg': '550', 'Vitamin K, mcg': '120', 'Folate, mcg DFE': '400'}\n elif real_age > 50:\n Minerals = {'Calcium,mg': '1000', 'Iron,mg': '8', 'Magnesium,mg': '420', 'Phosphorus,mg': '700', 'Potassium,mg': '4700',\n 'Sodium,mg': '2300', 'Zinc,mg': '11', 'Copper,mcg': '900', 'Manganese,mg': '2.3', 'Selenium,mcg': '55'}\n Vitamins = {'Vitamin A, mg RAE': '900', 'Vitamin E,mg AT': '15', 'Vitamin D, IU': '600', 'Vitamin C, mg': '90',\n 'Thiamin, mg': '1.2', 'Riboflavin, mg': '1.3', 'Niacin, mg': '16', 'Vitamin B6, mg': '1.7',\n 'Vitamin B12, mcg': '2.4', 'Choline, mg': '550', 'Vitamin K, mcg': '120', 'Folate, mcg DFE': '400'}\n else:\n print ('error')\n if gender == 'female':\n if real_age > 3 and real_age <= 8:\n Minerals = {'Calcium,mg': '1000', 'Iron,mg': '10', 'Magnesium,mg': '130', 'Phosphorus,mg': '500',\n 'Potassium,mg': '3800', 'Sodium,mg': '1900', 'Zinc,mg': '5', 'Copper,mcg': '440',\n 'Manganese,mg': '1.5', 'Selenium,mcg': '30'}\n Vitamins = {'Vitamin A, mg RAE': '400', 'Vitamin E,mg AT': '7', 'Vitamin D, IU': '600',\n 'Vitamin C, mg': '25', 'Thiamin, mg': '0.6', 'Riboflavin, mg': '0.6',\n 'Niacin, mg': '8', 'Vitamin B6, mg': '0.6', 'Vitamin B12, mcg': '1.2',\n 'Choline, mg': '250', 'Vitamin K, mcg': '55', 'Folate, mcg DFE': '200'}\n elif real_age > 8 and real_age <= 13:\n Minerals = {'Calcium,mg': '1300', 'Iron,mg': '8', 'Magnesium,mg': '240', 'Phosphorus,mg': '1250',\n 'Potassium,mg': 
'4500', 'Sodium,mg': '2200', 'Zinc,mg': '8', 'Copper,mcg': '700',\n 'Manganese,mg': '1.6', 'Selenium,mcg': '40'}\n Vitamins = {'Vitamin A, mg RAE': '600', 'Vitamin E,mg AT': '11', 'Vitamin D, IU': '600',\n 'Vitamin C, mg': '45', 'Thiamin, mg': '0.9', 'Riboflavin, mg': '0.9',\n 'Niacin, mg': '12', 'Vitamin B6, mg': '1', 'Vitamin B12, mcg': '1.8',\n 'Choline, mg': '375', 'Vitamin K, mcg': '60', 'Folate, mcg DFE': '300'}\n elif real_age > 13 and real_age <= 18:\n Minerals = {'Calcium,mg': '1300', 'Iron,mg': '15', 'Magnesium,mg': '360', 'Phosphorus,mg': '1250',\n 'Potassium,mg': '4700', 'Sodium,mg': '2300', 'Zinc,mg': '9', 'Copper,mcg': '890',\n 'Manganese,mg': '1.6', 'Selenium,mcg': '55'}\n Vitamins = {'Vitamin A, mg RAE': '700', 'Vitamin E,mg AT': '15', 'Vitamin D, IU': '600',\n 'Vitamin C, mg': '65', 'Thiamin, mg': '1', 'Riboflavin, mg': '1',\n 'Niacin, mg': '14', 'Vitamin B6, mg': '1.2', 'Vitamin B12, mcg': '2.4',\n 'Choline, mg': '400', 'Vitamin K, mcg': '75', 'Folate, mcg DFE': '400'}\n elif real_age > 18 and real_age <= 30:\n Minerals = {'Calcium,mg': '1000', 'Iron,mg': '18', 'Magnesium,mg': '310', 'Phosphorus,mg': '700',\n 'Potassium,mg': '4700', 'Sodium,mg': '2300', 'Zinc,mg': '8', 'Copper,mcg': '900',\n 'Manganese,mg': '1.8', 'Selenium,mcg': '55'}\n Vitamins = {'Vitamin A, mg RAE': '700', 'Vitamin E,mg AT': '15', 'Vitamin D, IU': '600',\n 'Vitamin C, mg': '75', 'Thiamin, mg': '1.1', 'Riboflavin, mg': '1.1',\n 'Niacin, mg': '14', 'Vitamin B6, mg': '1.3', 'Vitamin B12, mcg': '2.4',\n 'Choline, mg': '425', 'Vitamin K, mcg': '90', 'Folate, mcg DFE': '400'}\n elif real_age > 30 and real_age <= 50:\n Minerals = {'Calcium,mg': '1000', 'Iron,mg': '8', 'Magnesium,mg': '320', 'Phosphorus,mg': '700',\n 'Potassium,mg': '4700', 'Sodium,mg': '2300', 'Zinc,mg': '8', 'Copper,mcg': '900',\n 'Manganese,mg': '1.8', 'Selenium,mcg': '55'}\n Vitamins = {'Vitamin A, mg RAE': '700', 'Vitamin E,mg AT': '15', 'Vitamin D, IU': '600',\n 'Vitamin C, mg': '75', 'Thiamin, mg': '1.1', 'Riboflavin, mg': '1.1',\n 'Niacin, mg': '14', 'Vitamin B6, mg': '1.3', 'Vitamin B12, mcg': '2.4',\n 'Choline, mg': '425', 'Vitamin K, mcg': '120', 'Folate, mcg DFE': '400'}\n elif real_age > 50:\n\n Minerals = {'Calcium,mg': '1200', 'Iron,mg': '8', 'Magnesium,mg': '320', 'Phosphorus,mg': '700',\n 'Potassium,mg': '4700', 'Sodium,mg': '2300', 'Zinc,mg': '8', 'Copper,mcg': '900',\n 'Manganese,mg': '1.8', 'Selenium,mcg': '55'}\n Vitamins = {'Vitamin A, mg RAE': '700', 'Vitamin E,mg AT': '15', 'Vitamin D, IU': '600',\n 'Vitamin C, mg': '75', 'Thiamin, mg': '1.1', 'Riboflavin, mg': '1.1',\n 'Niacin, mg': '14', 'Vitamin B6, mg': '1.5', 'Vitamin B12, mcg': '2.4',\n 'Choline, mg': '425', 'Vitamin K, mcg': '90', 'Folate, mcg DFE': '400'}\n else:\n print ('error')\n\n Calories = Calories * 3\n nutrition_list = [Calories, Macronutrients['Protein,g'], \n Macronutrients['Carbonhydrate,g'],Macronutrients['DietaryFiber,g'], \n Macronutrients['TotalFat,kcal'], Minerals['Calcium,mg'], \n Minerals['Iron,mg'], Minerals['Magnesium,mg'],\n Minerals['Phosphorus,mg'], Minerals['Potassium,mg'], Minerals['Sodium,mg'], Minerals['Zinc,mg'],\n Minerals['Manganese,mg'], Minerals['Selenium,mcg'], Vitamins['Vitamin A, mg RAE'], Vitamins['Vitamin E,mg AT'],\n Vitamins['Vitamin C, mg'], Vitamins['Riboflavin, mg'],\n Vitamins['Niacin, mg'], Vitamins['Vitamin B6, mg'], Vitamins['Vitamin B12, mcg'], Vitamins['Choline, mg'],\n Vitamins['Vitamin K, mcg'], Vitamins['Folate, mcg DFE']]\n for i in range(len(nutrition_list)):\n nutrition_list[i]=float(nutrition_list[i])\n 
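# Resulting layout (24 floats): [kcal, protein_g, carb_g, fiber_g, fat, Ca_mg, Fe_mg, Mg_mg,\n # P_mg, K_mg, Na_mg, Zn_mg, Mn_mg, Se_mcg, vitA, vitE, vitC, riboflavin_mg, niacin_mg,\n # B6_mg, B12_mcg, choline_mg, vitK_mcg, folate_mcg]\n 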
#print (nutrition_list)\n\n return nutrition_list\n\n#print(nutrition(175, 55, 24, 'female', 'Build Muscle', 'Active'))","sub_path":"reciperecommend/Nutrition_Extraction.py","file_name":"Nutrition_Extraction.py","file_ext":"py","file_size_in_byte":12689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"272442567","text":"from bs4 import BeautifulSoup\nimport requests\n\n\ndef craigslist_request_content():\n # get response from craigslist software dev jobs board\n response = requests.get('https://portland.craigslist.org/search/sof')\n return response.content\n\ndef scrape_craigslist():\n # scrape the result for the list of objects\n soup = BeautifulSoup(craigslist_request_content(), 'html.parser')\n result_rows = soup.find_all('p', class_='result-info')\n titles = []\n for row in result_rows:\n titles += row.find('a', class_='result-title').contents\n\n return titles\nscrape_craigslist()\n","sub_path":"just_scraping_by/scrape_o_matic/craigslist_scraper.py","file_name":"craigslist_scraper.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532763909","text":"import argparse\nfrom datetime import datetime\nfrom mpr.env import get_db_connstr\nfrom mpr.db import DB, Property\nfrom mpr.extractor import MyDwellworksPropertiesExtractor\n\n\ndef populate():\n db = DB.singleton()\n\n extractor = MyDwellworksPropertiesExtractor()\n raw_props = extractor.extract_properties()\n\n with db.session() as sess:\n for raw_prop in raw_props:\n raw_pp = raw_prop['property']\n prop = Property(\n name=raw_pp['display_name'],\n description=raw_pp['description'],\n address=raw_pp['address'],\n location='POINT(%f %f)' % (raw_pp['longitude'], raw_pp['latitude']),\n rent=raw_prop['rent'],\n parking_fee=raw_pp['parking_fee'],\n size=float(raw_pp['size'].split(';')[1].strip()),\n typ=raw_pp['type'],\n included_utilities=','.join(raw_pp['included_utilities'] or ''),\n excluded_utilities=','.join(raw_pp['excluded_utilities'] or ''),\n archived=raw_prop['archived'],\n date_available=datetime.strptime(raw_prop['date_available'], '%Y-%m-%d'),\n )\n sess.add(prop)\n\n\ndef query():\n db = DB.singleton()\n\n with db.session() as sess:\n q = sess.query(Property).order_by(Property.location)\n for prop in q:\n print(prop.location)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(required=True, dest='command')\n subparsers.add_parser('populate')\n subparsers.add_parser('query')\n args = parser.parse_args()\n command = args.command\n if command == 'populate':\n populate()\n elif command == 'query':\n query()\n else:\n exit(1)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"11781802","text":"import os\nfrom blockchain import util\nutil.TIMEOUT = 60\n\ngraph_mode = False\nnum_features = 30 # !!!!!!! 
MAKE SURE CORRECT\nnum_graph_features = 14\narray_size = 500\ntest_addresses = 20000 # full dataset size\naddresses_per_block = 250 # addresses taken per block for the control set\n\nlast_line = 21\nresume = False\n\ntest_set = 0.1\ntraining_set = 0.9\nlast_block = 426541\nfirst_block = 321421\nsatoshi = 100000000\n\napi_code = '87653cbb-53be-4995-be42-9a015941757b' # Arun\n#api_code = '0b3fcd09-789a-4694-ae81-a1dd584d691d' # Kieran\n\naddr_colour = [0, 153, 00] # green\nout_addr_colour = [255, 128, 0] # orange\nin_addr_colour = [70, 0, 130] # Indigo\nco_in_addr_colour = [204, 153, 255] # Light Indigo\nco_out_addr_colour = [255, 178, 102] # Light Orange\n\nout_tx_colour = [0, 0, 0]\nin_tx_colour = [255, 255, 255]\nco_in_tx_colour = [255, 255, 255]\nco_out_tx_colour = [0, 0, 0]\n\ntx_out_colour = [0, 0, 0]\ntx_in_colour = [255, 255, 255]\n\n\ngraph_features = [5, 6, 7, 8, 12, 13, 14, 15, 16, 17, 26, 27, 28, 29]\n\nfeature_dict = {\"received\": 0,\n \"received_usd\": 1,\n \"sent\": 2,\n \"sent_usd\": 3,\n \"balance\": 4,\n \"num_tx\": 5, # graph feature\n \"num_change_tx\": 6, # graph feature\n \"num_sending_tx\": 7, # graph feature\n \"num_receiving_tx\": 8, # graph feature\n \"CB_tx\": 9,\n \"num_blocks\": 10,\n \"first_block\": 11,\n \"uniq_input_addr\": 12, # graph feature\n \"uniq_output_addr\": 13, # graph feature\n \"interesting_inputs\": 14, # graph feature\n \"interesting_outputs\": 15, # graph feature\n \"coinput_addresses\": 16, # graph feature\n \"cooutput_addresses\": 17, # graph feature\n \"avg_sent_by_in_addr\": 18,\n \"avg_sent_by_co_in_addr\": 19,\n \"avg_sent_by_out_addr\": 20,\n \"avg_sent_by_co_out_addr\": 21,\n \"avg_received_by_in_addr\": 22,\n \"avg_received_by_co_in_addr\": 23,\n \"avg_received_by_out_addr\": 24,\n \"avg_received_by_co_out_addr\": 25,\n \"avg_num_tx_by_in_addr\": 26, # graph feature\n \"avg_num_tx_by_out_addr\": 27, # graph feature\n \"avg_num_tx_by_co_in_addr\": 28, # graph feature\n \"avg_num_tx_by_co_out_addr\": 29} # graph feature\n\nfeature_list = [\"received\", # 0\n \"received_usd\", # 1\n \"sent\", # 2\n \"sent_usd\", # 3\n \"balance\", # 4\n \"num_tx\", # 5 # graph feature\n \"num_change_tx\", # 6 # graph feature\n \"num_sending_tx\", # 7 # graph feature\n \"num_receiving_tx\", # 8 # graph feature\n \"CB_tx\", # 9\n \"num_blocks\", # 10\n \"first_block\", # 11\n \"uniq_input_addr\", # 12 # graph feature\n \"uniq_output_addr\", # 13 # graph feature\n \"interesting_inputs\", # 14 # graph feature\n \"interesting_outputs\", # 15 # graph feature\n \"coinput_addresses\", # 16 # graph feature\n \"cooutput_addresses\", # 17 # graph feature\n \"avg_sent_by_in_addr\", # 18\n \"avg_sent_by_co_in_addr\", # 19\n \"avg_sent_by_out_addr\", # 20\n \"avg_sent_by_co_out_addr\", # 21\n \"avg_received_by_in_addr\", # 22\n \"avg_received_by_co_in_addr\", # 23\n \"avg_received_by_out_addr\", # 24\n \"avg_received_by_co_out_addr\", # 25\n \"avg_num_tx_by_in_addr\", # 26 # graph feature\n \"avg_num_tx_by_out_addr\", # 27 # graph feature\n \"avg_num_tx_by_co_in_addr\", # 28 # graph feature\n \"avg_num_tx_by_co_out_addr\"] # 29 # graph feature\n\n\ngraph_feature_list = [feature_list[x] for x in graph_features]\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346346533","text":"import requests\nimport json\n\ndef getjson(loc,page_num=0):\n url = 'http://api.map.baidu.com/place/v2/search?'\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3650.400 QQBrowser/10.4.3341.400'}\n pa = {'q':'公园', # search keyword ('公园' = park)\n 'region': loc,\n 'scope':'2',\n 'page_size':'20',\n 'output':'json',\n 'ak':'1eHRqys109MfLs4P3oA22qKXieLdKfh3',}\n r = requests.get(url,params=pa,headers=headers)\n decodejson = json.loads(r.text)\n return decodejson\n\ntext = getjson('广州市') # Guangzhou\n\n\nfor i in text['results']:\n print(i['name'])\n print(i['address'])\n\n# Each account is limited to 2,000 API calls per day; verified accounts get up to 100,000 calls per day\n\n'''\nParameter Required Default Example Meaning\nq yes none restaurant, park Search keyword\nregion no none Beijing, nationwide Search region (city-level or larger administrative area)\nscope yes 1 1, 2 Level of detail of the results. 1 or empty returns\n basic info; 2 returns detailed POI info\npage_size yes 10 10-20 Number of records returned, default 10, max 20\npage_num no 0 0, 1, 2 Page number, default 0 (0 = first page, 1 = second\n page, and so on)\noutput no xml xml, json Output format, json or xml\nak yes none your key User access key, required\n'''\n# Docs: https://lbsyun.baidu.com/index.php?title=webapi/guide/webservice-placeapi\n\n\n\n\n\n\n\n\n\n","sub_path":"第七章Api的使用/获取饭店和公园的地点api.py","file_name":"获取饭店和公园的地点api.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556639628","text":"import torch\nimport torch.nn as nn\nimport torch.nn.parallel\nfrom torch.autograd import Variable\nfrom torchvision import models\nimport torch.utils.model_zoo as model_zoo\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom cfg.config import cfg\nfrom GLAttention import GLAttentionGeneral as ATT_NET\n\n\nclass GLU(nn.Module):\n def __init__(self):\n super(GLU, self).__init__()\n\n def forward(self, x):\n nc = x.size(1)\n assert nc % 2 == 0, 'channels don\'t divide 2!'\n nc = int(nc / 2)\n return x[:, :nc] * F.sigmoid(x[:, nc:])\n\n\ndef conv1x1(in_planes, out_planes, bias=False):\n \"1x1 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,\n padding=0, bias=bias)\n\n\ndef conv3x3(in_planes, out_planes):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n\n\n# Upscale the spatial size by a factor of 2\ndef upBlock(in_planes, out_planes):\n block = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='nearest'),\n conv3x3(in_planes, out_planes * 2),\n nn.BatchNorm2d(out_planes * 2),\n GLU())\n return block\n\n\n# Keep the spatial size\ndef Block3x3_relu(in_planes, out_planes):\n block = nn.Sequential(\n conv3x3(in_planes, out_planes * 2),\n nn.BatchNorm2d(out_planes * 2),\n GLU())\n return block\n\n\nclass ResBlock(nn.Module):\n def __init__(self, channel_num):\n # 32 * 2 = 64\n super(ResBlock, self).__init__()\n self.block = nn.Sequential(\n conv3x3(channel_num, channel_num * 2), # 64 -> 128\n nn.BatchNorm2d(channel_num * 2), #\n GLU(), # 64\n conv3x3(channel_num, channel_num), # 64\n nn.BatchNorm2d(channel_num))\n\n def forward(self, x):\n residual = x\n out = self.block(x)\n out += residual\n return out\n\n\n# ############## Text2Image Encoder-Decoder #######\nclass RNN_ENCODER(nn.Module):\n def __init__(self, ntoken, ninput=300, drop_prob=0.5,\n nhidden=128, nlayers=1, bidirectional=True):\n super(RNN_ENCODER, self).__init__()\n self.n_steps = cfg.TEXT.WORDS_NUM\n self.ntoken = ntoken # size of the dictionary\n self.ninput = ninput # size of each embedding vector\n self.drop_prob = drop_prob # probability of an element to be zeroed\n self.nlayers = nlayers # Number of recurrent layers\n self.bidirectional = bidirectional\n self.rnn_type = cfg.RNN_TYPE\n if bidirectional:\n self.num_directions 
= 2\n else:\n self.num_directions = 1\n # number of features in the hidden state\n self.nhidden = nhidden // self.num_directions\n\n self.define_module()\n self.init_weights()\n\n def define_module(self):\n self.encoder = nn.Embedding(self.ntoken, self.ninput)\n self.drop = nn.Dropout(self.drop_prob)\n if self.rnn_type == 'LSTM':\n # dropout: If non-zero, introduces a dropout layer on\n # the outputs of each RNN layer except the last layer\n self.rnn = nn.LSTM(self.ninput, self.nhidden,\n self.nlayers, batch_first=True,\n dropout=self.drop_prob,\n bidirectional=self.bidirectional)\n elif self.rnn_type == 'GRU':\n self.rnn = nn.GRU(self.ninput, self.nhidden,\n self.nlayers, batch_first=True,\n dropout=self.drop_prob,\n bidirectional=self.bidirectional)\n else:\n raise NotImplementedError\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n # Do not need to initialize RNN parameters, which have been initialized\n # http://pytorch.org/docs/master/_modules/torch/nn/modules/rnn.html#LSTM\n # self.decoder.weight.data.uniform_(-initrange, initrange)\n # self.decoder.bias.data.fill_(0)\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n if self.rnn_type == 'LSTM':\n return (Variable(weight.new(self.nlayers * self.num_directions,\n bsz, self.nhidden).zero_()),\n Variable(weight.new(self.nlayers * self.num_directions,\n bsz, self.nhidden).zero_()))\n else:\n return Variable(weight.new(self.nlayers * self.num_directions,\n bsz, self.nhidden).zero_())\n\n def forward(self, captions, cap_lens, hidden, mask=None):\n # input: torch.LongTensor of size batch x n_steps\n # --> emb: batch x n_steps x ninput\n emb = self.drop(self.encoder(captions))\n #\n # Returns: a PackedSequence object\n cap_lens = cap_lens.data.tolist()\n emb = pack_padded_sequence(emb, cap_lens, batch_first=True)\n # #hidden and memory (num_layers * num_directions, batch, hidden_size):\n # tensor containing the initial hidden state for each element in batch.\n # #output (batch, seq_len, hidden_size * num_directions)\n # #or a PackedSequence object:\n # tensor containing output features (h_t) from the last layer of RNN\n output, hidden = self.rnn(emb, hidden)\n # PackedSequence object\n # --> (batch, seq_len, hidden_size * num_directions)\n output = pad_packed_sequence(output, batch_first=True)[0]\n # output = self.drop(output)\n # --> batch x hidden_size*num_directions x seq_len\n words_emb = output.transpose(1, 2)\n # --> batch x num_directions*hidden_size\n if self.rnn_type == 'LSTM':\n sent_emb = hidden[0].transpose(0, 1).contiguous()\n else:\n sent_emb = hidden.transpose(0, 1).contiguous()\n sent_emb = sent_emb.view(-1, self.nhidden * self.num_directions)\n return words_emb, sent_emb\n\n\nclass CNN_ENCODER(nn.Module):\n def __init__(self, nef):\n super(CNN_ENCODER, self).__init__()\n if cfg.TRAIN.FLAG:\n self.nef = nef\n else:\n self.nef = 256 # define a uniform ranker\n\n model = models.inception_v3()\n url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'\n model.load_state_dict(model_zoo.load_url(url))\n for param in model.parameters():\n param.requires_grad = False\n print('Load pretrained model from ', url)\n # print(model)\n\n self.define_module(model)\n self.init_trainable_weights()\n\n def define_module(self, model):\n self.Conv2d_1a_3x3 = model.Conv2d_1a_3x3\n self.Conv2d_2a_3x3 = model.Conv2d_2a_3x3\n self.Conv2d_2b_3x3 = model.Conv2d_2b_3x3\n self.Conv2d_3b_1x1 = model.Conv2d_3b_1x1\n self.Conv2d_4a_3x3 = 
model.Conv2d_4a_3x3\n self.Mixed_5b = model.Mixed_5b\n self.Mixed_5c = model.Mixed_5c\n self.Mixed_5d = model.Mixed_5d\n self.Mixed_6a = model.Mixed_6a\n self.Mixed_6b = model.Mixed_6b\n self.Mixed_6c = model.Mixed_6c\n self.Mixed_6d = model.Mixed_6d\n self.Mixed_6e = model.Mixed_6e\n self.Mixed_7a = model.Mixed_7a\n self.Mixed_7b = model.Mixed_7b\n self.Mixed_7c = model.Mixed_7c\n\n self.emb_features = conv1x1(768, self.nef)\n self.emb_cnn_code = nn.Linear(2048, self.nef)\n\n def init_trainable_weights(self):\n initrange = 0.1\n self.emb_features.weight.data.uniform_(-initrange, initrange)\n self.emb_cnn_code.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, x):\n features = None\n # --> fixed-size input: batch x 3 x 299 x 299\n x = nn.Upsample(size=(299, 299), mode='bilinear')(x)\n # 299 x 299 x 3\n x = self.Conv2d_1a_3x3(x)\n # 149 x 149 x 32\n x = self.Conv2d_2a_3x3(x)\n # 147 x 147 x 32\n x = self.Conv2d_2b_3x3(x)\n # 147 x 147 x 64\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 73 x 73 x 64\n x = self.Conv2d_3b_1x1(x)\n # 73 x 73 x 80\n x = self.Conv2d_4a_3x3(x)\n # 71 x 71 x 192\n\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 35 x 35 x 192\n x = self.Mixed_5b(x)\n # 35 x 35 x 256\n x = self.Mixed_5c(x)\n # 35 x 35 x 288\n x = self.Mixed_5d(x)\n # 35 x 35 x 288\n\n x = self.Mixed_6a(x)\n # 17 x 17 x 768\n x = self.Mixed_6b(x)\n # 17 x 17 x 768\n x = self.Mixed_6c(x)\n # 17 x 17 x 768\n x = self.Mixed_6d(x)\n # 17 x 17 x 768\n x = self.Mixed_6e(x)\n # 17 x 17 x 768\n\n # image region features\n features = x\n # 17 x 17 x 768\n\n x = self.Mixed_7a(x)\n # 8 x 8 x 1280\n x = self.Mixed_7b(x)\n # 8 x 8 x 2048\n x = self.Mixed_7c(x)\n # 8 x 8 x 2048\n x = F.avg_pool2d(x, kernel_size=8)\n # 1 x 1 x 2048\n # x = F.dropout(x, training=self.training)\n # 1 x 1 x 2048\n x = x.view(x.size(0), -1)\n # 2048\n\n # global image features\n cnn_code = self.emb_cnn_code(x)\n # 512\n if features is not None:\n features = self.emb_features(features)\n return features, cnn_code\n\n\n# ############## G networks ###################\nclass CA_NET(nn.Module):\n # some code is modified from vae examples\n # (https://github.com/pytorch/examples/blob/master/vae/main.py)\n def __init__(self):\n super(CA_NET, self).__init__()\n self.t_dim = cfg.TEXT.EMBEDDING_DIM # 256\n self.c_dim = cfg.GAN.CONDITION_DIM # 100\n self.fc = nn.Linear(self.t_dim, self.c_dim * 4, bias=True) # 256 -> 100 * 4\n self.relu = GLU()\n\n def encode(self, text_embedding):\n x = self.relu(self.fc(text_embedding)) # because input is the sent_emb (16, 256) -> (16, 100 * 4)\n mu = x[:, :self.c_dim] # (16, 100)\n logvar = x[:, self.c_dim:] # (16, 100 - 400)\n return mu, logvar\n\n def reparametrize(self, mu, logvar):\n std = logvar.mul(0.5).exp_() # (16, 300)\n if cfg.CUDA:\n eps = torch.cuda.FloatTensor(std.size()).normal_()\n else:\n eps = torch.FloatTensor(std.size()).normal_()\n eps = Variable(eps)\n return eps.mul(std).add_(\n mu) # shape (16, 300) sample from normal dist -- mul-- (16, 300) -- add -- mu (16, 100)\n\n def forward(self, text_embedding):\n mu, logvar = self.encode(text_embedding)\n c_code = self.reparametrize(mu, logvar)\n return c_code, mu, logvar # c_code (16, 100)\n\n\nclass INIT_STAGE_G(nn.Module):\n def __init__(self, ngf, ncf):\n super(INIT_STAGE_G, self).__init__()\n self.gf_dim = ngf # 32 * 16 the generator feature vector length\n self.in_dim = cfg.GAN.Z_DIM + ncf # ncf = cfg.GAN.CONDITION_DIM = 100 (which is generated from CA net, so it is not the sent_emb size) Z_DIM = 100\n\n 
self.define_module()\n\n def define_module(self):\n nz, ngf = self.in_dim, self.gf_dim\n self.fc = nn.Sequential(\n nn.Linear(nz, ngf * 4 * 4 * 2, bias=False), # 200 -> (32 * 16) * 4 * 4 * 2\n nn.BatchNorm1d(ngf * 4 * 4 * 2),\n GLU()) # (32 * 16) * 4 * 4 * sigmoid (32 * 16) * 4 * 4 first half * second half\n # output 32 * 16 * 4 * 4\n\n self.upsample1 = upBlock(ngf, ngf // 2)\n self.upsample2 = upBlock(ngf // 2, ngf // 4)\n self.upsample3 = upBlock(ngf // 4, ngf // 8)\n self.upsample4 = upBlock(ngf // 8, ngf // 16)\n\n def forward(self, z_code, c_code):\n \"\"\"\n :param z_code: batch x cfg.GAN.Z_DIM\n :param c_code: batch x cfg.TEXT.EMBEDDING_DIM\n :return: batch x ngf/16 x 64 x 64\n \"\"\"\n c_z_code = torch.cat((c_code, z_code), 1) # 100 + 256\n # state size ngf x 4 x 4 ngf = 32 * 16\n out_code = self.fc(c_z_code) # 32 * 16 * 4 * 4\n out_code = out_code.view(-1, self.gf_dim, 4, 4) # [1, 32 * 16, 4, 4] 1d -> 4d\n\n # state size ngf/2 x 8 x 8\n out_code = self.upsample1(\n out_code) # [1, 32 * 16, 4, 4] -> [1, 32 * 16, 8, 8](upsample) -> [1, 32 * 16, 8, 8](conv3X3) -> [1, 32 * 8, 8, 8](GLU)([:, :nc] is only effective for the second dimension)\n\n # state size ngf/4 x 16 x 16\n out_code = self.upsample2(out_code) # [1, 32 * 8, 8, 8] -> [1, 32 * 4, 16, 16]\n\n # state size ngf/8 x 32 x 32\n out_code32 = self.upsample3(out_code) # [1, 32 * 4, 16, 16] -> [1, 32 * 2, 32, 32]\n\n # state size ngf/16 x 64 x 64\n out_code64 = self.upsample4(out_code32) # [1, 32 * 2, 32, 32] -> [1, 32 * 1, 64, 64]\n\n return out_code64\n\n\n# class NEXT_STAGE_G(nn.Module):\n# def __init__(self, ngf, nef, ncf):\n# super(NEXT_STAGE_G, self).__init__()\n# self.gf_dim = ngf\n# self.ef_dim = nef\n# self.cf_dim = ncf\n# self.num_residual = cfg.GAN.R_NUM\n# self.define_module()\n#\n# def _make_layer(self, block, channel_num):\n# layers = []\n# for i in range(cfg.GAN.R_NUM):\n# layers.append(block(channel_num))\n# return nn.Sequential(*layers)\n#\n# def define_module(self):\n# ngf = self.gf_dim\n# self.att = ATT_NET(ngf, self.ef_dim)\n# self.residual = self._make_layer(ResBlock, ngf * 2)\n# self.upsample = upBlock(ngf * 2, ngf)\n#\n# def forward(self, h_code, c_code, word_embs, mask):\n# \"\"\"\n# h_code1(query): batch x idf x ih x iw (queryL=ihxiw)\n# word_embs(context): batch x cdf x sourceL (sourceL=seq_len)\n# c_code1: batch x idf x queryL\n# att1: batch x sourceL x queryL\n# \"\"\"\n# self.att.applyMask(mask)\n# c_code, att = self.att(h_code, word_embs)\n# h_c_code = torch.cat((h_code, c_code), 1)\n# print('h_c_code:', h_c_code.size()) \\\n# ('h_c_code:', (16, 64, 64, 64))\n# ('h_c_code:', (16, 64, 128, 128))\n# out_code = self.residual(h_c_code)\n#\n# # state size ngf/2 x 2in_size x 2in_size\n# out_code = self.upsample(out_code)\n#\n# return out_code, att\n\n\nclass NEXT_STAGE_G(nn.Module):\n def __init__(self, ngf, nef, ncf):\n super(NEXT_STAGE_G, self).__init__()\n self.gf_dim = ngf # g net feature 32\n self.ef_dim = nef # text embedding size 256\n self.cf_dim = ncf # condition dim 100\n # print(ngf, nef, ncf) (32, 256, 100)\n # (32, 256, 100)\n self.num_residual = cfg.GAN.R_NUM # cfg.GAN.R_NUM = 2\n self.define_module()\n self.conv = conv1x1(ngf * 3, ngf * 2)\n\n def _make_layer(self, block, channel_num):\n # block ResBlock, channel_num 32 * 2\n layers = []\n for i in range(cfg.GAN.R_NUM): # 2\n layers.append(block(channel_num))\n return nn.Sequential(*layers)\n\n def define_module(self):\n ngf = self.gf_dim\n self.att = ATT_NET(ngf, self.ef_dim)\n self.residual = self._make_layer(ResBlock, ngf * 2)\n 
self.upsample = upBlock(ngf * 2, ngf)\n\n def forward(self, h_code, c_code, word_embs, mask):\n \"\"\"\n h_code1(query): batch x idf x ih x iw (queryL=ihxiw)\n word_embs(context): batch x cdf x sourceL (sourceL=seq_len)\n c_code1: batch x idf x queryL\n att1: batch x sourceL x queryL\n \"\"\"\n # print('========')\n # ((16, 32, 64, 64), (16, 100), (16, 256, 18), (16, 18))\n # print(h_code.size(), c_code.size(), word_embs.size(), mask.size())\n self.att.applyMask(mask)\n # here, a new c_code is generated by self.att() method.\n # weightedContext, weightedSentence, word_attn, sent_vs_att\n c_code, weightedSentence, att, sent_att = self.att(h_code, c_code, word_embs)\n # Then, image feature are concated with a new c_code, they become h_c_code,\n # so, here I can make some change, to concate more items together.\n # which means I need to get more output from line 369, self.att()\n # also, I need to feed more information to calculate the function, and let's see what the new idea will return.\n h_c_code = torch.cat((h_code, c_code), 1)\n # print('h_c_code.size:', h_c_code.size()) # ('h_c_code.size:', (16, 64, 64, 64))\n h_c_sent_code = torch.cat((h_c_code, weightedSentence), 1)\n # print('h_c_sent_code.size:', h_c_sent_code.size())\n # ('h_c_code.size:', (16, 64, 64, 64))\n # ('h_c_sent_code.size:', (16, 96, 64, 64))\n h_c_sent_code = self.conv(h_c_sent_code) # (16, 96, 64, 64) -> (16, 64, 64, 64)\n out_code = self.residual(h_c_sent_code) # (16, 64, 64, 64) -> 2 residual -> (16, 64, 64, 64)\n # print('out_code:', out_code.size())\n # state size ngf/2 x 2in_size x 2in_size\n out_code = self.upsample(\n out_code) # (16, 64, 64, 64) -> (16, 32, 128, 128)(second G net) -> (16, 32, 256, 256)(third G net)\n return out_code, att\n\n\nclass GET_IMAGE_G(nn.Module):\n def __init__(self, ngf):\n super(GET_IMAGE_G, self).__init__()\n self.gf_dim = ngf\n self.img = nn.Sequential(\n conv3x3(ngf, 3),\n nn.Tanh()\n )\n\n def forward(self, h_code):\n out_img = self.img(h_code)\n return out_img\n\n\n# G_NET used in the paper\nclass G_NET(nn.Module):\n def __init__(self):\n super(G_NET, self).__init__()\n ngf = cfg.GAN.GF_DIM\n nef = cfg.TEXT.EMBEDDING_DIM\n ncf = cfg.GAN.CONDITION_DIM\n self.ca_net = CA_NET()\n\n if cfg.TREE.BRANCH_NUM > 0:\n self.h_net1 = INIT_STAGE_G(ngf * 16, ncf) # [batch, 32, 64, 64]\n self.img_net1 = GET_IMAGE_G(ngf) # [batch, 3, 64, 64]\n # gf x 64 x 64\n if cfg.TREE.BRANCH_NUM > 1:\n self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)\n self.img_net2 = GET_IMAGE_G(ngf)\n if cfg.TREE.BRANCH_NUM > 2:\n self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)\n self.img_net3 = GET_IMAGE_G(ngf)\n\n # netG(noise, sent_emb, words_embs, mask)\n def forward(self, z_code, sent_emb, word_embs, mask):\n \"\"\"\n :param z_code: batch x cfg.GAN.Z_DIM\n :param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM\n :param word_embs: batch x cdf x seq_len\n :param mask: batch x seq_len\n :return:\n \"\"\"\n fake_imgs = []\n att_maps = []\n '''this is the Conditioning Augmentation'''\n # print('sent_emb:', sent_emb.size()) #('sent_emb:', (16, 256))\n c_code, mu, logvar = self.ca_net(sent_emb) # (16, 256) -> (16, 100)\n # print('=====')\n # print('first c_code.size():', c_code.size()) #(16, 100)\n # print('=====')\n if cfg.TREE.BRANCH_NUM > 0:\n h_code1 = self.h_net1(z_code, c_code)\n fake_img1 = self.img_net1(h_code1)\n fake_imgs.append(fake_img1)\n if cfg.TREE.BRANCH_NUM > 1:\n h_code2, att1 = \\\n self.h_net2(h_code1, c_code, word_embs, mask)\n fake_img2 = self.img_net2(h_code2)\n fake_imgs.append(fake_img2)\n if att1 is not None:\n 
att_maps.append(att1)\n if cfg.TREE.BRANCH_NUM > 2:\n h_code3, att2 = \\\n self.h_net3(h_code2, c_code, word_embs, mask)\n fake_img3 = self.img_net3(h_code3)\n fake_imgs.append(fake_img3)\n if att2 is not None:\n att_maps.append(att2)\n\n return fake_imgs, att_maps, mu, logvar\n\n\nclass G_DCGAN(nn.Module):\n def __init__(self):\n super(G_DCGAN, self).__init__()\n ngf = cfg.GAN.GF_DIM\n nef = cfg.TEXT.EMBEDDING_DIM\n ncf = cfg.GAN.CONDITION_DIM\n self.ca_net = CA_NET()\n\n # 16gf x 64 x 64 --> gf x 64 x 64 --> 3 x 64 x 64\n if cfg.TREE.BRANCH_NUM > 0:\n self.h_net1 = INIT_STAGE_G(ngf * 16, ncf)\n # gf x 64 x 64\n if cfg.TREE.BRANCH_NUM > 1:\n self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)\n if cfg.TREE.BRANCH_NUM > 2:\n self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)\n self.img_net = GET_IMAGE_G(ngf)\n\n def forward(self, z_code, sent_emb, word_embs, mask):\n \"\"\"\n :param z_code: batch x cfg.GAN.Z_DIM\n :param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM\n :param word_embs: batch x cdf x seq_len\n :param mask: batch x seq_len\n :return:\n \"\"\"\n att_maps = []\n c_code, mu, logvar = self.ca_net(sent_emb)\n if cfg.TREE.BRANCH_NUM > 0:\n h_code = self.h_net1(z_code, c_code)\n if cfg.TREE.BRANCH_NUM > 1:\n h_code, att1 = self.h_net2(h_code, c_code, word_embs, mask)\n if att1 is not None:\n att_maps.append(att1)\n if cfg.TREE.BRANCH_NUM > 2:\n h_code, att2 = self.h_net3(h_code, c_code, word_embs, mask)\n if att2 is not None:\n att_maps.append(att2)\n\n fake_imgs = self.img_net(h_code)\n return [fake_imgs], att_maps, mu, logvar\n\n\n# ############## D networks ##########################\ndef Block3x3_leakRelu(in_planes, out_planes):\n block = nn.Sequential(\n conv3x3(in_planes, out_planes),\n nn.BatchNorm2d(out_planes),\n nn.LeakyReLU(0.2, inplace=True)\n )\n return block\n\n\n# Downscale the spatial size by a factor of 2\ndef downBlock(in_planes, out_planes):\n block = nn.Sequential(\n nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.LeakyReLU(0.2, inplace=True)\n )\n return block\n\n\n# Downscale the spatial size by a factor of 16\ndef encode_image_by_16times(ndf):\n encode_img = nn.Sequential(\n # --> state size. 
ndf x in_size/2 x in_size/2\n nn.Conv2d(3, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # --> state size 2ndf x x in_size/4 x in_size/4\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # --> state size 4ndf x in_size/8 x in_size/8\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # --> state size 8ndf x in_size/16 x in_size/16\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True)\n )\n return encode_img\n\n\nclass D_GET_LOGITS(nn.Module):\n def __init__(self, ndf, nef, bcondition=False):\n super(D_GET_LOGITS, self).__init__()\n self.df_dim = ndf\n self.ef_dim = nef\n self.bcondition = bcondition\n if self.bcondition:\n self.jointConv = Block3x3_leakRelu(ndf * 8 + nef, ndf * 8) # (32 * 8 + 256, 32 * 8)\n\n self.outlogits = nn.Sequential(\n nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),\n nn.Sigmoid())\n\n def forward(self, h_code, c_code=None):\n if self.bcondition and c_code is not None:\n # conditioning output\n c_code = c_code.view(-1, self.ef_dim, 1, 1) # (32?, 256, 1, 1)\n c_code = c_code.repeat(1, 1, 4, 4) # (32?, 256, 4, 4)\n # state size (ngf+egf) x 4 x 4\n h_c_code = torch.cat((h_code, c_code), 1) # (32 + 256, 4, 4)\n # state size ngf x in_size x in_size\n h_c_code = self.jointConv(h_c_code) # (32 * 8 + 256, 32 * 8)\n else:\n h_c_code = h_code\n output = self.outlogits(h_c_code) # (32 * 8) -> (1)\n return output.view(-1)\n\n\n# For 64 x 64 images\nclass D_NET64(nn.Module):\n def __init__(self, b_jcu=True):\n super(D_NET64, self).__init__()\n ndf = cfg.GAN.DF_DIM\n nef = cfg.TEXT.EMBEDDING_DIM\n self.img_code_s16 = encode_image_by_16times(ndf)\n if b_jcu:\n self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)\n else:\n self.UNCOND_DNET = None\n self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)\n\n def forward(self, x_var):\n x_code4 = self.img_code_s16(x_var) # 4 x 4 x 8df\n return x_code4\n\n\n# For 128 x 128 images\nclass D_NET128(nn.Module):\n def __init__(self, b_jcu=True):\n super(D_NET128, self).__init__()\n ndf = cfg.GAN.DF_DIM\n nef = cfg.TEXT.EMBEDDING_DIM\n self.img_code_s16 = encode_image_by_16times(ndf)\n self.img_code_s32 = downBlock(ndf * 8, ndf * 16)\n self.img_code_s32_1 = Block3x3_leakRelu(ndf * 16, ndf * 8)\n #\n if b_jcu:\n self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)\n else:\n self.UNCOND_DNET = None\n self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)\n\n def forward(self, x_var):\n x_code8 = self.img_code_s16(x_var) # 8 x 8 x 8df\n x_code4 = self.img_code_s32(x_code8) # 4 x 4 x 16df\n x_code4 = self.img_code_s32_1(x_code4) # 4 x 4 x 8df\n return x_code4\n\n\n# For 256 x 256 images\nclass D_NET256(nn.Module):\n def __init__(self, b_jcu=True):\n super(D_NET256, self).__init__()\n ndf = cfg.GAN.DF_DIM\n nef = cfg.TEXT.EMBEDDING_DIM\n self.img_code_s16 = encode_image_by_16times(ndf)\n self.img_code_s32 = downBlock(ndf * 8, ndf * 16)\n self.img_code_s64 = downBlock(ndf * 16, ndf * 32)\n self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)\n self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)\n if b_jcu:\n self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)\n else:\n self.UNCOND_DNET = None\n self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)\n\n def forward(self, x_var):\n x_code16 = self.img_code_s16(x_var)\n x_code8 = self.img_code_s32(x_code16)\n x_code4 = self.img_code_s64(x_code8)\n x_code4 = 
self.img_code_s64_1(x_code4)\n x_code4 = self.img_code_s64_2(x_code4)\n return x_code4\n\n\nclass CAPTION_CNN(nn.Module):\n def __init__(self, embed_size):\n \"\"\"Load the pretrained ResNet-152 and replace top fc layer.\"\"\"\n super(CAPTION_CNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n for param in self.resnet.parameters():\n param.requires_grad = False\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n\n def forward(self, images):\n \"\"\"Extract feature vectors from input images.\"\"\"\n # print ('image feature size before unsample:', images.size())\n m = nn.Upsample(size=(224, 224), mode='bilinear')\n unsampled_images = m(images)\n # print ('image feature size after unsample:', unsampled_images.size())\n features = self.resnet(unsampled_images)\n features = features.view(features.size(0), -1)\n features = self.bn(self.linear(features))\n return features\n\n\nclass CAPTION_RNN(nn.Module):\n def __init__(self, embed_size, hidden_size, vocab_size, num_layers, max_seq_length=20):\n \"\"\"Set the hyper-parameters and build the layers.\"\"\"\n super(CAPTION_RNN, self).__init__()\n self.embed = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n self.max_seg_length = max_seq_length\n\n # def forward(self, features, captions, cap_lens):\n # \"\"\"Decode image feature vectors and generates captions.\"\"\"\n # # print ('feature.size():', features.size()) #(6L, 256L)\n # # print ('captions.size():', captions.size()) # (6L, 12L)\n # # print ('embeddings.size:',embeddings.size()) #(6L, 12L, 256L)\n # embeddings = self.embed(captions)\n # embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)\n # packed = pack_padded_sequence(embeddings, cap_lens.data.tolist(), batch_first=True)\n # outputs, hidden = self.lstm(packed)\n # output = self.linear(outputs[0]) # (batch size, vocab_size)\n # return output, hidden, outputs # words embedding, sentence embedding\n\n def forward(self, features, captions, cap_lens):\n \"\"\"Decode image feature vectors and generates captions.\"\"\"\n embeddings = self.embed(captions)\n embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)\n packed = pack_padded_sequence(embeddings, cap_lens, batch_first=True)\n hiddens, _ = self.lstm(packed)\n outputs = self.linear(hiddens[0])\n return outputs\n\n def sample(self, features, states=None):\n \"\"\"Generate captions for given image features using greedy search.\"\"\"\n sampled_ids = []\n inputs = features.unsqueeze(1)\n for i in range(self.max_seg_length):\n hiddens, states = self.lstm(inputs, states) # hiddens: (batch_size, 1, hidden_size)\n outputs = self.linear(hiddens.squeeze(1)) # outputs: (batch_size, vocab_size)\n _, predicted = outputs.max(1) # predicted: (batch_size)\n sampled_ids.append(predicted)\n inputs = self.embed(predicted) # inputs: (batch_size, embed_size)\n inputs = inputs.unsqueeze(1) # inputs: (batch_size, 1, embed_size)\n sampled_ids = torch.stack(sampled_ids, 1) # sampled_ids: (batch_size, max_seq_length)\n return sampled_ids\n","sub_path":"GLAM/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":29683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"534769777","text":"import sys\nN = 
int(sys.stdin.readline())\nnums = []\nfor _ in range(N):\n n = int(sys.stdin.readline())\n nums.append(n)\n\n# Arithmetic mean\nprint(round(sum(nums) / N))\n\n# Median\nnums.sort()\nprint(nums[N // 2])\n\nfrom collections import Counter\ncountArray = Counter(nums).most_common()\nif len(countArray) > 1:\n if countArray[0][1] == countArray[1][1]:\n print(countArray[1][0])\n else:\n print(countArray[0][0])\nelse:\n print(countArray[0][0])\n# frequency = []\n# index = 0\n# for i in range(len(nums)):\n# if len(frequency) == 0:\n# frequency.append([nums[i], nums.count(nums[i])])\n# elif frequency[len(frequency) - 1][0] != nums[i]:\n# frequency.append([nums[i], nums.count(nums[i])])\n# frequency.sort(key=lambda x: (-x[1], x[0]))\n# if len(frequency) > 1 and frequency[0][1] == frequency[1][1]:\n# print(frequency[1][0])\n# else:\n# print(frequency[0][0])\n\n# Range\nprint(nums[N - 1] - nums[0])\n","sub_path":"Python_workspace/2100/2108.py","file_name":"2108.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"157085009","text":"#!/usr/bin/python3\n# -*-coding:UTF-8 -*-\n\nfrom bs4 import BeautifulSoup as bs\n\nhtml = '''\n\n \n \n \n\n'''\n\nsoup = bs(html,'html.parser')\na_tags = soup.findAll('a')\nb = soup.findAll('a',limit=3)\nfor a_tag in a_tags:\n print('a_tag[\"href\"] >>',a_tag['href'])\n print('a_tag.attrs >>',a_tag.attrs)\n print('a_tag.text >>',a_tag.text)\n \n# https://souljit2.tistory.com","sub_path":"Python/PythonAutomation/section1/beautifulsoup2.py","file_name":"beautifulsoup2.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415180173","text":"from django import forms\nfrom . 
import models\nfrom StandardInformation import models as SI_models\n\n\nclass UploadSingleOutForm(forms.ModelForm):\n class Meta:\n model = models.StockOfSingleProductOutRequest\n fields = (\n \"출하요청수량\",\n \"출하희망일\",\n \"수취인\",\n \"수취인주소\",\n \"연락처\",\n )\n help_texts = {\n \"출하희망일\": \"*형식 : (yyyy-mm-dd) (필수항목이 아닙니다.)\",\n }\n widgets = {}\n\n def save(self, *arg, **kwargs):\n partner = super().save(commit=False)\n return partner\n\n\nclass UploadSingleInForm(forms.ModelForm):\n class Meta:\n model = models.StockOfSingleProductInRequest\n fields = (\n \"입고요청수량\",\n \"입고요청일\",\n )\n help_texts = {\n \"입고요청일\": \"*형식 : (yyyy-mm-dd) (필수항목이 아닙니다.)\",\n }\n widgets = {}\n\n def save(self, *arg, **kwargs):\n partner = super().save(commit=False)\n return partner\n","sub_path":"stocksingle/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537094995","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport logging\nimport signal\nimport sys\n\nimport mamonsu.lib.platform as platform\n\nfrom mamonsu.lib.plugin import Plugin\nfrom mamonsu.lib.config import Config\nfrom mamonsu.lib.zbx import *\nfrom mamonsu.plugins import Loader as PluginLoader\n\n\nclass Supervisor(object):\n\n Running = True\n\n def __init__(self, config):\n self.Plugins = []\n self.config = config\n self.sender = None\n\n def start(self):\n self._load_plugins()\n self._find_sender()\n self._update_plugins()\n self._loop()\n\n def _load_plugins(self):\n PluginLoader.load()\n for klass in Plugin.__subclasses__():\n plugin = klass(self.config)\n self.Plugins.append(plugin)\n\n def _find_sender(self):\n for plugin in self.Plugins:\n if plugin.is_sender():\n if self.sender is not None:\n raise RuntimeError(\"Sender already set\")\n self.sender = plugin\n if self.sender is None:\n raise RuntimeError(\"Can't find sender\")\n\n def _update_plugins(self):\n for plugin in self.Plugins:\n plugin.update_sender(self.sender)\n\n def _loop(self):\n while self.Running:\n for plugin in self.Plugins:\n if not plugin.is_alive():\n plugin.start()\n time.sleep(1)\n\n\ndef start():\n\n def quit_handler(_signo=None, _stack_frame=None):\n logging.info(\"Bye bye!\")\n sys.exit(0)\n\n signal.signal(signal.SIGTERM, quit_handler)\n if platform.LINUX:\n signal.signal(signal.SIGQUIT, quit_handler)\n\n config = Config()\n supervisor = Supervisor(config)\n\n try:\n logging.info(\"Start agent\")\n supervisor.start()\n except KeyboardInterrupt:\n quit_handler()\n","sub_path":"mamonsu/lib/supervisor.py","file_name":"supervisor.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"60679697","text":"\n# scrape the main content of a product page\n\n# browser is the selenium handle of a window\n# path is the path to store the scraped html\n\nimport time\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndef scrap_maincontent(browser, path):\n\n while True:\n try:\n # wait while the page is loading\n wait = WebDriverWait(browser, 10)\n wait.until(EC.presence_of_element_located((By.CLASS_NAME, \"maincontent\")))\n # get the div that contains the info\n maincontent = browser.find_element_by_class_name(\"maincontent\")\n htmlstr = maincontent.get_attribute(\"innerHTML\")\n # write it into a file \n f = open(path + \".html\",\"wb\")\n f.write(htmlstr.encode('utf8'))\n f.close()\n return True\n except:\n # if failed, refresh and try again\n # sometimes chrome just plays dumb\n browser.refresh()\n time.sleep(1)\n continue\n\n","sub_path":"data_preparing/scraping_maincontent.py","file_name":"scraping_maincontent.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251378174","text":"from django.db import models\n\n# Create your models here.\n\n# Define the salt minion table\nclass SaltMinion(models.Model):\n name = models.CharField('Minion名称',max_length=124)\n create_date = models.DateField('创建时间',auto_now_add=True)\n status_choices = (\n (1, u'已经认证'),\n (2, u'未认证'),\n )\n status = models.SmallIntegerField('认证状态',choices=status_choices)\n Autherized_date = models.DateField('认证时间',auto_now=True)\n memo = models.TextField(u'备注', null=True,blank=True)\n def __str__(self):\n return self.name\n class Meta:\n verbose_name = 'Salt认证'\n verbose_name_plural = 'Salt认证'\n def colored_status(self):\n if self.status == 1:\n cell_html = '%s'\n elif self.status == 2 :\n cell_html = '%s'\n else:\n cell_html = '%s'\n return cell_html % self.get_status_display()\n colored_status.allow_tags = True\n colored_status.short_description = u'认证状态'","sub_path":"Mebius/deploy/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247674311","text":"# Function to find permutations of a given string\nfrom itertools import permutations, groupby, combinations\n\n\ndef proper_str(data):\n if data.startswith('-'):\n data1 = data.replace('-', \"\")\n return data1\n elif '-' in data:\n return 0\n else:\n return data\n\n\nsw = ''\n# string_given = 'co?3d5er45,3' # here can replace if you want any string\n# string_given = 'g4e3e5k3s4'\n# string_given = 'nu23m-2'\nstring_given = '-nknk4n4u23m-2'\nreq_data = proper_str(string_given)\nif req_data:\n for num in list(req_data):\n w = ''\n if num.isdigit():\n w += num\n sw += w\n # print(type(sw))\n permList = permutations(sw)\n all_list = [''.join(perm) for perm in list(permList)]\n list_num = [int(x) for x in all_list if x == x[::-1]]\n print(max(list_num))\nelse:\n print(-1)\n\n","sub_path":"Interview_tests/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"605249413","text":"import time\nimport sys\n\ndef retry(n, message, f):\n while True:\n try:\n return f()\n except:\n if n == 0:\n raise sys.exc_info()[0]\n print(f\"Retrying {message}\")\n n = n - 1\n time.sleep(1)\n","sub_path":"slack/retry.py","file_name":"retry.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"516517810","text":"from django.db import models\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.files import File\n\nimport random\nimport string\n\nfrom face_train_and_recognition.train_and_recognition_controller import TrainRecognitionController\nfrom face_train_and_recognition.train_recognition_scybiometry_client import TrainRecognitionScyBiometryClient\nfrom face_train_and_recognition.train_recognition_scybiometry_client import NamespaceOverlimitException\n\nclass Consumer(models.Model):\n uid = models.CharField(max_length=100)\n\n description = models.TextField()\n\n is_employer = models.BooleanField(default=False)\n\n @staticmethod\n def create_consumer(namespace, photos=None, description=\"\", number_train_photos=4):\n def 
id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n uid = id_generator() + \"@\" + namespace\n\n tr_client = TrainRecognitionScyBiometryClient(namespace)\n tr_controller = TrainRecognitionController(tr_client)\n\n try:\n tr_controller.train(uid, photos[:number_train_photos])\n except NamespaceOverlimitException:\n raise\n except:\n raise\n #return None\n\n consumer = Consumer(uid=uid, description=description)\n consumer.save()\n\n if photos is not None:\n for photo in photos:\n if not isinstance(photo, File):\n # TODO: Hide side effect!!!\n\n photo = File(photo)\n\n consumer_face = ConsumerFace(consumer=consumer, photo=photo)\n consumer_face.save()\n\n return consumer\n\n @staticmethod\n def delete_consumer(uid):\n try:\n Consumer.objects.get(uid=uid).delete()\n except ObjectDoesNotExist:\n return\n\n namespace = uid.split('@')[1]\n\n tr_client = TrainRecognitionScyBiometryClient(namespace)\n tr_controller = TrainRecognitionController(tr_client)\n\n tr_controller.remove(uid)\n\n @staticmethod\n def recognize_consumer(namespace, photos):\n tr_client = TrainRecognitionScyBiometryClient(namespace)\n tr_controller = TrainRecognitionController(tr_client)\n\n try:\n uid = tr_controller.recognize_one(photos=photos, threshold=40)\n return Consumer.get_consumer(uid)\n except:\n return None\n\n @staticmethod\n def get_known_consumers():\n return Consumer.objects.all().exclude(uid=\"unknown\")\n\n @staticmethod\n def get_consumer(uid):\n try:\n consumer = Consumer.objects.get(uid=uid)\n except ObjectDoesNotExist:\n try:\n consumer = Consumer.objects.get(uid=\"unknown\")\n except ObjectDoesNotExist:\n consumer = Consumer(uid=\"unknown\")\n consumer.save()\n\n return consumer\n\n def get_photos(self):\n return self.consumerface_set.all()\n\n def get_first_photo(self):\n return self.consumerface_set.first()\n\n\nclass ConsumerFace(models.Model):\n photo = models.ImageField(\"Profile Pic\", upload_to=settings.MEDIA_ROOT + \"images/\")\n\n consumer = models.ForeignKey(Consumer)\n\n\n","sub_path":"face_consumers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91997490","text":"from webapi import *\n\n\n# Log in first\nloginRet,cookies = login('auto','sdfsdfsdf')\nif loginRet[\"retcode\"] != 0:\n raise Exception('认证失败')\n\n\n# Record the sessionid\nsessionid =cookies['sessionid']\n\n# First, list the courses\ncoureListBefore = list_course(sessionid)['retlist']\n\n# Then add one course\nfrom datetime import datetime\ncourseName = f'python_{datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")}'\nretDict = add_course(courseName,'python语言','2',sessionid)\n\nassert retDict['retcode'] == 0\n\n# List the courses again\ncoureListAfter = list_course(sessionid)['retlist']\n\ncreateCount = len(coureListAfter) - len(coureListBefore)\n\nassert createCount == 1\n\n\n# Fetch the one extra course object\nnewcourse = None\nfor one in coureListAfter:\n if one not in coureListBefore:\n newcourse = one\n break\n\n\n\n# Check that it is the course we just added\nassert newcourse!=None\nassert newcourse['name']== courseName\nassert newcourse['desc']=='python语言'\nassert newcourse['display_idx']==2\n\n\n# Clean up the test environment\n\ndelete_course(newcourse['id'],sessionid)\n\n\n\nprint('\\n========= test case pass =============')\n\n","sub_path":"课程代码/webapi/lesson07/before/case001.py","file_name":"case001.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"539407642","text":"import requests\nimport json\nfrom .settings import PLANURL, PAYSTACK_AUTH\n\nclass Base:\n\n\tCONTENT_TYPE = \"application/json\"\n\tbaseurl = ''\n\n\t@classmethod\n\tdef __init__(self, auth=None):\n\n\t\tself.requests = requests\n\t\tself.json = json\n\t\tself.data = {}\n\t\tif auth:\n\t\t\tself.header = {'Authorization': auth,'content-type': 'application/json'}\n\t\telif PAYSTACK_AUTH:\n\t\t\tself.header = {'Authorization': PAYSTACK_AUTH,'content-type': 'application/json'}\n\t\telse:\n\t\t\traise NotImplementedError('Missing authorization Key')\n\n\n\tdef load(self):\n\t\treturn self.json.loads(self.data)\n\n\tdef dump(self):\n\t\treturn self.json.dumps(self.data)\n\n\tdef execute(self, method=0, endpoint=''):\n\t\tif method == 0:\n\t\t\tr = self.requests.get(self.baseurl + endpoint, params=self.data, headers=self.header)\n\t\telif method == 1:\n\t\t\tr = self.requests.post(self.baseurl + endpoint, data=self.dump(), headers=self.header)\n\t\telif method == 2:\n\t\t\tr = self.requests.put(self.baseurl + endpoint, data=self.dump(), headers=self.header)\n\t\telse: raise ValueError('Please specify a valid method')\n\n\t\tself.payload = r.json()\n\t\tif self.payload['status'] == True:\n\t\t\ttry:\n\t\t\t\treturn self.payload['data']\n\t\t\texcept KeyError:\n\t\t\t\treturn self.payload['status']\n\t\telse:\n\t\t\tprint(self.payload)\n\t\t\tprint(self.payload['message'])\n\n\n\tdef list_all(self):\n\t\treturn self.execute()\n\n\tdef get_one(self, id):\n\t\tif type(id) is not str: id=str(id)\n\t\tendpoint = '/' + id\n\t\treturn self.execute(endpoint=endpoint)\n\n\tdef get_data(self):\n\t\treturn self.payload","sub_path":"djpaystack/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"327803124","text":"# Example by Eugenius Klimentovsky\n\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets, QtQuick, QtWebEngine, QtWebEngineWidgets, uic\nfrom PyQt5.QtWidgets import QWidget, QApplication\n\napp = QApplication(sys.argv)\nw = QWidget()\nuic.loadUi('example.ui', w)\nw.setLayout(w.grid)\nw.tab1.setLayout(w.tab1grid)\nw.tab2.setLayout(w.tab2grid)\nw.tab3.setLayout(w.tab3grid)\nw.address.setText(str(w.webEngineView.url())[19:-2])\nw.pushButton_OK.clicked.connect(lambda: print(\"hello :)\"))\nw.show()\nsys.exit(app.exec_())\n","sub_path":"School/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192541074","text":"from simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\n\nimport numpy as np\n\nfrom sys import stdout\nimport sys\nfrom tqdm import tqdm\n\n\n\n\"\"\"\nEquilibrates the drug-protein complex. Starts with strong restraints on both protein \nand drug and gradually releases them. The drug restraint is released\na little bit faster so it can find it's niche before the protein moves to \naccommodate it's starting structure. \n\"\"\"\n\nID = sys.argv[1]\n\n#load\ncomplex_pdb = PDBFile('./processed_data/complex_systems/complex_coords_'+ID+'.pdb')\ncomplex_system = XmlSerializer.deserialize(open('./processed_data/complex_systems/complex_system_'+ID+'.xml').read())\n\n#get prot and drug indices for doing restraints later. 
\nprotein_indices = np.array([count for count, i in enumerate(complex_pdb.topology.atoms()) if i.residue.name not in [\"DPPC\", \"DPP\", \"CLA\", \"NA\", \"HOH\"]])\ndrug_indices = np.array([count for count, i in enumerate(complex_pdb.topology.atoms()) if i.residue.name=='UNL'])\n\n##add a barostat. Not only will this initially fix any vacuum between periodic cells,\n##it will be maintained throughout to keep pressure reasonable. \nbarostat = MonteCarloMembraneBarostat(1*bar, 200*bar*nanometer, 310*kelvin, MonteCarloMembraneBarostat.XYIsotropic, MonteCarloMembraneBarostat.ZFree)\ncomplex_system.addForce(barostat)\n\n\n#A restraining force on the protein atoms for equilibration:\nproteinForce = CustomExternalForce(\"proteinF*periodicdistance(x, y, z, x0, y0, z0)^2\")\nproteinForce.addGlobalParameter(\"proteinF\", 5*kilocalories_per_mole/angstroms**2)\nproteinForce.addPerParticleParameter(\"x0\")\nproteinForce.addPerParticleParameter(\"y0\")\nproteinForce.addPerParticleParameter(\"z0\")\nfor count, coord in enumerate(complex_pdb.positions):\n if count in protein_indices:\n proteinForce.addParticle(int(count), [coord[0], coord[1], coord[2]])\n\nprint(\"Does it use periodic conditions?\", proteinForce.usesPeriodicBoundaryConditions())\ncomplex_system.addForce(proteinForce)\n\n\n##A restraining force on the drug atoms for equilibration:\n#drugForce = CustomExternalForce(\"drugF*periodicdistance(x, y, z, x0, y0, z0)^2\")\n#drugForce.addGlobalParameter(\"drugF\", 5*kilocalories_per_mole/angstrom**2)\n#drugForce.addPerParticleParameter(\"x0\")\n#drugForce.addPerParticleParameter(\"y0\")\n#drugForce.addPerParticleParameter(\"z0\")\n#for count, coord in enumerate(complex_pdb.positions):\n# if count in drug_indices:\n# drugForce.addParticle(int(count), [coord[0], coord[1], coord[2]])\n#\n#print(\"Does it use periodic conditions?\", drugForce.usesPeriodicBoundaryConditions())\n#complex_system.addForce(drugForce)\n\n\n#OpenMM simulation machinery:\nintegrator = LangevinMiddleIntegrator(310*kelvin, 10/picosecond, 0.003*picoseconds)\nplatform = Platform.getPlatformByName('OpenCL')\nprop = {'OpenCLPrecision':'single', 'OpenCLDeviceIndex':'0'}\nsimulation = Simulation(complex_pdb.topology, complex_system, integrator, platform, prop)\n\n#minimize starting coords:\nsimulation.context.setPositions(complex_pdb.positions)\nsimulation.minimizeEnergy(maxIterations=150)\n#for temp in [150, 175, 200, 225, 250, 275, 300, 310]:\n# integrator.setTemperature(150*kelvin)\n# simulation.step(1000)\n \n#add a DCD writer:\nsimulation.reporters.append(DCDReporter('./processed_data/equilibration/traj_equilibration_'+ID+'.dcd', 10000))\nsimulation.reporters.append(StateDataReporter('./processed_data/equilibration/log_equilibration_'+ID+'.log', 1000, \n step=True, \n potentialEnergy=True, \n temperature=True, speed=True))\n\n\n#reduce restraints:\n#we are looking to avoid clashes more than remove any structure errors\n#from crystallization, since receptor activity changes take order 10^3-4 nanoseconds\nfor i in tqdm(range(300)):\n simulation.step(4000) #12 ps per iteration; 300 iterations = 3.6 ns.\n #simulation.context.setParameter('drugF', simulation.context.getParameter('drugF')*0.9)\n simulation.context.setParameter('proteinF', simulation.context.getParameter('proteinF')*0.9)\n\n##The restraint forces are basically zero now, so remove them:\nforces = np.array([isinstance(i, CustomExternalForce) for i in complex_system.getForces()])\nwhile np.any(forces):\n complex_system.removeForce(int(forces.nonzero()[0][0]))\n forces = 
np.array([isinstance(i, CustomExternalForce) for i in complex_system.getForces()])\n\n##Extra 1 ns for good measure.\nsimulation.step(400000)\n\n\n\n##Save it all.\n#System:\nwith open('./processed_data/equilibration/equilibration_system_'+ID+'.xml', 'w') as f:\n f.write(\n XmlSerializer.serialize(\n complex_system\n )\n )\n\n#Current state:\nwith open('./processed_data/equilibration/equilibration_state_'+ID+'.xml', 'w') as f:\n f.write(\n XmlSerializer.serialize(\n simulation.context.getState(getPositions=True,\n getForces=True, getEnergy=True,\n enforcePeriodicBox=True)\n )\n )\n\n#PDB file (we already have one but this just keeps the three in the equilibration dir for neatness):\npositions = simulation.context.getState(getPositions=True).getPositions()\nPDBFile.writeFile(simulation.topology, positions, open('./processed_data/equilibration/equilibration_coords_'+ID+'.pdb', 'w'))\n","sub_path":"code/equilibrate.py","file_name":"equilibrate.py","file_ext":"py","file_size_in_byte":5352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412644673","text":"from __future__ import division\nfrom ExamPapers.DBManagement.models import *\nfrom ExamPapers.DBManagement.models import Image\nfrom ExamPapers.settings import ROOT_PATH\nimport re\nimport math\n\n\ndef getViewQuestion(qid):\n\t\"\"\"\n\tGet question for view page\n\tInput: question id\n\tOutput: Question object including all attributes and formated content and solution\n\t\"\"\"\n\tquestion = Question.objects.get(id=qid)\n\tanswer = list(Answer.objects.filter(question_id=question.id).values())\n\tquestion.content = formatContent(question, \"Question\")\n\tquestion.topic = Topic.objects.get(id=question.topic_id)\n\tquestion.subtopic = Subtopic.objects.get(id=question.subtopic_id)\n\tquestion.distinct_anstype = getAllDistinctAnsType(answer)\n\tquestion.stars = star((question.marks+1)/2)\n\tquestion.solution = formatContent(question,\"Solution\")\n\tquestion.answer = \"Not available\"\n\tquestion.images = Image.objects.filter(qa_id = question)\n\tif Answer.objects.filter(question = question.id).count != 0:\n\t\tquestion.answer = Answer.objects.filter(question = question.id)\n\tquestion.tag = Tag.objects.filter(question_id = question.id)\n\tquestion.tagdef = []\n\tfor ta in question.tag:\n\t\ttdeg = TagDefinition.objects.get(id = ta.tagdefinition.id)\n\t\tif tdeg not in question.tagdef:\n\t\t\ttdeg.title = tdeg.title.replace(\"_\",\" \")\n\t\t\tquestion.tagdef.append(tdeg)\n\treturn question\n\ndef formatContent(question,type):\n\t\"\"\"\n\tFormat question content and solution, convert image and align content\n\tInput: question object, content type: Question or Solution\n\tOutput: formated content\n\t\"\"\"\n\timages = list(Image.objects.filter(qa_id=question.id, qa=type).only('id','imagepath').order_by('id').values())\n\t\n\tif type == \"Question\":\n\t\t'''\n\t\tfor i in images:\n\t\t\thtml_img = \"\"\n\t\t\tquestion.content = question.content.replace('img', html_img, 1)\n\t\tquestion.content = question.content.replace('').replace('img','')\n\t\t#print question.content\n\t\treturn question.content\n\tif type ==\"Solution\":\n\t\t\n\t\tsolutionContent = \"Not available\"\n\t\tif Solution.objects.filter(question_id = question.id).count() != 0:\n\t\t\tsolutionContent = Solution.objects.get(question_id = question.id).content\n\t\t'''\n\t\tfor i in images:\n\t\t\thtml_img = \"\"\n\t\t\tsolutionContent = solutionContent.replace('img', html_img, 1)\n\t\tsolutionContent = 
solutionContent.replace('')\n\t\treturn solutionContent\n\t\t\ndef getAllDistinctAnsType(answer):\n\tdistinct_anstype = list()\n\tfor a in answer:\n\t\tdesc = AnswerType.objects.get(id=a['answertype_id']).description\n\t\tif not desc in distinct_anstype:\n\t\t\tdistinct_anstype.append(desc)\n\treturn distinct_anstype\n\ndef star(rate):\n\t\"Convert difficulty int value to star icon using bootstrap star\"\n\tstars = []\n\tfor s in range(int(rate)):\n\t\tstars.append(\"\")\n\t#for s in range(5-rate):\n\t#\tstars.append(\"\")\n\treturn stars\n\t\ndef formatIntoLabelDictList(labellist, anslist):\n\tdictList = list()\n\tcounter = 1\n\n\tfor ll in labellist:\n\t\ttry:\n\t\t\tdictList.append({'sub': counter, 'label': ll, 'ans':anslist[counter-1]})\n\t\texcept IndexError:\n\t\t\tdictList.append({'sub': counter, 'label': ll})\n\t\tcounter += 1\n\n\treturn dictList\n\t\ndef display_finalanswer(finalanswer):\n\tcurr_qns_id = ''\n\tcurrqnscount = 1\n\tnoanscount = 0\n\n\tfor fa in finalanswer:\n\t\tif fa.content is not None and fa.content!='':\n\t\t\ttempDict = extractLabelandAns(fa.content)\n\t\t\tfa.labellist = formatIntoLabelDictList(tempDict['labellist'], tempDict['anslist'])\n\t\t\t\n\t\tif isSketch(fa.answertype_id) or fa.content == '':\n\t\t\tanstype = AnswerType.objects.get(id=fa.answertype_id).description\n\t\t\tfa.label = \" [\" + anstype + \" Question]. View Papers for detailed solution.\"\n\t\t\tnoanscount += 1\n\n\t\tif curr_qns_id == fa.question_id:\n\t\t\tcurrqnscount += 1\n\t\telse:\n\t\t\tcurr_qns_id = fa.question_id\n\n\tif currqnscount == noanscount:\n\t\treturn None\n\telse:\n\t\treturn finalanswer\n\n\ndef formatIntoLabelDictList(labellist, anslist):\n\tdictList = list()\n\tcounter = 0\n\n\tfor ll in labellist:\n\t\ttry:\n\t\t\tdictList.append({'sub': counter, 'label': ll, 'ans':anslist[counter] , 'counter':counter})\n\t\texcept IndexError:\n\t\t\tdictList.append({'sub': counter, 'label': ll})\n\t\tcounter += 1\n\n\treturn dictList\n\ndef getAllDistinctAnsType(finalanswer):\n\tdistinct_anstype = list()\n\tfor fa in finalanswer:\n\t\tdesc = AnswerType.objects.get(id=fa['answertype_id']).description\n\t\tif not desc in distinct_anstype:\n\t\t\tdistinct_anstype.append(desc)\n\treturn distinct_anstype\n\ndef extractLabelandAns(content):\n\tlabellist = list()\n\tanslist = list()\n\n\tif content != '':\n\t\tlabelanslist = content.split('\"')\n\t\tcounter = 0\n\t\twhile counter < len(labelanslist):\n\t\t\ttemp = (\"$$\"+labelanslist[counter]+\"$$\").replace(\";\", \"$$
$$\").replace(\" \", \" \\space \")\n\t\t\tlabellist.append(temp)\n\t\t\n\t\t\tif (counter+1) < len(labelanslist):\n\t\t\t\tanslist.append(labelanslist[counter+1])\n\t\t\tcounter+=2\n\n\treturn {'labellist': labellist, 'anslist': anslist}\n\t\ndef getAnswer(qid):\n\tanswers = Answer.objects.filter(question_id = qid)\n\treturn display_finalanswer(answers)\n","sub_path":"logic/question_processing.py","file_name":"question_processing.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"435652501","text":"import numpy as np\n\nimport os\n\nfrom audio import batch_scale_specs\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom hparams import hparams as hp\nfrom tqdm import tqdm\n\n#offset = hp.stft_frames//2\n\nclass SpectrogramDataset(Dataset):\n def __init__(self, data_path, spec_info):\n self.data_path = data_path\n self.mix_path = os.path.join(data_path, \"mix\")\n self.vox_path = os.path.join(data_path, \"vox\")\n self.offset = hp.stft_frames//2\n self.metadata = self.get_slices(spec_info)\n\n def get_slices(self, spec_info):\n metadata = []\n print(\"Preparing dataset\")\n for spec in tqdm(spec_info):\n size = spec[1] - hp.stft_frames\n for i in range(0, size, hp.stft_stride):\n j = i + hp.stft_frames\n slice_info = (spec[0], i, j)\n metadata.append(slice_info)\n \n return metadata\n\n def __getitem__(self, index):\n slice_info = self.metadata[index]\n fname = slice_info[0]\n i = slice_info[1]\n j = slice_info[2]\n XH = np.load(os.path.join(self.mix_path, fname+\"H.npy\"), mmap_mode='r')\n XP = np.load(os.path.join(self.mix_path, fname+\"P.npy\"), mmap_mode='r')\n XR = np.load(os.path.join(self.mix_path, fname+\"R.npy\"), mmap_mode='r')\n x = np.stack([XH[:,i:j], XP[:,i:j], XR[:,i:j]])\n Y = np.load(os.path.join(self.vox_path, fname+\".npy\"), mmap_mode='r')\n y = Y[:,i+self.offset]\n return x, y\n\n def __len__(self):\n return len(self.metadata)\n\n\ndef basic_collate(batch):\n x = [it[0] for it in batch]\n x = np.stack(x).astype(np.float32)\n x = torch.FloatTensor(x)\n y = [it[1] for it in batch]\n y = np.stack(y).astype(np.float32)\n y = torch.FloatTensor(y)\n return x, y\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124224974","text":"# -*- coding: utf-8 -*-\n# @Author : Jing\n# @FileName: 矩阵中的路径.py\n# @IDE: PyCharm\n# Solution 1: DFS(回溯法)\n\n\nclass Solution:\n def hasPath(self, matrix, rows, cols, path):\n if len(matrix) == 0:\n return False\n if len(path) == 0:\n return True\n # parse matrix\n m = []\n for i in range(rows):\n offset = i * cols\n row = []\n for j in range(cols):\n row.append(matrix[offset + j])\n m.append(row)\n\n # build visited\n visited = [[False for j in range(cols)] for i in range(rows)]\n\n # scan all cells\n for i in range(rows):\n for j in range(cols):\n if self.dfs(m, visited, i, j, rows, cols, path):\n return True\n return False\n\n def dfs(self, m, visited, i, j, rows, cols, path):\n # terminal conditions\n if i < 0 or i == rows:\n return False\n if j < 0 or j == cols:\n return False\n if visited[i][j]:\n return False\n\n # visit self\n if path[0] != m[i][j]:\n return False\n visited[i][j] = True\n nextPath = path[1:]\n if len(nextPath) == 0:\n return True\n\n # visit neighbours\n if self.dfs(m, visited, i + 1, j, rows, cols, nextPath):\n return True\n if self.dfs(m, visited, i - 1, j, rows, 
cols, nextPath):\n return True\n if self.dfs(m, visited, i, j + 1, rows, cols, nextPath):\n return True\n if self.dfs(m, visited, i, j - 1, rows, cols, nextPath):\n return True\n\n # unvisit self\n visited[i][j] = False\n return False\n\n\nif __name__ == '__main__':\n nums = ['A', 'B', 'C', 'E', 'S', 'F', 'C', 'S', 'A', 'D', 'E', 'E']\n rows, cols = 3, 4\n path = ['A', 'B', 'C', 'C', 'E', 'D']\n s = Solution()\n print(s.hasPath(nums, rows, cols, path))\n\n","sub_path":"剑指offer/12.矩阵中的路径.py","file_name":"12.矩阵中的路径.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"156450939","text":"import time\n\ndef insertData(P_ID, data):\n # append a tab-separated record with a timestamp\n with open(\"database.txt\", \"a\") as f:\n ts = time.time()\n f.write(str(P_ID) + \"\\tTIME: \" + str(ts) + \"\\t\" + str(data) + \"\\n\")\n\ndef getData(P_ID):\n # print every stored record whose ID field matches P_ID\n with open('database.txt','r') as f:\n for line in f:\n if line.split(\"\\t\")[0] == str(P_ID):\n print(line)\n","sub_path":"datastore.py","file_name":"datastore.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358354498","text":"'''personal_thresholds.py - Joshua Wallace - Apr 2019\n\nThis stores the SNR threshold functions determined to work for my\nM4 work.\n\n'''\n\nimport numpy as np\n\n\ndef pdm_cutoff(p):\n\n p1_y = 7.5\n p1_x = np.log10(.04)\n p2_y = 5.8\n p2_x = np.log10(.3)\n p3_y = 5.8\n p3_x = np.log10(1.5)\n p4_y = 5.1\n p4_x = np.log10(4.)\n p5_y = 4.5\n p5_x = np.log10(100.)\n\n m1 = (p2_y-p1_y)/(p2_x-p1_x)\n b1 = p1_y - m1*p1_x\n m2 = (p3_y-p2_y)/(p3_x-p2_x)\n b2 = p2_y - m2*p2_x\n m3 = (p4_y-p3_y)/(p4_x-p3_x)\n b3 = p3_y - m3*p3_x\n m4 = (p5_y-p4_y)/(p5_x-p4_x)\n b4 = p4_y - m4*p4_x\n \n logp = np.log10(p)\n if logp < p1_x:\n raise RuntimeError(\"Period too short\")\n elif logp < p2_x:\n return m1*logp + b1\n elif logp < p3_x:\n return m2*logp + b2\n elif logp < p4_x:\n return m3*logp + b3\n elif logp < p5_x:\n return m4*logp + b4\n else:\n raise RuntimeError(\"Period too long\")\n\n\ndef ls_cutoff(p):\n\n p1_y = 13.3\n p1_x = np.log10(.04)\n p2_y = 13.3\n p2_x = np.log10(.1)\n p3_y = 6.5\n p3_x = np.log10(3.5)\n p4_y = 4.5\n p4_x = np.log10(4)\n p5_y = 4.0\n p5_x = np.log10(100.)\n\n m1 = (p2_y-p1_y)/(p2_x-p1_x)\n b1 = p1_y - m1*p1_x\n m2 = (p3_y-p2_y)/(p3_x-p2_x)\n b2 = p2_y - m2*p2_x\n m3 = (p4_y-p3_y)/(p4_x-p3_x)\n b3 = p3_y - m3*p3_x\n m4 = (p5_y-p4_y)/(p5_x-p4_x)\n b4 = p4_y - m4*p4_x\n\n\n logp = np.log10(p)\n if logp < p1_x:\n raise RuntimeError(\"Period too short\")\n elif logp < 
p2_x:\n return m1*logp + b1\n elif logp < p3_x:\n return m2*logp + b2\n elif logp < p4_x:\n return m3*logp + b3\n elif logp < p5_x:\n return m4*logp + b4\n elif logp < p6_x:\n return m5*logp + b5\n elif logp < p7_x:\n return m6*logp + b6\n else:\n raise RuntimeError(\"Period too long\")\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n xes = np.linspace(0.05,90,10000)\n\n ls = [ls_cutoff(val) for val in xes]\n bls = [bls_cutoff(val) for val in xes]\n pdm = [pdm_cutoff(val) for val in xes]\n\n plt.plot(xes,ls,label='LS')\n plt.plot(xes,pdm,label='PDM')\n plt.plot(xes,bls,label='BLS')\n plt.xscale('log')\n\n plt.legend(loc='best')\n\n plt.savefig(\"temp.pdf\")\n","sub_path":"example/personal_thresholds.py","file_name":"personal_thresholds.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566532941","text":"import numpy as np\nimport scipy.optimize\nimport scipy.stats\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nfrom operator import itemgetter\nimport datetime\nimport pandas as pd\nfrom HelperFunc import initialisation,getCleanSignal\nimport os\nfrom multiprocessing import Pool\n\ndef getV(filepath, Nacp_limit = np.inf):\n #initialisation\n a_opt_avg = {1:[],2:[]}\n a_std_avg = {1:[],2:[]}\n N_acp = {1:[],2:[]}\n SNRmean = {1:[],2:[]}\n hdulist = fits.open(filepath)\n def fringeShape(t,A,DT,t_0,phi):\n return A*np.sinc(np.pi*(t-t_0)/DT)*np.cos(2*np.pi*(t-t_0)*f_max+phi)\n\n for x in [1,2]:\n print(filepath, 'beam', x)\n C = getCleanSignal(hdulist, x)\n d = initialisation(hdulist,x)\n t_array,dt,step,start = d['t_array'],d['dt'],d['step'],d['start'] \n f_max = 1/4/dt\n \n \n a_opt_list = []\n a_std_list = []\n reject_list = []\n fail_list = []\n weight_list = []\n while start+step<= len(t_array) and len(a_opt_list)=b_end:\n Fsq_final[i] = 0\n else:\n Delta = Fsq_avg[b_start]+(Fsq_avg[b_end]-Fsq_avg[b_start])/(b_end-b_start)*(i-b_start)\n Fsq_final[i] = Fsq_avg[i] - Delta\n \n df = (f[-1]-f[0])/len(f)\n Vsq_raw = sum(Fsq_final)*df\n\n #normalisation\n V_raw[x] = np.sqrt(Vsq_raw/(dt/1e6*step))\n \n #plot\n # plt.plot(f, Fsq_avg)\n # plt.plot([f[b_start], f[b_end]], [Fsq_avg[b_start],Fsq_avg[b_end]],ls='--')\n # plt.figure()\n # plt.plot(f, Fsq_final)\n else:\n filename = hdulist[0].header['ARCFILE']\n print(filename, x, 'Does not have any valid data')\n V_raw[x] = None\n\n B = {1:None,2:None}\n B[1] = (hdulist[0].header['HIERARCH ESO ISS PBL23 START']+ hdulist[0].header['HIERARCH ESO ISS PBL23 END'])/2\n B[2] = (hdulist[0].header['HIERARCH ESO ISS PBL12 START']+ hdulist[0].header['HIERARCH ESO ISS PBL12 END'])/2\n date_time =datetime.datetime.strptime(hdulist[0].header['HIERARCH ESO PCR ACQ START'], '%Y-%m-%dT%H:%M:%S.%f')\n output = {'time':date_time,'N_acp':N_acp,'SNR': SNRmean,'B':B,'V':V_raw}\n return pd.Series(output, index = ['time','N_acp','SNR','B','V'])\n \nif __name__ == \"__main__\":\n d = getVPaper('C:\\\\Users\\\\fish\\\\Desktop\\\\2012-08-26\\\\AMBER.2012-08-29T08%3A43%3A57.311.fits')\n\n \n \n","sub_path":"Old Method/FileOp.py","file_name":"FileOp.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426965339","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\n# 구구단의 단수 및 곱해지는 수를 증가 시키기 위한 상수\nnStep = tf.constant(1)\n\n# 구구단의 시작 단수\ndan = tf.Variable(2)\n\n# 구구단의 곱해지는 수\nmul = tf.Variable(1)\n\n# 현재 단수에서 1증가된 값을 반환하는 텐서\naddDan = 
with tf.Session() as sess:\n    sess.run(init_variables)\n\n    for i in range(2, 10):\n\n        print(\"Printing the {0} times table.\\n\".format(sess.run(dan)))\n\n        sess.run(initMul)\n        for j in range(9):\n            gugudan_str = \"{0} * {1} = {2}\".format(\n                    sess.run(dan), \n                    sess.run(mul), sess.run(gugudan_result))\n            print(gugudan_str)\n            sess.run(updateMul)\n\n        sess.run(updateDan)\n        print(\"\\n\")\n","sub_path":"Lecture_note_ML/3_tensorflow/1_basic/1_variable/tf_07_op_gugudan.py","file_name":"tf_07_op_gugudan.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"40788355","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nimport sklearn\nfrom scipy import misc\nfrom matplotlib import pylab as plt\nfrom scipy.spatial.distance import cdist\nimport matplotlib.cm as cm\nfrom functools import reduce\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_val_score\nfrom numpy import linalg as LA\n\nget_ipython().magic('matplotlib inline')\n\n\n# In[2]:\n\ntrain_labels, train_data = [], []\n\n\n# In[3]:\n\nfor line in open('./faces/train.txt'):\n    im = misc.imread(line.strip().split()[0])\n    train_data.append(im.reshape(2500,))\n    train_labels.append(line.strip().split()[1])\ntrain_data, train_labels = np.array(train_data, dtype=float), np.array(train_labels, dtype=int)\n\n\n# In[4]:\n\nprint (train_data.shape, train_labels.shape)\nplt.imshow(train_data[50, :].reshape(50,50), cmap = cm.Greys_r)\nplt.show()\n\n\n# In[5]:\n\nX = train_data\n\n\n# In[6]:\n\ntest_labels, test_data = [], []\n\n\n# In[7]:\n\nfor line in open('./faces/test.txt'):\n    im = misc.imread(line.strip().split()[0])\n    test_data.append(im.reshape(2500,))\n    test_labels.append(line.strip().split()[1])\ntest_data, test_labels = np.array(test_data, dtype=float), np.array(test_labels, dtype=int)\n\n\n# In[8]:\n\nXtest = test_data\n\n\n# In[9]:\n\nprint (Xtest.shape, test_labels.shape)\nplt.imshow(Xtest[50, :].reshape(50,50), cmap = cm.Greys_r)\nplt.show()\n\n\n# In[10]:\n\nµ = X.mean(axis=0) \n\n\n# In[11]:\n\n#c, average face\nplt.imshow(µ.reshape(50,50), cmap = cm.Greys_r)\n\n\n# In[12]:\n\n#d\nXnew = np.subtract(X, µ) \n\n\n# In[13]:\n\nXnew.shape\n\n\n# In[14]:\n\nplt.imshow(Xnew[50, :].reshape(50,50), cmap = cm.Greys_r)\n\n\n# In[15]:\n\nXtestnew = np.subtract(Xtest, µ) \nXtestnew.shape\n\n\n# In[16]:\n\nplt.imshow(Xtestnew[50, :].reshape(50,50), cmap = cm.Greys_r)\n#test after subtraction\n\n\n# In[99]:\n\nU, s, V = np.linalg.svd(Xnew, full_matrices=True)\n\n\n# In[106]:\n\nfor i in range(1, 11):\n    plt.figure()\n    plt.imshow(V[i, :].reshape(50,50), cmap = cm.Greys_r)\n\n\n# In[101]:\n\nS = np.zeros((540, 2500), dtype=complex)\nS[:540, :540] = np.diag(s)\nnp.allclose(Xnew, U.dot(S.dot(V)))\n\n\n# In[80]:\n\nnp.allclose(Xnew, (U.dot(S)).dot(V))\n\n\n# In[82]:\n\n(U.dot(S)).dot(V)\n\n\n# In[102]:\n\nrankr = []\n\n#frobenius norm\nfor r in range(1, 201):\n\n    Xr = np.dot(U[:,: r ], np.dot(S[: r,: r ], V[: r,:]))\n    U[:,: 
r ].shape, V[: r,:].shape, S[: r,: r ] .shape, \n rankr.append(np.linalg.norm((X-Xr)))\n\n\n# In[103]:\n\nr = list(range(1, 201))\n\n\n# In[105]:\n\nplt.plot(r, rankr, lw = 2)\nplt.xlabel('r', fontsize=14, color='red')\nplt.ylabel('approximation error', fontsize=14, color='red')\nplt.title(\"Low-rank Approximation\")\n\n\n# In[83]:\n\nUtest, stest, Vtest = np.linalg.svd(Xtestnew, full_matrices=True)\n\n\n# In[125]:\n\n#a function to generate r -dimensional feature matrix F\ndef featureMatrix(r):\n F = np.dot(Xnew, np.transpose(V[: r,:]))\n return F\n\n\n# In[110]:\n\nX[0].shape\n\n\n# In[86]:\n\nfeatureMatrix(4)[:4,:]\n\n\n# In[126]:\n\n#a function to generate r -dimensional feature matrix F\ndef featureTest(r):\n Ftest = np.dot(Xtestnew, np.transpose(V[: r,:]))\n return Ftest\n\n\n# In[88]:\n\nfeatureMatrix(10).shape\n\n\n# In[89]:\n\nlen(train_labels)\n\n\n# In[133]:\n\nlogreg = LogisticRegression(multi_class='ovr')\nclf = logreg.fit(featureMatrix(10), train_labels)\nscores = cross_val_score(clf, featureMatrix(10), train_labels, cv=10)\nprint (scores)\nprint (np.mean(scores))\n\n\n# In[134]:\n\nlogreg.score(featureTest(10), test_labels)\n\n\n# In[122]:\n\nlogreg.score(featureMatrix(100), train_labels)\n\n\n# In[129]:\n\naccuracy = []\nfor r in range (1,201):\n logreg = LogisticRegression(multi_class='ovr')\n clf = logreg.fit(featureMatrix(r), train_labels)\n #Y_pred = clf.predict(featureTest(r))\n #accuracy.append(logreg.score(featureMatrix(r), train_labels))\n accuracy.append(logreg.score(featureTest(r), test_labels))\n\n\n# In[130]:\n\nr = list(range(1, 201))\nplt.plot(r, accuracy, lw=2)\nplt.xlabel('r', fontsize=14, color='red')\nplt.ylabel('Accuracy', fontsize=14, color='red')\nplt.title(\"Prediction accuracy\")\n\n\n# In[31]:\n\nM = np.matrix('1 0 3; 3 7 2; 2 -2 8; 0 -1 1; 5 8 7')\n\n\n# In[131]:\n\naccuracy\n\n\n# In[32]:\n\nMTM = M.dot(np.transpose(M))\n\n\n# In[33]:\n\nMMT = np.transpose(M).dot(M)\n\n\n# In[34]:\n\nw1, v1 = LA.eig(MTM)\n\n\n# In[35]:\n\nw1, v1\n\n\n# In[36]:\n\nw2, v2 = LA.eig(MMT)\n\n\n# In[37]:\n\nw2, v2\n\n\n# In[38]:\n\nU1, s1, V1 = np.linalg.svd(M, full_matrices=True)\n\n\n# In[39]:\n\nU1, s1, V1\n\n\n# In[40]:\n\ns1\n\n","sub_path":"hw2/Face+Recognition+SVD.py","file_name":"Face+Recognition+SVD.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"371190636","text":"# given a list, count how many duplicate it has.\n\n# a = [5, 7, 11, 14, 15, 18, 7, 7, 11, 5]\n# output:3\n\n\na = [5, 7, 11, 14, 15, 18, 7, 7, 11, 5]\n\ncount = 0\nc = []\n\nfor i in range(0, len(a)):\n if a[i] not in c:\n is_found = False\n for j in range(i + 1, len(a)):\n if a[i] == a[j]:\n is_found = True\n break\n if is_found == True:\n count = count + 1\n c.append(a[i])\nprint(count)\n","sub_path":"Part 1 CE-CS essentials/source/p28.py","file_name":"p28.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"146316787","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport healpy as hp\nimport os\nimport sys\nimport shutil\nimport time\nimport importlib\nimport pickle as pkl\nfrom simulation.lib.utilities.generic_class import Generic\nfrom memory_profiler import profile\nfrom simulation.lib.data_management.data_utilities import get_local_bolo_segment_list\nfrom simulation.lib.utilities.time_util import get_time_stamp\nfrom simulation.timestream_simulation.bolo import Bolo\nfrom 
simulation.lib.utilities.prompter import prompt\nfrom simulation.lib.utilities.time_util import get_time_stamp \nimport simulation.map_maker.covariance_matrix_utils as cov_ut\n\n\n#@profile\ndef run_mpi():\n #Getting list of bolos and segments for the particular process\n bolo_segment_dict = get_local_bolo_segment_list(rank, size, config.bolo_list, config.segment_list)\n\n tot_seg = 0\n for keys in bolo_segment_dict.keys():\n tot_seg += len(bolo_segment_dict[keys])\n\n time.sleep(0.1*rank)\n prompt(\"Rank : {} \\nBolos and Segments : {} \\n# of segments : {}\\n\".format(rank, bolo_segment_dict, tot_seg))\n comm.Barrier()\n\n #Defining the dimensions of the arrays and matrices\n npix = hp.nside2npix(config.nside_out)\n dim, ind_elements = cov_ut.get_dim(config.pol_type)\n\n dir_names = get_dir_names()\n\n #The matrices local to the process\n inv_cov_matrix_local = np.zeros((npix, ind_elements), dtype=np.float)\n b_matrix_local = np.zeros((npix, dim), dtype=np.float)\n hitmap_local = np.zeros(npix, dtype=np.float)\n\n count = 0\n time_taken = 0\n #Iterating over the bolos\n for bolo_name in bolo_segment_dict.keys():\n if config.subtract_template:\n estimated_y = np.load(os.path.join(config.general_output_dir, config.sim_tag, bolo_name+\"_estimated_y.npy\"))\n #If I am taking a differenced signal\n if config.take_diff_signal:\n bolo_a = Bolo(bolo_name + 'a', config)\n bolo_b = Bolo(bolo_name + 'b', config)\n else:\n bolo = Bolo(bolo_name, config)\n #Iterating over the segments in the bolo\n for segment in bolo_segment_dict[bolo_name]:\n count += 1\n start_seg = time.time()\n prompt(\"Rank : {} doing Bolo : {} and segment : {}, {} of {}\\n\".format(rank, bolo_name, segment, count, tot_seg))\n #Acquiring the signal\n if config.sim_type == \"signal\":\n if config.take_diff_signal:\n signal, hitpix, pol_ang = acquire_difference_signal(bolo_a, bolo_b, segment, config.noise_only_map)\n else:\n signal, hitpix, pol_ang = acquire_signal(bolo, segment, config.noise_only_map)\n if config.subtract_template:\n for i in range(len(config.TEMPLATE_list)): \n TEMPLATE_name = config.TEMPLATE_list[i]\n TEMPLATE_signal = bolo_a.read_timestream(segment, return_field=[TEMPLATE_name])[TEMPLATE_name]\n signal -= estimated_y[i] * TEMPLATE_signal\n if config.sim_type == \"template\":\n signal, hitpix, pol_ang = acquire_signal_template(bolo, segment)\n #Generating the inverse covariance matrix\n cov_ut.get_inv_cov_matrix(hitpix, pol_ang, signal, inv_cov_matrix_local, b_matrix_local, hitmap_local, npix, config.pol_type)\n stop_seg = time.time()\n time_taken += stop_seg - start_seg\n prompt(\"Rank : {}, Time taken : {}. 
Total time taken : {}, Projected time : {}, Finished {} of {}\n\".format(rank, stop_seg - start_seg, time_taken, time_taken*tot_seg/count, count, tot_seg))\n\n    #Saving space\n    if config.subtract_template:\n        del TEMPLATE_signal\n    del signal\n    del pol_ang\n    del hitpix\n    \n    start_dist_inv = time.time()\n\n    #Distributing and gathering the segments of the matrices in the proper process\n    #The sky pixels are chunked and are handled by individual processes\n    inv_cov_matrix_local_segment = distribute_matrix(inv_cov_matrix_local, \"cov_matrix\")\n    del inv_cov_matrix_local\n    b_matrix_local_segment = distribute_matrix(b_matrix_local, \"b_matrix\")\n    del b_matrix_local\n    hitmap_local_segment = distribute_matrix(hitmap_local, \"hitmap\")\n    del hitmap_local\n\n    #Inverting the local segment of the inverse covariance matrix\n    cov_matrix_local_segment = cov_ut.get_covariance_matrix(inv_cov_matrix_local_segment, hitmap_local_segment, config.pol_type)\n    write_segments(inv_cov_matrix_local_segment, \"inverse_covariance_matrix\", dir_names.recon_dir) \n    del inv_cov_matrix_local_segment\n\n    #Estimating the local sky segment\n    sky_map_local_segment = cov_ut.get_sky_map(cov_matrix_local_segment, b_matrix_local_segment, hitmap_local_segment, config.pol_type)\n\n    stop_dist_inv = time.time()\n    prompt(\"Rank : {}, Time taken to distribute and invert : {}\\n\".format(rank, stop_dist_inv - start_dist_inv))\n\n    write_segments(hitmap_local_segment, \"hitmap\", dir_names.recon_dir) \n    write_segments(cov_matrix_local_segment, \"covariance_matrix\", dir_names.recon_dir) \n    write_segments(sky_map_local_segment, \"sky_map\", dir_names.recon_dir) \n\n\ndef acquire_signal(bolo, segment, noise_only=False):\n    if config.simulate_ts:\n        t_stream = bolo.simulate_timestream_signal(segment, return_field=[\"signal\", \"pointing_vec\", \"pol_ang\"])\n    else:\n        t_stream = bolo.read_timestream(segment, return_field=[\"signal\", \"pointing_vec\", \"pol_ang\"], noise_only=noise_only)\n\n    hitpix = hp.vec2pix(config.nside_out, t_stream[\"pointing_vec\"][...,0], t_stream[\"pointing_vec\"][...,1], t_stream[\"pointing_vec\"][...,2])\n    return t_stream[\"signal\"], hitpix, t_stream[\"pol_ang\"]\n\ndef acquire_signal_template(bolo, segment, tm_type=None):\n    if config.template_type == \"tm_bandpass\":\n        signal_type = config.tm_bandpass_type[0]\n        t_stream = bolo.read_timestream(segment, return_field=[signal_type, \"pointing_vec\", \"pol_ang\"])\n    if config.template_type == \"tm_gradient\":\n        signal_type = config.tm_gradient_type[0]\n        t_stream = bolo.read_timestream(segment, return_field=[signal_type, \"pointing_vec\", \"pol_ang\"])\n\n    hitpix = hp.vec2pix(config.nside_out, t_stream[\"pointing_vec\"][...,0], t_stream[\"pointing_vec\"][...,1], t_stream[\"pointing_vec\"][...,2])\n    return t_stream[signal_type], hitpix, t_stream[\"pol_ang\"]\n\n\ndef acquire_difference_signal(bolo_a, bolo_b, segment, noise_only=False):\n    if config.simulate_ts:\n        t_stream_a = bolo_a.simulate_timestream_signal(segment, return_field=[\"signal\", \"pointing_vec\", \"pol_ang\"])\n        t_stream_b = bolo_b.simulate_timestream_signal(segment, return_field=[\"signal\"])\n    else:\n        t_stream_a = bolo_a.read_timestream(segment, return_field=[\"signal\", \"pointing_vec\", \"pol_ang\"], noise_only=noise_only)\n        t_stream_b = bolo_b.read_timestream(segment, return_field=[\"signal\"], noise_only=noise_only)\n\n    signal = 0.5*(t_stream_a[\"signal\"] - t_stream_b[\"signal\"])\n\n    hitpix = hp.vec2pix(config.nside_out, t_stream_a[\"pointing_vec\"][...,0], t_stream_a[\"pointing_vec\"][...,1], 
t_stream_a[\"pointing_vec\"][...,2])\n return signal, hitpix, t_stream_a[\"pol_ang\"]\n \n#Where all the distribution and gathering takes place\n#@profile\ndef distribute_matrix(local_full_matrix, matrix_type):\n npix = hp.nside2npix(config.nside_out)\n dim, ind_elements = cov_ut.get_dim(config.pol_type)\n segment_length = npix/size \n\n if matrix_type == \"hitmap\":\n local_segmented_matrix = np.zeros(segment_length) \n else:\n inner_dim = {\"cov_matrix\" : ind_elements, \"b_matrix\" : dim}\n local_segmented_matrix = np.zeros((segment_length, inner_dim[matrix_type])) \n\n for i in range(size):\n start = i*segment_length\n stop = (i+1)*segment_length\n comm.Reduce(local_full_matrix[start:stop], local_segmented_matrix, MPI.SUM, root=i)\n \n return local_segmented_matrix\n\n\ndef write_segments(maps, map_name, recon_dir):\n np.save(os.path.join(recon_dir, map_name + \"_segments\", str(rank).zfill(4)), maps) \n\n\ndef get_dir_names():\n dir_names = Generic()\n dir_names.sim_dir = os.path.join(config.general_output_dir, config.sim_tag)\n dir_names.scan_dir = os.path.join(dir_names.sim_dir, config.scan_tag)\n dir_names.recon_dir = os.path.join(dir_names.sim_dir, config.map_making_tag)\n dir_names.map_segment_dir = os.path.join(dir_names.recon_dir, \"sky_map_segments\")\n dir_names.hitmap_segment_dir = os.path.join(dir_names.recon_dir, \"hitmap_segments\")\n dir_names.cov_matrix_segment_dir = os.path.join(dir_names.recon_dir, \"covariance_matrix_segments\")\n dir_names.inv_cov_matrix_segment_dir = os.path.join(dir_names.recon_dir, \"inverse_covariance_matrix_segments\")\n\n return dir_names \n\n\n#The different sky chunks are brought together to form the final maps\n#@profile\ndef accumulate_segments(size):\n acc_start_time = time.time()\n dir_names = get_dir_names()\n\n npix = hp.nside2npix(config.nside_out)\n npix_segment = npix/size\n dim, ind_elements = cov_ut.get_dim(config.pol_type)\n\n sky_map = np.empty((dim, npix))\n\n for i in range(size):\n start = i*npix_segment\n stop = (i+1)*npix_segment\n sky_map_segment = np.load(os.path.join(dir_names.recon_dir, dir_names.map_segment_dir, str(i).zfill(4) + '.npy')) \n sky_map[..., start:stop] = sky_map_segment\n\n #if config.pol_type == 'T':\n # sky_map[np.isnan(sky_map)] = 0.0\n #else:\n # sky_map[..., np.isnan(sky_map[0])] = 0.0\n\n hp.write_map(os.path.join(dir_names.recon_dir, \"sky_map.fits\"), sky_map)\n del sky_map\n del sky_map_segment\n shutil.rmtree(os.path.join(dir_names.recon_dir, dir_names.map_segment_dir))\n\n hitmap = np.empty(npix)\n\n for i in range(size):\n start = i*npix_segment\n stop = (i+1)*npix_segment\n hitmap_segment = np.load(os.path.join(dir_names.recon_dir, dir_names.hitmap_segment_dir, str(i).zfill(4) + '.npy')) \n hitmap[..., start:stop] = hitmap_segment\n\n hp.write_map(os.path.join(dir_names.recon_dir, \"hitmap.fits\"), hitmap)\n if config.pol_type == \"TQU\":\n mask_map = hitmap > 3\n elif config.pol_type == \"QU\":\n mask_map = hitmap > 2\n else:\n mask_map = hitmap > 1\n hp.write_map(os.path.join(dir_names.recon_dir, \"mask.fits\"), mask_map)\n\n del hitmap\n del hitmap_segment\n shutil.rmtree(os.path.join(dir_names.recon_dir, dir_names.hitmap_segment_dir))\n\n inverse_cov_matrix = np.empty((npix, ind_elements))\n\n for i in range(size):\n start = i*npix_segment\n stop = (i+1)*npix_segment\n inverse_cov_matrix_segment = np.load(os.path.join(dir_names.recon_dir, dir_names.inv_cov_matrix_segment_dir, str(i).zfill(4) + '.npy')) \n inverse_cov_matrix[start:stop] = inverse_cov_matrix_segment\n\n 
hp.write_map(os.path.join(dir_names.recon_dir, \"inverse_covariance_maps.fits\"), inverse_cov_matrix.T)\n del inverse_cov_matrix\n del inverse_cov_matrix_segment\n shutil.rmtree(os.path.join(dir_names.recon_dir, dir_names.inv_cov_matrix_segment_dir))\n\n cov_matrix = np.empty((npix, ind_elements))\n\n for i in range(size):\n start = i*npix_segment\n stop = (i+1)*npix_segment\n cov_matrix_segment = np.load(os.path.join(dir_names.recon_dir, dir_names.cov_matrix_segment_dir, str(i).zfill(4) + '.npy')) \n if config.pol_type == \"TQU\":\n cov_matrix_segment = cov_matrix_segment.reshape((npix_segment, dim**2))[..., np.array([0,1,2,4,5,8])]\n elif config.pol_type ==\"QU\":\n cov_matrix_segment = cov_matrix_segment.reshape((npix_segment, dim**2))[..., np.array([0,1,3])]\n else: \n cov_matrix_segment = cov_matrix_segment.reshape((npix_segment, dim**2))\n cov_matrix[start:stop] = cov_matrix_segment\n\n hp.write_map(os.path.join(dir_names.recon_dir, \"covariance_maps.fits\"), cov_matrix.T)\n del cov_matrix\n del cov_matrix_segment\n shutil.rmtree(os.path.join(dir_names.recon_dir, dir_names.cov_matrix_segment_dir))\n acc_stop_time = time.time()\n prompt(\"Time taken to accumulate segments : {}s\\n\".format(acc_stop_time - acc_start_time))\n\n\ndef make_data_dirs():\n dir_names = get_dir_names()\n if not os.path.exists(dir_names.sim_dir):\n try:\n os.makedirs(dir_names.sim_dir)\n except OSError:\n pass\n\n if not os.path.exists(dir_names.scan_dir):\n try:\n os.makedirs(dir_names.scan_dir)\n except OSError:\n pass\n\n if not os.path.exists(dir_names.recon_dir):\n os.makedirs(dir_names.recon_dir)\n else:\n shutil.rmtree(dir_names.recon_dir)\n os.makedirs(dir_names.recon_dir)\n\n os.makedirs(dir_names.map_segment_dir)\n os.makedirs(dir_names.hitmap_segment_dir)\n os.makedirs(dir_names.inv_cov_matrix_segment_dir)\n os.makedirs(dir_names.cov_matrix_segment_dir)\n\n config_dir = os.path.join(dir_names.sim_dir, \"config_files\")\n if not os.path.exists(config_dir):\n try:\n os.makedirs(config_dir)\n except OSError:\n pass\n this_config_dir = os.path.join(config_dir, time_stamp)\n try:\n os.makedirs(this_config_dir)\n except OSError:\n pass\n with open(os.path.join(this_config_dir, \"config_file.pkl\"), \"w\") as outfile:\n pkl.dump(config, outfile)\n bolo_config = importlib.import_module(config.bolo_config_file).bolo_config\n with open(os.path.join(this_config_dir, \"bolo_config_file.pkl\"), \"w\") as outfile:\n pkl.dump(bolo_config, outfile)\n\n code_dir = os.path.join(dir_names.sim_dir, \"code_files\")\n if not os.path.exists(code_dir):\n try:\n os.makedirs(code_dir)\n except OSError:\n pass\n this_code_dir = os.path.join(code_dir, time_stamp)\n try:\n os.makedirs(this_code_dir)\n except OSError:\n pass\n shutil.copyfile(os.path.join(config.base_dir, \"map_maker\", \"map_maker.py\"), os.path.join(this_code_dir, \"map_maker.py\"))\n shutil.copyfile(os.path.join(config.base_dir, \"timestream_simulation\", \"bolo.py\"), os.path.join(this_code_dir, \"bolo.py\"))\n shutil.copyfile(os.path.join(config.base_dir, \"timestream_simulation\", \"beam_kernel.py\"), os.path.join(this_code_dir, \"beam_kernel.py\"))\n\n\ndef start_message():\n display_string = \"\\n#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\\n\"\n display_string += \"#* BEGINNING SIMULATION\\n\"\n display_string += \"#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\\n\"\n display_string += \"TIME STAMP : {}\\n\".format(time_stamp)\n display_string += \"RUN TYPE : {}\\n\".format(run_type)\n sim_duration = 
config.t_segment*len(config.segment_list)\n display_string += \"SIMULATION DURATION : {}s, {}years\\n\".format(sim_duration, sim_duration/365.25/24.0/60.0/60.0)\n display_string += \"SCAN STRATEGY : {}\\n\".format(config.scan_strategy_name)\n display_string += \"COORDINATE SYSTEM : {}\\n\".format(config.coordinate_system)\n display_string += \"SIMULATE / READ TIMESTREAM DATA : {}\\n\".format(\"simulate\" if config.simulate_ts else \"read\")\n display_string += \"No. OF PROCESSES : {}\\n\".format(size)\n display_string += \"DETECTOR LIST : {}\\n\".format(config.bolo_list)\n display_string += \"No. OF DETECTORS : {}\\n\".format(len(config.bolo_list))\n display_string += \"SEGMENT LIST : {}\\n\".format(config.segment_list)\n display_string += \"No. OF SEGMENTS : {}\\n\".format(len(config.segment_list))\n display_string += \"Segment length : {}s, {}h\\n\".format(config.t_segment, config.t_segment/60.0/60.0)\n display_string += \"SIMULATION TYPE : {}\\n\".format(config.sim_type)\n if config.sim_type == \"signal\":\n if config.simulate_ts:\n display_string += \"BEAM TYPE : {}\\n\".format(config.beam_type)\n display_string += \"WRITE FIELD : {}\\n\".format(config.timestream_data_products)\n else:\n if config.template_type == \"tm_gradient\":\n display_string += \"GRADIENT TYPES : {}\\n\".format(config.tm_gradient_type)\n display_string += \"NOTES : {}\\n\".format(config.notes)\n display_string += \"#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\\n\\n\"\n prompt(display_string, sys.stdout)\n\n if config.simulate_ts:\n bolo = Bolo(config.bolo_list[0], config)\n bolo.display_params()\n for bolo_name in config.bolo_list:\n bolo = Bolo(bolo_name, config)\n bolo.beam.display_beam_settings()\n\ndef run_check(verbose=True):\n if 12*config.nside_out**2 % size:\n if rank == 0 and verbose:\n prompt(\"# of processors is not compatible with the distribution of pixels on the segmented maps. Exiting\") \n sys.exit()\n\n if config.simulate_ts:\n if config.sim_type == \"gradient\":\n if config.sim_pol_type != 'T':\n if rank == 0 and verbose:\n prompt(\"When computing gradients, the simulation polarisation is set to T only. Changing\")\n config.sim_pol_type = 'T'\n\n\n#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n#* Main function definition. 
This is where the code begins when executed\n#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n\nif __name__==\"__main__\":\n    config_file = sys.argv[1]\n    run_type = sys.argv[2]\n\n    config = importlib.import_module(config_file).config\n\n    time_stamp = get_time_stamp()\n\n    if run_type == 'accumulate_segments':\n        size = int(sys.argv[3])\n        accumulate_segments(size)\n\n    if run_type == 'run_mpi':\n        from mpi4py import MPI\n        comm = MPI.COMM_WORLD\n        size = comm.Get_size()\n        rank = comm.Get_rank()\n        # bcast returns the broadcast value; assign it so every rank shares rank 0's time stamp\n        time_stamp = comm.bcast(time_stamp, root=0)\n        if rank == 0:\n            make_data_dirs()\n            start_message()\n        comm.Barrier()\n        run_check()\n        run_mpi()\n        comm.Barrier()\n        if rank==0:\n            accumulate_segments(size)\n\n    if run_type == 'run_serial':\n        make_data_dirs()\n        start_message()\n        run_serial()\n","sub_path":"map_maker/map_maker.py","file_name":"map_maker.py","file_ext":"py","file_size_in_byte":18011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301349595","text":"\"\"\"\r\nShortest contiguous window that contains every value 1..K\r\n\"\"\"\r\n\r\nfrom collections import defaultdict\r\n\r\nn,k=map(int,input().split())\r\na=list(map(int,input().split()))\r\ncount=defaultdict(int)\r\nleft=0\r\nright=0\r\nK=0\r\nINF=(1<<31)-1\r\nans=INF\r\n\r\n# e.g. k=2, a=[1,3,2,1,2]: the shortest window holding both 1 and 2 is a[2:4]=[2,1], length 2\r\nfor left in range(n):\r\n    # (the remainder of this record was damaged in extraction; the advance/shrink\r\n    #  steps below are a reconstruction of the standard sliding-window pattern\r\n    #  implied by the variables defined above)\r\n    while right<n and K<k:\r\n        if a[right]<=k and count[a[right]]==0:\r\n            K+=1\r\n        count[a[right]]+=1\r\n        right+=1\r\n    if K==k:\r\n        ans=min(ans,right-left)\r\n    if a[left]<=k:\r\n        count[a[left]]-=1\r\n        if count[a[left]]==0:\r\n            K-=1\r\nprint(ans if ans<INF else -1)\r\n\nimport pygame\n\n# (the opening of this pygame script -- its record header, this import and the\n#  make_animation/create_game_window/create_obj helpers used below -- was lost\n#  in extraction; the import and the function header are reconstructions)\ndef on_click_animate(k, animate_color1, animate_color2, count):\n    if count >= 1:  # guard rebuilt around the surviving '= 1:' fragment\n        if k % 12 != 0:\n            make_animation(k - 1, animate_color1, animate_color2)\n        #make_animation(k, animate_color1, animate_color2)\n        if (k + 1) % 12 != 0:\n            make_animation(k + 1, animate_color1, animate_color2)\n        #if (k+12) % 11 != 0:\n        make_animation(k + 12, animate_color1, animate_color2)\n        #if k % 12 != 0:\n        make_animation(k - 12, animate_color1, animate_color2)\n        if k % 12 != 0:\n            make_animation(k + 11, animate_color1, animate_color2)\n        if (k+13) % 12 != 0:\n            make_animation(k + 13, animate_color1, animate_color2)\n        if (k-11) % 12 != 0:\n            make_animation(k - 11, animate_color1, animate_color2)\n        if k % 12 != 0:\n            make_animation(k - 13, animate_color1, animate_color2)\n\ndef on_click_obj():\n    count = 0\n    for k in range(0, 144):\n        on_click_animate(k, Red, Blue, count)\n\ndef add_coins():\n    for i in range(0,24):\n        game_objs[i][5] = Blue\n\n#Colors:\nWhite = (255,255,255)\nLightBlack = (50,50,50)\nBlack = (0,0,0)\nLightWhite = (200,200,200)\nGray = (128,128,128)\nLightGray = (100, 100, 100)\nRed = (255,0,0)\nBlue = (0,0,255)\n\n#Dimensions:\ngame_window_width = 600\ngame_window_height = 600\ngame_obj_back = [30, 30, 20, Gray]\ngame_obj_front = [30, 30, 15, Black]\ngame_objs = []\nx_init = 30\ny_init = 30\n\n#--------------------------Creating List of Game Objects-----\n\nfor i in range(0, 12):  # 12 rows x 12 columns; the original range(0, 144) built 1728 cells\n    x = x_init\n    for j in range(0, 12):\n        game_objs.append([x_init, y_init, 20, 15, LightBlack, LightGray])\n        x_init += 49\n    x_init = x\n    y_init += 49\n\n#------------------------------------------------------------\n\n#add_coins()\n\nvar = 0\n\npygame.init()\n\ngame_window = create_game_window(game_window_width, game_window_height)\n\ndef StartSinglePlayer(var):\n    while True:\n        game_window.fill(LightWhite)\n\n        #--------------Declaring class---------------\n        createobj = create_obj()\n\n        #--------------Creating Objects--------------\n        for i in range(0, 144):\n            createobj.layout(game_objs[i])\n\n        #--------------------------------------------\n\n        #--------------Mouseover Animation-----------\n        if var == 0:\n            for i in range(0, 144):\n                createobj.Animate(i, LightBlack, LightGray, Black, Gray)\n\n        #--------------------------------------------\n\n        for key in pygame.event.get():\n            
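# a click toggles var: it freezes the hover animation above and plays the click ripple\n            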
#------------------Animation on click object--------------\n            if key.type == pygame.MOUSEBUTTONDOWN:\n                if var == 1:\n                    var = 0\n                else:\n                    var = 1\n                on_click_obj()\n\n            #---------------------------------------------------------\n\n            #if key.type == pygame.MOUSEBUTTONUP:\n            #    var = 0\n\n            #-----------------Exiting Condition-----------------------\n            if key.type == pygame.QUIT:\n                pygame.quit()\n                return  # leave the loop; pygame calls are invalid after quit()\n\n            #---------------------------------------------------------\n\n        pygame.display.update()\n\n#StartSinglePlayer(var)","sub_path":"single_player.py","file_name":"single_player.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611768377","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: tabstop=4 shiftwidth=4 softtabstop=4 et\n#\n# Copyright @ 2014 OPS, Qunar Inc. (qunar.com)\n#\n# Author: tingfang.bao \n# DateTime: 14-7-24 3:52 PM\n\nimport sys\nimport multiprocessing\n\nfrom flask import Flask\nfrom oslo.config import cfg\nfrom werkzeug.serving import run_simple\nfrom werkzeug.wsgi import DispatcherMiddleware\n\nfrom common.web.app.application import QApplication\nfrom common.web.wsgi import wsgi_simple_app\nfrom common.web.exception import exception\n\n\nweb_opts = [\n    cfg.StrOpt('base-url',\n               default='/',\n               help='The url prefix of this site.'),\n    cfg.StrOpt('run-mode',\n               default=\"local\",\n               choices=('gunicorn', 'local'),\n               help=\"Run the server in the specified mode.\"),\n    cfg.StrOpt('bind',\n               default='0.0.0.0',\n               help='The IP address to bind'),\n    cfg.IntOpt('port',\n               default=5000,\n               help='The port to listen'),\n]\n\ngunicorn_opts = [\n    cfg.StrOpt('config',\n               default=None,\n               help='The path to a Gunicorn config file.'),\n    cfg.IntOpt('worker-count',\n               default=0,\n               help='Process worker count in gunicorn mode.'),\n    cfg.BoolOpt('daemon',\n                default=False,\n                help='Run gunicorn mode as a daemon.'),\n    cfg.StrOpt('accesslog',\n               default=None,\n               help='The Access log file to write to.'\n                    '\"-\" means log to stderr.'),\n    cfg.BoolOpt('ignore-healthcheck-accesslog',\n                default=False),\n    cfg.IntOpt('timeout',\n               default=30,\n               help='Workers silent for more than this many seconds are '\n                    'killed and restarted.')\n]\n\nCONF = cfg.CONF\nCONF.register_cli_opts(web_opts, 'web')\nCONF.register_cli_opts(gunicorn_opts, 'gunicorn')\n\n\nclass QFlaskApplication(QApplication):\n    name = \"QFlaskApplication\"\n    version = \"0\"\n\n    def __init__(self, *args, **kwargs):\n        super(QFlaskApplication, self).__init__(*args, **kwargs)\n        self.flask_app = None\n        self.wsgi_app = None\n\n    def _set_base_url(self, base_url):\n        base_url = base_url.strip()\n        if not base_url.startswith(\"/\"):\n            base_url = \"/\" + base_url\n        self.base_url = base_url\n\n    def init_flask_app(self, flask_args=None, flask_kwargs=None):\n        # init flask arguments\n        flask_args = [] if flask_args is None else flask_args\n        flask_kwargs = {} if flask_kwargs is None else flask_kwargs\n        flask_args.insert(0, self.name)\n        # create flask application\n        self.flask_app = Flask(*flask_args, **flask_kwargs)\n        self.flask_app.debug = CONF.debug\n        # NOTE(jianingy): Pass exceptions to faultwrapper\n        self.flask_app.config['PROPAGATE_EXCEPTIONS'] = True\n        self._set_base_url(CONF.web.base_url)\n        if self.base_url != \"/\":\n            self.wsgi_app = DispatcherMiddleware(wsgi_simple_app.simple_app, {\n                self.base_url: self.flask_app\n            })\n        else:\n            self.wsgi_app = self.flask_app\n        # NOTE(zhen.pei): init mixin\n        for mixin in self.__class__.__bases__:\n            if hasattr(mixin, 'flask_mixin_init'):\n                getattr(mixin, 'flask_mixin_init')(self)\n\n    
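# lifecycle of the hooks below: init_config() tweaks CONF, init_app() builds the\n    # Flask/WSGI stack, and main_loop() picks the dev server or gunicorn from web.run_mode\n    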
def init_app(self):\n        super(QFlaskApplication, self).init_app()\n        self.init_flask_app()\n\n    def init_config(self, *args, **kwargs):\n        super(QFlaskApplication, self).init_config(*args, **kwargs)\n        # NOTE(zhen.pei): call logs are only shown when debug is enabled, so\n        # force debug on when running with the local dev server\n        if CONF.web.run_mode == \"local\":\n            CONF.debug = True\n\n    def _debug_run(self):\n        self.flask_app.debug = True\n        CONF.debug = True\n        run_simple(CONF.web.bind,\n                   CONF.web.port,\n                   self.wsgi_app,\n                   use_reloader=CONF.debug,\n                   use_debugger=CONF.debug)\n\n    def _gunicorn_run(self):\n        from gunicorn.app.base import Application\n\n        app = self.wsgi_app\n\n        class QlibGunicornApp(Application):\n\n            def init(self, parser, opts, args):\n                worker_count = CONF.gunicorn.worker_count\n                if worker_count <= 0:\n                    worker_count = multiprocessing.cpu_count() * 2 + 1\n                logger_class = \"simple\"\n                if CONF.gunicorn.ignore_healthcheck_accesslog:\n                    logger_class = \"qlib.web.glogging.GunicornLogger\"\n                return {\n                    'bind': '{0}:{1}'.format(CONF.web.bind, CONF.web.port),\n                    'workers': worker_count,\n                    'daemon': CONF.gunicorn.daemon,\n                    'config': CONF.gunicorn.config,\n                    'accesslog': CONF.gunicorn.accesslog,\n                    'timeout': CONF.gunicorn.timeout,\n                    'logger_class': logger_class\n                }\n\n            def load(self):\n                return app\n\n        # NOTE(zhen.pei): keep gunicorn from parsing sys.argv[1:] by default\n        sys.argv = [sys.argv[0]]\n        QlibGunicornApp().run()\n\n    def main_loop(self):\n        if CONF.web.run_mode == \"local\":\n            self._debug_run()\n        elif CONF.web.run_mode == \"gunicorn\":\n            self._gunicorn_run()\n\n    def register_blueprint(self, *args, **kwargs):\n        self.flask_app.register_blueprint(*args, **kwargs)\n\n    def append_wsgi_middlewares(self, *middlewares):\n        if self.wsgi_app is None:\n            raise exception.QFlaskApplicationError(\n                \"must invoke init_app() first\")\n        for middleware in middlewares:\n            self.wsgi_app = middleware(self.wsgi_app)\n        return self\n","sub_path":"common/web/flask/flask_application.py","file_name":"flask_application.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"367874913","text":"#!/bin/python2.7\n# -*- coding: utf-8 -*-\n\nimport sqlite3 as sql\n\nimport time\nfrom time import gmtime, strftime\n\nclass Database:\n    def __init__(self):\n        self.dbFile = 'data.db'\n        self.initDB()\n\n    def getStatus(self):\n        return True\n\n    def initDB(self):\n        # checking if table exists is done inside schema.sql\n        with sql.connect(self.dbFile) as con:\n            cur = con.cursor()\n            with open('schema.sql', 'r') as schema_file:\n                cur.executescript(schema_file.read())\n\n    def createQuery(self, data):\n        # http://stackoverflow.com/questions/415511/how-to-get-current-time-in-python\n        time.sleep(1)\n        query = 'INSERT INTO data VALUES (\"{}\",'.format(strftime(\"%Y-%m-%d %H:%M:%S\",\n                                                                 gmtime()))\n        attributes = 'longitude', 'latitude', 'light', 'acc', 'tmp', 'baro'\n        for i, attr in enumerate(attributes):\n            if i < len(attributes)-1:\n                query += str(data[attr]) + ','\n            else:\n                query += str(data[attr]) + ')'\n        return query\n\n    def insert(self, data):\n        query = self.createQuery(data)\n        with sql.connect(self.dbFile) as con:\n            cur = con.cursor()\n            cur.execute(query)\n\n    
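# retrieve() has three modes: 'all' returns every row, 'range' filters on the\n    # time column with inclusive bounds, and 'latest' returns the newest row\n    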
def retrieve(self, type_, from_=None, to=None):\n        with sql.connect(self.dbFile) as con:\n            cur = con.cursor()\n            if type_ == 'all':\n                cur.execute('SELECT * FROM data')\n                return cur.fetchall()\n            elif type_ == 'range':\n                cur.execute('SELECT * FROM data WHERE time >= {} AND time <= {}'.format(from_, to))\n                return cur.fetchall()\n            elif type_ == 'latest':\n                cur.execute('SELECT * FROM data')\n                return cur.fetchall()[-1]\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"451252874","text":"\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\n\nfrom sklearn import metrics\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\n\nclass StockRanker(object):\n    def __init__(self,algorithm='gbdt'):\n        algo = GradientBoostingClassifier(random_state=10)\n        if algorithm == 'gbdt':\n            algo = GradientBoostingClassifier(random_state=10)\n        if algorithm == 'svm':\n            algo = svm.LinearSVC()\n        if algorithm == 'rf':\n            algo = RandomForestClassifier()\n        self.algo = algo\n\n    def train(self,X,y):\n        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n        self.algo.fit(X_train,y_train)\n        print('Model training finished!')\n        y_train_pred = self.algo.predict(X_train)\n        print(\"Accuracy on the training set : %.4g\" % metrics.accuracy_score(y_train.values, y_train_pred))\n        y_pred = self.algo.predict(X_test)\n        print(\"Accuracy on the test set : %.4g\" % metrics.accuracy_score(y_test.values, y_pred))\n","sub_path":"quant/engine/bak/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324827861","text":"import pyaudio\r\nfrom ibm_watson import SpeechToTextV1\r\nfrom ibm_watson.websocket import RecognizeCallback, AudioSource\r\nfrom threading import Thread\r\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\r\n\r\ntry:\r\n    from Queue import Queue, Full\r\nexcept ImportError:\r\n    from queue import Queue, Full\r\n\r\n\r\n\r\n###############################################\r\n#### Initialize queue to store the recordings ##\r\n###############################################\r\nCHUNK = 1024\r\n# Note: It will discard if the websocket client can't consume fast enough\r\n# So, increase the max size as per your choice\r\nBUF_MAX_SIZE = CHUNK * 10\r\n# Buffer to store audio\r\nq = Queue(maxsize=int(round(BUF_MAX_SIZE / CHUNK)))\r\n\r\n# Create an instance of AudioSource\r\naudio_source = AudioSource(q, True, True)\r\n\r\n###############################################\r\n#### Prepare Speech to Text Service ########\r\n###############################################\r\n\r\n# Place your api key and URL here.\r\nAPI_KEY = \"XXXXXXXXXXXXXXXXXXXXXXX-XXXXXXXXXXXXXXXXXXXX\"\r\nURL_S2T = \"https://api.jp-tok.speech-to-text.watson.cloud.ibm.com/instances/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\r\n\r\n\r\n# initialize speech to text service\r\nauthenticator = IAMAuthenticator(API_KEY)\r\nspeech_to_text = SpeechToTextV1(authenticator=authenticator)\r\nspeech_to_text.set_service_url(URL_S2T) # this line is not in IBM repo\r\n\r\n\r\n\r\n# define callback for the speech to text service\r\nclass MyRecognizeCallback(RecognizeCallback):\r\n    def __init__(self):\r\n        RecognizeCallback.__init__(self)\r\n\r\n    def on_transcription(self, transcript):\r\n        # pass\r\n        print( \"\\n\\n\\n Final predicted text is : \" + transcript[0][\"transcript\"] , end=\"\\n\\n\\n\")\r\n        print(\"\\n Final predicted text's confidence is : \",transcript[0][\"confidence\"])\r\n\r\n    def on_connected(self):\r\n        print('Connection was successful')\r\n\r\n    def on_error(self, error):\r\n        print('Error received: {}'.format(error))\r\n\r\n    def 
on_inactivity_timeout(self, error):\r\n print('Inactivity timeout: {}'.format(error))\r\n\r\n def on_listening(self):\r\n print('Service is listening')\r\n\r\n #Returns interim results or maximum alternatives from the service when those responses are requested.\r\n def on_hypothesis(self, hypothesis):\r\n # pass\r\n print(\"\\n Non Final output is : \" , hypothesis )\r\n\r\n #Returns all response data for the request from the service.\r\n def on_data(self, data):\r\n pass\r\n # print(\"data : \" , data)\r\n\r\n def on_close(self):\r\n print(\"Connection closed\")\r\n\r\n# this function will initiate the recognize service and pass in the AudioSource\r\ndef recognize_using_weboscket(*args):\r\n mycallback = MyRecognizeCallback()\r\n speech_to_text.recognize_using_websocket(audio=audio_source,\r\n content_type='audio/l16; rate=44100',\r\n interim_results=True,\r\n model=\"en-US_BroadbandModel\",\r\n # keywords=KEYWORD_PHRASES, #keywords to focus on\r\n # keywords_threshold=0.1, #if a keyword's probability is more than this threashold then that is 'outputted'\r\n recognize_callback=mycallback\r\n )\r\n \r\n\r\n###############################################\r\n#### Prepare the for recording using Pyaudio ##\r\n###############################################\r\n\r\n# Variables for recording the speech\r\nFORMAT = pyaudio.paInt16\r\nCHANNELS = 1\r\nRATE = 44100\r\n\r\n# define callback for pyaudio to store the recording in queue\r\ndef pyaudio_callback(in_data, frame_count, time_info, status):\r\n try:\r\n q.put(in_data)\r\n except Full:\r\n pass # discard\r\n return (None, pyaudio.paContinue)\r\n\r\n# instantiate pyaudio\r\naudio = pyaudio.PyAudio()\r\n\r\n# open stream using callback\r\nstream = audio.open(\r\n format=FORMAT,\r\n channels=CHANNELS,\r\n rate=RATE,\r\n input=True,\r\n frames_per_buffer=CHUNK,\r\n stream_callback=pyaudio_callback,\r\n start=False\r\n)\r\n\r\n#########################################################################\r\n#### Start the recording and start service to recognize the stream ######\r\n#########################################################################\r\n\r\nprint(\"IBM Watson Speech To Text API\")\r\nprint(\"----------------------------- \\n\\n\")\r\n\r\nstream.start_stream()\r\n\r\ntry:\r\n recognize_thread = Thread(target=recognize_using_weboscket, args=())\r\n recognize_thread.start()\r\n\r\n while True:\r\n pass\r\nexcept KeyboardInterrupt:\r\n # stop recording\r\n stream.stop_stream()\r\n stream.close()\r\n audio.terminate()\r\n audio_source.completed_recording()","sub_path":"IBM voice recognition.py","file_name":"IBM voice recognition.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"446354617","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass FAQ(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome('C:\\chromedriver\\chromedriver.exe')\n\n def test_FAQ(self):\n driver = self.driver\n driver.maximize_window()\n driver.get(\"https://affbank.com/\")\n element = WebDriverWait(driver, 10).until(EC.visibility_of_element_located\n ((By.XPATH, '//a[contains(@href, \"/discount\")]')))\n element.click()\n assert \"Best discounts for Affiliate Marketers\" in driver.title\n\n def tearDown(self):\n self.driver.close()\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"old_files/Chrome.py","file_name":"Chrome.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}